Compare commits

1 commit

Author | SHA1 | Message | Date
Gregory Becker | 6cb5700b1c | try using nosearch to deprioritize api docs in search | 2024-03-01 13:47:15 -08:00

1264 changed files with 9068 additions and 25607 deletions

@@ -1,4 +0,0 @@
-{
-"image": "ghcr.io/spack/ubuntu20.04-runner-amd64-gcc-11.4:2023.08.01",
-"postCreateCommand": "./.devcontainer/postCreateCommand.sh"
-}

@@ -1,20 +0,0 @@
-#!/bin/bash
-# Load spack environment at terminal startup
-cat <<EOF >> /root/.bashrc
-. /workspaces/spack/share/spack/setup-env.sh
-EOF
-# Load spack environment in this script
-. /workspaces/spack/share/spack/setup-env.sh
-# Ensure generic targets for maximum matching with buildcaches
-spack config --scope site add "packages:all:require:[target=x86_64_v3]"
-spack config --scope site add "concretizer:targets:granularity:generic"
-# Find compiler and install gcc-runtime
-spack compiler find --scope site
-# Setup buildcaches
-spack mirror add --scope site develop https://binaries.spack.io/develop
-spack buildcache keys --install --trust

@@ -22,8 +22,8 @@ jobs:
 matrix:
 operating_system: ["ubuntu-latest", "macos-latest"]
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: ${{inputs.python_version}}
 - name: Install Python packages
@@ -34,7 +34,7 @@
 run: |
 . share/spack/setup-env.sh
 coverage run $(which spack) audit packages
-coverage run $(which spack) -d audit externals
+coverage run $(which spack) audit externals
 coverage combine
 coverage xml
 - name: Package audits (without coverage)
@@ -43,9 +43,7 @@
 . share/spack/setup-env.sh
 $(which spack) audit packages
 $(which spack) audit externals
-- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
+- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # @v2.1.0
 if: ${{ inputs.with_coverage == 'true' }}
 with:
 flags: unittests,audits
-token: ${{ secrets.CODECOV_TOKEN }}
-verbose: true
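
Context note, not part of the diff: on both sides of these workflow hunks, third-party actions are pinned to full 40-character commit SHAs rather than floating tags, and the trailing `# @v2`-style comment is only a human-readable hint of the tag the SHA was taken from. Schematically (hypothetical step):

steps:
  # Pinning by commit SHA protects CI from a tag being moved or re-pushed;
  # bumping the pin means swapping the SHA, which is what most hunks here do.
  - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2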

@@ -24,7 +24,7 @@
 make patch unzip which xz python3 python3-devel tree \
 cmake bison bison-devel libstdc++-static
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -62,7 +62,7 @@
 make patch unzip xz-utils python3 python3-dev tree \
 cmake bison
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -99,7 +99,7 @@
 bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
 make patch unzip xz-utils python3 python3-dev tree
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -133,7 +133,7 @@
 make patch unzip which xz python3 python3-devel tree \
 cmake bison
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
 - name: Setup repo
@@ -158,8 +158,8 @@
 run: |
 brew install cmake bison@2.7 tree
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: "3.12"
 - name: Bootstrap clingo
@@ -182,7 +182,7 @@
 run: |
 brew install tree
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 - name: Bootstrap clingo
 run: |
 set -ex
@@ -207,7 +207,7 @@
 runs-on: ubuntu-20.04
 steps:
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
 - name: Setup repo
@@ -250,7 +250,7 @@
 bzip2 curl file g++ gcc patchelf gfortran git gzip \
 make patch unzip xz-utils python3 python3-dev tree
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -287,7 +287,7 @@
 make patch unzip xz-utils python3 python3-dev tree \
 gawk
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -320,7 +320,7 @@
 # Remove GnuPG since we want to bootstrap it
 sudo rm -rf /usr/local/bin/gpg
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 - name: Bootstrap GnuPG
 run: |
 source share/spack/setup-env.sh
@@ -338,7 +338,7 @@
 # Remove GnuPG since we want to bootstrap it
 sudo rm -rf /usr/local/bin/gpg
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 - name: Bootstrap GnuPG
 run: |
 source share/spack/setup-env.sh

@@ -50,14 +50,12 @@
 [rockylinux8, 'linux/amd64,linux/arm64', 'rockylinux:8'],
 [rockylinux9, 'linux/amd64,linux/arm64', 'rockylinux:9'],
 [fedora37, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:37'],
-[fedora38, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:38'],
-[fedora39, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:39'],
-[fedora40, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:40']]
+[fedora38, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:38']]
 name: Build ${{ matrix.dockerfile[0] }}
 if: github.repository == 'spack/spack'
 steps:
 - name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
 id: docker_meta
@@ -98,10 +96,10 @@
 uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
 - name: Set up Docker Buildx
-uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb
+uses: docker/setup-buildx-action@0d103c3126aa41d772a8362f6aa67afac040f80c
 - name: Log in to GitHub Container Registry
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
+uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
 with:
 registry: ghcr.io
 username: ${{ github.actor }}
@@ -109,13 +107,13 @@
 - name: Log in to DockerHub
 if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
+uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
 with:
 username: ${{ secrets.DOCKERHUB_USERNAME }}
 password: ${{ secrets.DOCKERHUB_TOKEN }}
 - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0
+uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56
 with:
 context: dockerfiles/${{ matrix.dockerfile[0] }}
 platforms: ${{ matrix.dockerfile[1] }}

@@ -18,7 +18,6 @@
 prechecks:
 needs: [ changes ]
 uses: ./.github/workflows/valid-style.yml
-secrets: inherit
 with:
 with_coverage: ${{ needs.changes.outputs.core }}
 all-prechecks:
@@ -36,12 +35,12 @@
 core: ${{ steps.filter.outputs.core }}
 packages: ${{ steps.filter.outputs.packages }}
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 if: ${{ github.event_name == 'push' }}
 with:
 fetch-depth: 0
 # For pull requests it's not necessary to checkout the code
-- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
+- uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a
 id: filter
 with:
 # See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
@@ -71,17 +70,14 @@
 if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
 needs: [ prechecks, changes ]
 uses: ./.github/workflows/bootstrap.yml
-secrets: inherit
 unit-tests:
 if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
 needs: [ prechecks, changes ]
 uses: ./.github/workflows/unit_tests.yaml
-secrets: inherit
 windows:
 if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
 needs: [ prechecks ]
 uses: ./.github/workflows/windows_python.yml
-secrets: inherit
 all:
 needs: [ windows, unit-tests, bootstrap ]
 runs-on: ubuntu-latest

@@ -14,10 +14,10 @@
 build-paraview-deps:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
 with:
 python-version: 3.9
 - name: Install Python packages

@@ -1,4 +1,4 @@
-black==24.4.0
+black==24.2.0
 clingo==5.7.1
 flake8==7.0.0
 isort==5.13.2

@@ -51,10 +51,10 @@
 on_develop: false
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: ${{ matrix.python-version }}
 - name: Install System packages
@@ -91,19 +91,17 @@
 UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
 run: |
 share/spack/qa/run-unit-tests
-- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
+- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
 with:
 flags: unittests,linux,${{ matrix.concretizer }}
-token: ${{ secrets.CODECOV_TOKEN }}
-verbose: true
 # Test shell integration
 shell:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: '3.11'
 - name: Install System packages
@@ -124,11 +122,9 @@
 COVERAGE: true
 run: |
 share/spack/qa/run-shell-tests
-- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
+- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
 with:
 flags: shelltests,linux
-token: ${{ secrets.CODECOV_TOKEN }}
-verbose: true
 # Test RHEL8 UBI with platform Python. This job is run
 # only on PRs modifying core Spack
@@ -141,7 +137,7 @@
 dnf install -y \
 bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
 make patch tcl unzip which xz
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 - name: Setup repo and non-root user
 run: |
 git --version
@@ -160,10 +156,10 @@
 clingo-cffi:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: '3.11'
 - name: Install System packages
@@ -185,23 +181,20 @@
 SPACK_TEST_SOLVER: clingo
 run: |
 share/spack/qa/run-unit-tests
-- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
+- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # @v2.1.0
 with:
 flags: unittests,linux,clingo
-token: ${{ secrets.CODECOV_TOKEN }}
-verbose: true
 # Run unit tests on MacOS
 macos:
-runs-on: ${{ matrix.os }}
+runs-on: macos-latest
 strategy:
 matrix:
-os: [macos-latest, macos-14]
 python-version: ["3.11"]
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: ${{ matrix.python-version }}
 - name: Install Python packages
@@ -223,8 +216,6 @@
 $(which spack) solve zlib
 common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
 $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
-- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
+- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
 with:
 flags: unittests,macos
-token: ${{ secrets.CODECOV_TOKEN }}
-verbose: true

@@ -18,8 +18,8 @@
 validate:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
 with:
 python-version: '3.11'
 cache: 'pip'
@@ -35,10 +35,10 @@
 style:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
 with:
 python-version: '3.11'
 cache: 'pip'
@@ -56,7 +56,6 @@
 share/spack/qa/run-style-tests
 audit:
 uses: ./.github/workflows/audit.yaml
-secrets: inherit
 with:
 with_coverage: ${{ inputs.with_coverage }}
 python_version: '3.11'
@@ -70,7 +69,7 @@
 dnf install -y \
 bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
 make patch tcl unzip which xz
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
 - name: Setup repo and non-root user
 run: |
 git --version

@@ -15,10 +15,10 @@
 unit-tests:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
 with:
 python-version: 3.9
 - name: Install Python packages
@@ -33,18 +33,16 @@
 ./share/spack/qa/validate_last_exit.ps1
 coverage combine -a
 coverage xml
-- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
+- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
 with:
 flags: unittests,windows
-token: ${{ secrets.CODECOV_TOKEN }}
-verbose: true
 unit-tests-cmd:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
 with:
 python-version: 3.9
 - name: Install Python packages
@@ -59,18 +57,16 @@
 ./share/spack/qa/validate_last_exit.ps1
 coverage combine -a
 coverage xml
-- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
+- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
 with:
 flags: unittests,windows
-token: ${{ secrets.CODECOV_TOKEN }}
-verbose: true
 build-abseil:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
 with:
 fetch-depth: 0
-- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
 with:
 python-version: 3.9
 - name: Install Python packages

@@ -88,7 +88,7 @@ Resources:
 [bridged](https://github.com/matrix-org/matrix-appservice-slack#matrix-appservice-slack) to Slack.
 * [**Github Discussions**](https://github.com/spack/spack/discussions):
 for Q&A and discussions. Note the pinned discussions for announcements.
-* **X**: [@spackpm](https://twitter.com/spackpm). Be sure to
+* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
 `@mention` us!
 * **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack):
 only for announcements. Please use other venues for discussions.

@@ -15,7 +15,7 @@ concretizer:
 # as possible, rather than building. If `false`, we'll always give you a fresh
 # concretization. If `dependencies`, we'll only reuse dependencies but
 # give you a fresh concretization for your root specs.
-reuse: true
+reuse: dependencies
 # Options that tune which targets are considered for concretization. The
 # concretization process is very sensitive to the number targets, and the time
 # needed to reach a solution increases noticeably with the number of targets
@@ -42,8 +42,3 @@ concretizer:
 # "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
 # "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
 strategy: minimal
-# Option to specify compatiblity between operating systems for reuse of compilers and packages
-# Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
-# it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
-# requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
-os_compatible: {}

@@ -101,12 +101,6 @@ config:
 verify_ssl: true
-# This is where custom certs for proxy/firewall are stored.
-# It can be a path or environment variable. To match ssl env configuration
-# the default is the environment variable SSL_CERT_FILE
-ssl_certs: $SSL_CERT_FILE
 # Suppress gpg warnings from binary package verification
 # Only suppresses warnings, gpg failure will still fail the install
 # Potential rationale to set True: users have already explicitly trusted the

@@ -19,6 +19,7 @@ packages:
 - apple-clang
 - clang
 - gcc
+- intel
 providers:
 elf: [libelf]
 fuse: [macfuse]

@@ -15,7 +15,7 @@
 # -------------------------------------------------------------------------
 packages:
 all:
-compiler: [gcc, clang, oneapi, xl, nag, fj, aocc]
+compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
 providers:
 awk: [gawk]
 blas: [openblas, amdblis]
@@ -24,7 +24,6 @@ packages:
 elf: [elfutils]
 fftw-api: [fftw, amdfftw]
 flame: [libflame, amdlibflame]
-fortran-rt: [gcc-runtime, intel-oneapi-runtime]
 fuse: [libfuse]
 gl: [glx, osmesa]
 glu: [mesa-glu, openglu]
@@ -35,10 +34,7 @@ packages:
 java: [openjdk, jdk, ibm-java]
 jpeg: [libjpeg-turbo, libjpeg]
 lapack: [openblas, amdlibflame]
-libc: [glibc, musl]
-libgfortran: [ gcc-runtime ]
 libglx: [mesa+glx, mesa18+glx]
-libifcore: [ intel-oneapi-runtime ]
 libllvm: [llvm]
 libosmesa: [mesa+osmesa, mesa18+osmesa]
 lua-lang: [lua, lua-luajit-openresty, lua-luajit]

@@ -59,6 +59,7 @@ upload:
 apidoc:
 sphinx-apidoc -f -T -o . ../spack
 sphinx-apidoc -f -T -o . ../llnl
+./nosearch-api-docs # set :nosearch: at top of each file
 help:
 @echo "Please use \`make <target>' where <target> is one of"

@@ -1119,9 +1119,6 @@ and ``3.4.2``. Similarly, ``@4.2:`` means any version above and including
 ``4.2``. As a short-hand, ``@3`` is equivalent to the range ``@3:3`` and
 includes any version with major version ``3``.
-Versions are ordered lexicograpically by its components. For more details
-on the order, see :ref:`the packaging guide <version-comparison>`.
 Notice that you can distinguish between the specific version ``@=3.2`` and
 the range ``@3.2``. This is useful for packages that follow a versioning
 scheme that omits the zero patch version number: ``3.2``, ``3.2.1``,

@@ -220,40 +220,6 @@ section of the configuration:
 .. _binary_caches_oci:
----------------------------------
-Automatic push to a build cache
----------------------------------
-Sometimes it is convenient to push packages to a build cache as soon as they are installed. Spack can do this by setting autopush flag when adding a mirror:
-.. code-block:: console
-$ spack mirror add --autopush <name> <url or path>
-Or the autopush flag can be set for an existing mirror:
-.. code-block:: console
-$ spack mirror set --autopush <name> # enable automatic push for an existing mirror
-$ spack mirror set --no-autopush <name> # disable automatic push for an existing mirror
-Then after installing a package it is automatically pushed to all mirrors with ``autopush: true``. The command
-.. code-block:: console
-$ spack install <package>
-will have the same effect as
-.. code-block:: console
-$ spack install <package>
-$ spack buildcache push <cache> <package> # for all caches with autopush: true
-.. note::
-Packages are automatically pushed to a build cache only if they are built from source.
 -----------------------------------------
 OCI / Docker V2 registries as build cache
 -----------------------------------------

@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:
 .. code-block:: console
-% spack -b find
+% spack find -b
 ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
 ==> 11 installed packages
 -- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrappin
 % spack clean -b
 ==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
-% spack -b find
+% spack find -b
 ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
 ==> 0 installed packages
@@ -175,4 +175,4 @@ bootstrapping.
 This command needs to be run on a machine with internet access and the resulting folder
 has to be moved over to the air-gapped system. Once the local sources are added using the
 commands suggested at the prompt, they can be used to bootstrap Spack.

@@ -250,7 +250,7 @@ generator is Ninja. To switch to the Ninja generator, simply add:
 .. code-block:: python
-generator("ninja")
+generator = "Ninja"
 ``CMakePackage`` defaults to "Unix Makefiles". If you switch to the

@@ -173,72 +173,6 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding
 ]
-^^^^^^^
-Testing
-^^^^^^^
-``PerlPackage`` provides a simple stand-alone test of the successfully
-installed package to confirm that installed perl module(s) can be used.
-These tests can be performed any time after the installation using
-``spack -v test run``. (For more information on the command, see
-:ref:`cmd-spack-test-run`.)
-The base class automatically detects perl modules based on the presence
-of ``*.pm`` files under the package's library directory. For example,
-the files under ``perl-bignum``'s perl library are:
-.. code-block:: console
-$ find . -name "*.pm"
-./bigfloat.pm
-./bigrat.pm
-./Math/BigFloat/Trace.pm
-./Math/BigInt/Trace.pm
-./Math/BigRat/Trace.pm
-./bigint.pm
-./bignum.pm
-which results in the package having the ``use_modules`` property containing:
-.. code-block:: python
-use_modules = [
-"bigfloat",
-"bigrat",
-"Math::BigFloat::Trace",
-"Math::BigInt::Trace",
-"Math::BigRat::Trace",
-"bigint",
-"bignum",
-]
-.. note::
-This list can often be used to catch missing dependencies.
-If the list is somehow wrong, you can provide the names of the modules
-yourself by overriding ``use_modules`` like so:
-.. code-block:: python
-use_modules = ["bigfloat", "bigrat", "bigint", "bignum"]
-If you only want a subset of the automatically detected modules to be
-tested, you could instead define the ``skip_modules`` property on the
-package. So, instead of overriding ``use_modules`` as shown above, you
-could define the following:
-.. code-block:: python
-skip_modules = [
-"Math::BigFloat::Trace",
-"Math::BigInt::Trace",
-"Math::BigRat::Trace",
-]
-for the same use tests.
 ^^^^^^^^^^^^^^^^^^^^^
 Alternatives to Spack
 ^^^^^^^^^^^^^^^^^^^^^

@@ -145,22 +145,6 @@ hosts when making ``ssl`` connections. Set to ``false`` to disable, and
 tools like ``curl`` will use their ``--insecure`` options. Disabling
 this can expose you to attacks. Use at your own risk.
---------------------
-``ssl_certs``
---------------------
-Path to custom certificats for SSL verification. The value can be a
-filesytem path, or an environment variable that expands to a file path.
-The default value is set to the environment variable ``SSL_CERT_FILE``
-to use the same syntax used by many other applications that automatically
-detect custom certificates.
-When ``url_fetch_method:curl`` the ``config:ssl_certs`` should resolve to
-a single file. Spack will then set the environment variable ``CURL_CA_BUNDLE``
-in the subprocess calling ``curl``.
-If ``url_fetch_method:urllib`` then files and directories are supported i.e.
-``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
-will work.
 --------------------
 ``checksum``
 --------------------

@@ -73,12 +73,9 @@ are six configuration scopes. From lowest to highest:
 Spack instance per project) or for site-wide settings on a multi-user
 machine (e.g., for a common Spack instance).
-#. **plugin**: Read from a Python project's entry points. Settings here affect
-all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes.
 #. **user**: Stored in the home directory: ``~/.spack/``. These settings
 affect all instances of Spack and take higher precedence than site,
-system, plugin, or defaults scopes.
+system, or defaults scopes.
 #. **custom**: Stored in a custom directory specified by ``--config-scope``.
 If multiple scopes are listed on the command line, they are ordered
@@ -199,45 +196,6 @@ with MPICH. You can create different configuration scopes for use with
 mpi: [mpich]
-.. _plugin-scopes:
-^^^^^^^^^^^^^
-Plugin scopes
-^^^^^^^^^^^^^
-.. note::
-Python version >= 3.8 is required to enable plugin configuration.
-Spack can be made aware of configuration scopes that are installed as part of a python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:
-.. code-block:: console
-my-package/
-├── src
-│   ├── my_package
-│   │   ├── __init__.py
-│   │   └── spack/
-│   │   │   └── config.yaml
-└── pyproject.toml
-adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:
-.. code-block:: toml
-[project.entry_points."spack.config"]
-my_package = "my_package:get_config_path"
-The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
-.. code-block:: python
-import importlib.resources
-def get_config_path():
-dirname = importlib.resources.files("my_package").joinpath("spack")
-if dirname.exists():
-return str(dirname)
 .. _platform-scopes:
 ------------------------

@@ -233,12 +233,6 @@ The OS that are currently supported are summarized in the table below:
 * - Fedora Linux 38
 - ``fedora:38``
 - ``spack/fedora38``
-* - Fedora Linux 39
-- ``fedora:39``
-- ``spack/fedora39``
-* - Fedora Linux 40
-- ``fedora:40``
-- ``spack/fedora40``

@@ -552,11 +552,11 @@ With either interpreter you can run a single command:
 .. code-block:: console
-$ spack python -c 'from spack.spec import Spec; Spec("python").concretized()'
-...
-$ spack python -i ipython -c 'from spack.spec import Spec; Spec("python").concretized()'
-Out[1]: ...
+$ spack python -c 'import distro; distro.linux_distribution()'
+('Ubuntu', '18.04', 'Bionic Beaver')
+$ spack python -i ipython -c 'import distro; distro.linux_distribution()'
+Out[1]: ('Ubuntu', '18.04', 'Bionic Beaver')
 or a file:
@@ -1071,9 +1071,9 @@ Announcing a release
 We announce releases in all of the major Spack communication channels.
 Publishing the release takes care of GitHub. The remaining channels are
-X, Slack, and the mailing list. Here are the steps:
+Twitter, Slack, and the mailing list. Here are the steps:
-#. Announce the release on X.
+#. Announce the release on Twitter.
 * Compose the tweet on the ``@spackpm`` account per the
 ``spack-twitter`` slack channel.

@@ -952,17 +952,6 @@ function, as shown in the example below:
 ^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
 all: "{name}-{version}/{compiler.name}-{compiler.version}"
-Projections also permit environment and spack configuration variable
-expansions as shown below:
-.. code-block:: yaml
-projections:
-all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIBLE"
-where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
-format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.
 The entries in the projections configuration file must all be either
 specs or the keyword ``all``. For each spec, the projection used will
 be the first non-``all`` entry that the spec satisfies, or ``all`` if

@@ -111,39 +111,3 @@ The corresponding unit tests can be run giving the appropriate options to ``spac
 (5 durations < 0.005s hidden. Use -vv to show these durations.)
 =========================================== 5 passed in 5.06s ============================================
----------------------------------------
-Registering Extensions via Entry Points
----------------------------------------
-.. note::
-Python version >= 3.8 is required to register extensions via entry points.
-Spack can be made aware of extensions that are installed as part of a python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:
-.. code-block:: console
-my-package/
-├── src
-│   ├── my_package
-│   │   └── __init__.py
-│   └── spack-scripting/ # the spack extensions
-└── pyproject.toml
-adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:
-.. code-block:: toml
-[project.entry_points."spack.extenions"]
-my_package = "my_package:get_extension_path"
-The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
-.. code-block:: python
-import importlib.resources
-def get_extension_path():
-dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
-if dirname.exists():
-return str(dirname)

@@ -250,10 +250,9 @@ Compiler configuration
 Spack has the ability to build packages with multiple compilers and
 compiler versions. Compilers can be made available to Spack by
-specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
-or automatically by running ``spack compiler find``, but for
-convenience Spack will automatically detect compilers the first time
-it needs them.
+specifying them manually in ``compilers.yaml``, or automatically by
+running ``spack compiler find``, but for convenience Spack will
+automatically detect compilers the first time it needs them.
 .. _cmd-spack-compilers:
@@ -458,48 +457,6 @@ specification. The operations available to modify the environment are ``set``, `
 prepend_path: # Similar for append|remove_path
 LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
-.. note::
-Spack is in the process of moving compilers from a separate
-attribute to be handled like all other packages. As part of this
-process, the ``compilers.yaml`` section will eventually be replaced
-by configuration in the ``packages.yaml`` section. This new
-configuration is now available, although it is not yet the default
-behavior.
-Compilers can also be configured as external packages in the
-``packages.yaml`` config file. Any external package for a compiler
-(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
-assuming the paths to the compiler executables are determinable from
-the prefix.
-If the paths to the compiler executable are not determinable from the
-prefix, you can add them to the ``extra_attributes`` field. Similarly,
-all other fields from the compilers config can be added to the
-``extra_attributes`` field for an external representing a compiler.
-.. code-block:: yaml
-packages:
-gcc:
-external:
-- spec: gcc@12.2.0 arch=linux-rhel8-skylake
-prefix: /usr
-extra_attributes:
-environment:
-set:
-GCC_ROOT: /usr
-external:
-- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
-prefix: /usr
-extra_attributes:
-paths:
-cc: /usr/bin/clang-with-suffix
-cxx: /usr/bin/clang++-with-extra-info
-fc: /usr/bin/gfortran
-f77: /usr/bin/gfortran
-extra_rpaths:
-- /usr/lib/llvm/
 ^^^^^^^^^^^^^^^^^^^^^^^
 Build Your Own Compiler

@@ -273,21 +273,9 @@ builtin support through the ``depends_on`` function, the latter simply uses a ``
 statement. Both module systems (at least in newer versions) do reference counting, so that if a
 module is loaded by two different modules, it will only be unloaded after the others are.
-The ``autoload`` key accepts the values:
-* ``none``: no autoloading
-* ``run``: autoload direct *run* type dependencies
-* ``direct``: autoload direct *link and run* type dependencies
-* ``all``: autoload all dependencies
-In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.
-The ``direct`` option is most correct: there are cases where pure link dependencies need to set
-variables for themselves, or need to have variables of their own dependencies set.
-In practice however, ``run`` is often sufficient, and may make ``module load`` snappier.
-The ``all`` option is discouraged and seldomly used.
+The ``autoload`` key accepts the values ``none``, ``direct``, and ``all``. To disable it, use
+``none``, and to enable, it's best to stick to ``direct``, which only autoloads the direct link and
+run type dependencies, relying on recursive autoloading to load the rest.
 A common complaint about autoloading is the large number of modules that are visible to the user.
 Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
@@ -309,11 +297,11 @@ Environment Modules requires version 4.7 or higher.
 tcl:
 hide_implicits: true
 all:
-autoload: direct # or `run`
+autoload: direct
 lmod:
 hide_implicits: true
 all:
-autoload: direct # or `run`
+autoload: direct

@@ -0,0 +1,6 @@
+#!/bin/sh
+# Set :nosearch: at top of each api doc file
+for filename in {spack,llnl}.*.rst; do
+$(echo ":nosearch:"; cat $filename) > $filename
+done
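
Reviewer note, not part of the diff: as extracted above, the loop body is a command substitution, which would try to execute the concatenated text and truncate the target file rather than prepend to it (grouping or indentation may have been lost in extraction). A sketch of the presumable intent, writing through a temporary file:

#!/bin/sh
# Sketch only: prepend ":nosearch:" to each generated API doc so Sphinx
# excludes it from the search index.
for filename in spack.*.rst llnl.*.rst; do
    { echo ":nosearch:"; cat "$filename"; } > "$filename.tmp" && mv "$filename.tmp" "$filename"
done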

@@ -893,50 +893,26 @@ as an option to the ``version()`` directive. Example situations would be a
 "snapshot"-like Version Control System (VCS) tag, a VCS branch such as
 ``v6-16-00-patches``, or a URL specifying a regularly updated snapshot tarball.
-.. _version-comparison:
 ^^^^^^^^^^^^^^^^^^
 Version comparison
 ^^^^^^^^^^^^^^^^^^
-Spack imposes a generic total ordering on the set of versions,
-independently from the package they are associated with.
 Most Spack versions are numeric, a tuple of integers; for example,
-``0.1``, ``6.96`` or ``1.2.3.1``. In this very basic case, version
-comparison is lexicographical on the numeric components:
-``1.2 < 1.2.1 < 1.2.2 < 1.10``.
+``0.1``, ``6.96`` or ``1.2.3.1``. Spack knows how to compare and sort
+numeric versions.
-Spack can also supports string components such as ``1.1.1a`` and
-``1.y.0``. String components are considered less than numeric
-components, so ``1.y.0 < 1.0``. This is for consistency with
-`RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_. String
-components do not have to be separated by dots or any other delimiter.
-So, the contrived version ``1y0`` is identical to ``1.y.0``.
+Some Spack versions involve slight extensions of numeric syntax; for
+example, ``py-sphinx-rtd-theme@=0.1.10a0``. In this case, numbers are
+always considered to be "newer" than letters. This is for consistency
+with `RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_.
-Pre-release suffixes also contain string parts, but they are handled
-in a special way. For example ``1.2.3alpha1`` is parsed as a pre-release
-of the version ``1.2.3``. This allows Spack to order it before the
-actual release: ``1.2.3alpha1 < 1.2.3``. Spack supports alpha, beta and
-release candidate suffixes: ``1.2alpha1 < 1.2beta1 < 1.2rc1 < 1.2``. Any
-suffix not recognized as a pre-release is treated as an ordinary
-string component, so ``1.2 < 1.2-mysuffix``.
+Spack versions may also be arbitrary non-numeric strings, for example
+``develop``, ``master``, ``local``.
-Finally, there are a few special string components that are considered
-"infinity versions". They include ``develop``, ``main``, ``master``,
-``head``, ``trunk``, and ``stable``. For example: ``1.2 < develop``.
-These are useful for specifying the most recent development version of
-a package (often a moving target like a git branch), without assigning
-a specific version number. Infinity versions are not automatically used when determining the latest version of a package unless explicitly required by another package or user.
-More formally, the order on versions is defined as follows. A version
-string is split into a list of components based on delimiters such as
-``.`` and ``-`` and string boundaries. The components are split into
-the **release** and a possible **pre-release** (if the last component
-is numeric and the second to last is a string ``alpha``, ``beta`` or ``rc``).
-The release components are ordered lexicographically, with comparsion
-between different types of components as follows:
+The order on versions is defined as follows. A version string is split
+into a list of components based on delimiters such as ``.``, ``-`` etc.
+Lists are then ordered lexicographically, where components are ordered
+as follows:
 #. The following special strings are considered larger than any other
 numeric or non-numeric version component, and satisfy the following
@@ -949,9 +925,6 @@ between different types of components as follows:
 #. All other non-numeric components are less than numeric components,
 and are ordered alphabetically.
-Finally, if the release components are equal, the pre-release components
-are used to break the tie, in the obvious way.
 The logic behind this sort order is two-fold:
 #. Non-numeric versions are usually used for special cases while
@@ -6435,12 +6408,9 @@ the ``paths`` attribute:
 echo "Target: x86_64-pc-linux-gnu"
 echo "Thread model: posix"
 echo "InstalledDir: /usr/bin"
-platforms: ["linux", "darwin"]
 results:
 - spec: 'llvm@3.9.1 +clang~lld~lldb'
-If the ``platforms`` attribute is present, tests are run only if the current host
-matches one of the listed platforms.
 Each test is performed by first creating a temporary directory structure as
 specified in the corresponding ``layout`` and by then running
 package detection and checking that the outcome matches the expected
@@ -6474,10 +6444,6 @@ package detection and checking that the outcome matches the expected
 - A spec that is expected from detection
 - Any valid spec
 - Yes
-* - ``results:[0]:extra_attributes``
-- Extra attributes expected on the associated Spec
-- Nested dictionary with string as keys, and regular expressions as leaf values
-- No
 """""""""""""""""""""""""""""""
 Reuse tests from other packages
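
Aside, not part of the diff: the ordering rules described on both sides of the version-comparison hunk can be checked interactively with `spack python`; the outputs below follow directly from the rules quoted above:

$ spack python -c 'from spack.version import Version; print(Version("1.2") < Version("1.2.1") < Version("1.10"))'
True
$ spack python -c 'from spack.version import Version; print(Version("1.y.0") < Version("1.0"))'
True
$ spack python -c 'from spack.version import Version; print(Version("1.2") < Version("develop"))'
True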

@@ -2,12 +2,12 @@ sphinx==7.2.6
 sphinxcontrib-programoutput==0.17
 sphinx_design==0.5.0
 sphinx-rtd-theme==2.0.0
-python-levenshtein==0.25.1
+python-levenshtein==0.25.0
 docutils==0.20.1
 pygments==2.17.2
 urllib3==2.2.1
-pytest==8.1.1
+pytest==8.0.2
 isort==5.13.2
-black==24.4.0
+black==24.2.0
 flake8==7.0.0
-mypy==1.9.0
+mypy==1.8.0

lib/spack/env/cc

@@ -47,8 +47,7 @@ SPACK_F77_RPATH_ARG
 SPACK_FC_RPATH_ARG
 SPACK_LINKER_ARG
 SPACK_SHORT_SPEC
-SPACK_SYSTEM_DIRS
-SPACK_MANAGED_DIRS"
+SPACK_SYSTEM_DIRS"
 # Optional parameters that aren't required to be set
@@ -174,6 +173,22 @@ preextend() {
 unset IFS
 }
+# system_dir PATH
+# test whether a path is a system directory
+system_dir() {
+IFS=':' # SPACK_SYSTEM_DIRS is colon-separated
+path="$1"
+for sd in $SPACK_SYSTEM_DIRS; do
+if [ "${path}" = "${sd}" ] || [ "${path}" = "${sd}/" ]; then
+# success if path starts with a system prefix
+unset IFS
+return 0
+fi
+done
+unset IFS
+return 1 # fail if path starts no system prefix
+}
 # Fail with a clear message if the input contains any bell characters.
 if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
 die "Compiler command line contains our separator ('${lsep}'). Cannot parse."
@@ -186,18 +201,6 @@ for param in $params; do
 fi
 done
-# eval this because SPACK_MANAGED_DIRS and SPACK_SYSTEM_DIRS are inputs we don't wanna loop over.
-# moving the eval inside the function would eval it every call.
-eval "\
-path_order() {
-case \"\$1\" in
-$SPACK_MANAGED_DIRS) return 0 ;;
-$SPACK_SYSTEM_DIRS) return 2 ;;
-/*) return 1 ;;
-esac
-}
-"
 # Check if optional parameters are defined
 # If we aren't asking for debug flags, don't add them
 if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then
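
Context note, not part of the diff: the `path_order` block removed above bakes the two directory lists into a `case` statement once, via `eval`, instead of looping over them on every call (the `system_dir` helper on the other side of the hunk loops instead). A standalone sketch of the `eval` trick, with invented glob patterns for the two variables:

#!/bin/sh
# Sketch only: both variables are assumed to hold pipe-separated shell glob
# patterns; the values here are made up.
SPACK_MANAGED_DIRS="/opt/spack/opt/*"
SPACK_SYSTEM_DIRS="/usr|/usr/*|/lib|/lib/*"

# The double-quoted eval expands the patterns into the function body once,
# at definition time; \$1 stays escaped so it is evaluated per call.
eval "\
path_order() {
    case \"\$1\" in
        $SPACK_MANAGED_DIRS) return 0 ;;  # spack-managed store
        $SPACK_SYSTEM_DIRS) return 2 ;;   # system directory
        /*) return 1 ;;                   # any other absolute path
    esac
}
"

path_order "/usr/lib"; echo "class: $?"   # prints: class: 2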
@@ -245,7 +248,7 @@ case "$command" in
lang_flags=C lang_flags=C
debug_flags="-g" debug_flags="-g"
;; ;;
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC) c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
command="$SPACK_CXX" command="$SPACK_CXX"
language="C++" language="C++"
comp="CXX" comp="CXX"
@@ -417,12 +420,11 @@ input_command="$*"
parse_Wl() { parse_Wl() {
while [ $# -ne 0 ]; do while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then if [ "$wl_expect_rpath" = yes ]; then
path_order "$1" if system_dir "$1"; then
case $? in append return_system_rpath_dirs_list "$1"
0) append return_spack_store_rpath_dirs_list "$1" ;; else
1) append return_rpath_dirs_list "$1" ;; append return_rpath_dirs_list "$1"
2) append return_system_rpath_dirs_list "$1" ;; fi
esac
wl_expect_rpath=no wl_expect_rpath=no
else else
case "$1" in case "$1" in
@@ -430,25 +432,21 @@ parse_Wl() {
arg="${1#-rpath=}" arg="${1#-rpath=}"
if [ -z "$arg" ]; then if [ -z "$arg" ]; then
shift; continue shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;; ;;
--rpath=*) --rpath=*)
arg="${1#--rpath=}" arg="${1#--rpath=}"
if [ -z "$arg" ]; then if [ -z "$arg" ]; then
shift; continue shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;; ;;
-rpath|--rpath) -rpath|--rpath)
wl_expect_rpath=yes wl_expect_rpath=yes
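For reference, parse_Wl walks the comma-separated pieces of a `-Wl,...` argument as a small state machine: a bare `-rpath`/`--rpath` arms a flag so the next piece is taken as an rpath entry, while `-rpath=/path` forms are split inline. A simplified Python rendering (the real function additionally classifies each directory and skips empty arguments):

```python
# Hedged sketch of the -Wl parsing state machine shown above.
def parse_wl(pieces):
    rpaths, other = [], []
    expect_rpath = False
    for piece in pieces:
        if expect_rpath:
            rpaths.append(piece)        # previous piece was a bare -rpath
            expect_rpath = False
        elif piece.startswith(("-rpath=", "--rpath=")):
            rpaths.append(piece.split("=", 1)[1])
        elif piece in ("-rpath", "--rpath"):
            expect_rpath = True
        else:
            other.append(piece)
    return rpaths, other

# "-Wl,-rpath,/opt/lib" arrives here as the pieces ["-rpath", "/opt/lib"]
assert parse_wl(["-rpath", "/opt/lib"]) == (["/opt/lib"], [])
assert parse_wl(["-rpath=/usr/lib", "-O2"]) == (["/usr/lib"], ["-O2"])
```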
@@ -475,20 +473,12 @@ categorize_arguments() {
return_other_args_list="" return_other_args_list=""
return_isystem_was_used="" return_isystem_was_used=""
return_isystem_spack_store_include_dirs_list=""
return_isystem_system_include_dirs_list="" return_isystem_system_include_dirs_list=""
return_isystem_include_dirs_list="" return_isystem_include_dirs_list=""
return_spack_store_include_dirs_list=""
return_system_include_dirs_list="" return_system_include_dirs_list=""
return_include_dirs_list="" return_include_dirs_list=""
return_spack_store_lib_dirs_list=""
return_system_lib_dirs_list="" return_system_lib_dirs_list=""
return_lib_dirs_list="" return_lib_dirs_list=""
return_spack_store_rpath_dirs_list=""
return_system_rpath_dirs_list="" return_system_rpath_dirs_list=""
return_rpath_dirs_list="" return_rpath_dirs_list=""
@@ -536,7 +526,7 @@ categorize_arguments() {
continue continue
fi fi
replaced="$after$stripped" replaced="$after$stripped"
# it matched, remove it # it matched, remove it
shift shift
@@ -556,32 +546,29 @@ categorize_arguments() {
arg="${1#-isystem}" arg="${1#-isystem}"
return_isystem_was_used=true return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg" if system_dir "$arg"; then
case $? in append return_isystem_system_include_dirs_list "$arg"
0) append return_isystem_spack_store_include_dirs_list "$arg" ;; else
1) append return_isystem_include_dirs_list "$arg" ;; append return_isystem_include_dirs_list "$arg"
2) append return_isystem_system_include_dirs_list "$arg" ;; fi
esac
;; ;;
-I*) -I*)
arg="${1#-I}" arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg" if system_dir "$arg"; then
case $? in append return_system_include_dirs_list "$arg"
0) append return_spack_store_include_dirs_list "$arg" ;; else
1) append return_include_dirs_list "$arg" ;; append return_include_dirs_list "$arg"
2) append return_system_include_dirs_list "$arg" ;; fi
esac
;; ;;
-L*) -L*)
arg="${1#-L}" arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg" if system_dir "$arg"; then
case $? in append return_system_lib_dirs_list "$arg"
0) append return_spack_store_lib_dirs_list "$arg" ;; else
1) append return_lib_dirs_list "$arg" ;; append return_lib_dirs_list "$arg"
2) append return_system_lib_dirs_list "$arg" ;; fi
esac
;; ;;
-l*) -l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69, # -loopopt=0 is generated erroneously in autoconf <= 2.69,
@@ -614,32 +601,29 @@ categorize_arguments() {
break break
elif [ "$xlinker_expect_rpath" = yes ]; then elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path> # Register the path of -Xlinker -rpath <other args> -Xlinker <path>
path_order "$1" if system_dir "$1"; then
case $? in append return_system_rpath_dirs_list "$1"
0) append return_spack_store_rpath_dirs_list "$1" ;; else
1) append return_rpath_dirs_list "$1" ;; append return_rpath_dirs_list "$1"
2) append return_system_rpath_dirs_list "$1" ;; fi
esac
xlinker_expect_rpath=no xlinker_expect_rpath=no
else else
case "$1" in case "$1" in
-rpath=*) -rpath=*)
arg="${1#-rpath=}" arg="${1#-rpath=}"
path_order "$arg" if system_dir "$arg"; then
case $? in append return_system_rpath_dirs_list "$arg"
0) append return_spack_store_rpath_dirs_list "$arg" ;; else
1) append return_rpath_dirs_list "$arg" ;; append return_rpath_dirs_list "$arg"
2) append return_system_rpath_dirs_list "$arg" ;; fi
esac
;; ;;
--rpath=*) --rpath=*)
arg="${1#--rpath=}" arg="${1#--rpath=}"
path_order "$arg" if system_dir "$arg"; then
case $? in append return_system_rpath_dirs_list "$arg"
0) append return_spack_store_rpath_dirs_list "$arg" ;; else
1) append return_rpath_dirs_list "$arg" ;; append return_rpath_dirs_list "$arg"
2) append return_system_rpath_dirs_list "$arg" ;; fi
esac
;; ;;
-rpath|--rpath) -rpath|--rpath)
xlinker_expect_rpath=yes xlinker_expect_rpath=yes
@@ -677,25 +661,16 @@ categorize_arguments() {
} }
categorize_arguments "$@" categorize_arguments "$@"
include_dirs_list="$return_include_dirs_list"
spack_store_include_dirs_list="$return_spack_store_include_dirs_list" lib_dirs_list="$return_lib_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list" rpath_dirs_list="$return_rpath_dirs_list"
include_dirs_list="$return_include_dirs_list" system_include_dirs_list="$return_system_include_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list" system_rpath_dirs_list="$return_system_rpath_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list" isystem_was_used="$return_isystem_was_used"
lib_dirs_list="$return_lib_dirs_list" isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list" other_args_list="$return_other_args_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list"
# #
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and # Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
@@ -755,7 +730,7 @@ esac
# Linker flags # Linker flags
case "$mode" in case "$mode" in
ccld) ld|ccld)
extend spack_flags_list SPACK_LDFLAGS extend spack_flags_list SPACK_LDFLAGS
;; ;;
esac esac
@@ -763,25 +738,16 @@ esac
IFS="$lsep" IFS="$lsep"
categorize_arguments $spack_flags_list categorize_arguments $spack_flags_list
unset IFS unset IFS
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list" spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list" spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list" spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_spack_store_include_dirs_list="$return_spack_store_include_dirs_list" spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list" spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_include_dirs_list="$return_include_dirs_list" spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list" spack_flags_other_args_list="$return_other_args_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag # On macOS insert headerpad_max_install_names linker flag
@@ -801,13 +767,11 @@ if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
# Append RPATH directories. Note that in the case of the # Append RPATH directories. Note that in the case of the
# top-level package these directories may not exist yet. For dependencies # top-level package these directories may not exist yet. For dependencies
# it is assumed that paths have already been confirmed. # it is assumed that paths have already been confirmed.
extend spack_store_rpath_dirs_list SPACK_STORE_RPATH_DIRS
extend rpath_dirs_list SPACK_RPATH_DIRS extend rpath_dirs_list SPACK_RPATH_DIRS
fi fi
fi fi
if [ "$mode" = ccld ] || [ "$mode" = ld ]; then if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend spack_store_lib_dirs_list SPACK_STORE_LINK_DIRS
extend lib_dirs_list SPACK_LINK_DIRS extend lib_dirs_list SPACK_LINK_DIRS
fi fi
@@ -834,50 +798,38 @@ case "$mode" in
;; ;;
esac esac
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
extend isystem_spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
else
extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend include_dirs_list SPACK_INCLUDE_DIRS
fi
;;
esac
# #
# Finally, reassemble the command line. # Finally, reassemble the command line.
# #
args_list="$flags_list" args_list="$flags_list"
# Include search paths partitioned by (in store, non-system, system) # Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two # NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_flags_spack_store_include_dirs_list -I extend args_list spack_flags_include_dirs_list "-I"
extend args_list spack_store_include_dirs_list -I extend args_list include_dirs_list "-I"
extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I
extend args_list spack_flags_isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}" extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}" extend args_list isystem_include_dirs_list "-isystem${lsep}"
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
elif [ "$isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
else
extend args_list SPACK_INCLUDE_DIRS "-I"
fi
;;
esac
extend args_list spack_flags_system_include_dirs_list -I extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I extend args_list system_include_dirs_list -I
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}" extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}" extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
# Library search paths partitioned by (in store, non-system, system) # Library search paths
extend args_list spack_flags_spack_store_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"
extend args_list spack_flags_lib_dirs_list "-L" extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L" extend args_list lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L" extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L" extend args_list system_lib_dirs_list "-L"
@@ -887,12 +839,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add" append args_list "$linker_arg$dtags_to_add"
fi fi
extend args_list spack_flags_spack_store_rpath_dirs_list "$rpath"
extend args_list spack_store_rpath_dirs_list "$rpath"
extend args_list spack_flags_rpath_dirs_list "$rpath" extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath" extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath" extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath" extend args_list system_rpath_dirs_list "$rpath"
;; ;;
@@ -900,12 +848,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add" append args_list "$dtags_to_add"
fi fi
extend args_list spack_flags_spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}" extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}" extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}" extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}" extend args_list system_rpath_dirs_list "-rpath${lsep}"
;; ;;
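Both linker branches above preserve one global ordering when the command line is rebuilt: Spack-store directories first, then remaining Spack/user directories, then system directories, so system locations cannot shadow Spack-provided libraries. A toy illustration of that reassembly (names invented):

```python
# Sketch of the per-flag-family ordering used by the wrapper.
def assemble(flag, store, other, system):
    return [flag + d for d in (*store, *other, *system)]

print(assemble("-L", ["/opt/spack/store/zlib/lib"], ["/home/me/lib"], ["/usr/lib"]))
# ['-L/opt/spack/store/zlib/lib', '-L/home/me/lib', '-L/usr/lib']
```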
@@ -969,3 +913,4 @@ fi
# Execute the full command, preserving spaces with IFS set # Execute the full command, preserving spaces with IFS set
# to the alarm bell separator. # to the alarm bell separator.
IFS="$lsep"; exec $full_command_list IFS="$lsep"; exec $full_command_list

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec * Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures * Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf) * Version: 0.2.2 (commit 1dc58a5776dd77e6fc6e4ba5626af5b1fb24996e)
astunparse astunparse
---------------- ----------------

View File

@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]: Tag.attrib, merge_attrib]:
if hasattr(self, a): if hasattr(self, a):
if memo is not None: if memo is not None:
setattr(t, a, copy.deepcopy(getattr(self, a), memo)) setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
else: else:
setattr(t, a, getattr(self, a)) setattr(t, a, getattr(self, a))
# fmt: on # fmt: on

View File

@@ -1,3 +1,2 @@
"""Init file to avoid namespace packages""" """Init file to avoid namespace packages"""
__version__ = "0.2.2"
__version__ = "0.2.3"

View File

@@ -3,7 +3,6 @@
""" """
import sys import sys
from .cli import main from .cli import main
sys.exit(main()) sys.exit(main())

View File

@@ -46,11 +46,7 @@ def _make_parser() -> argparse.ArgumentParser:
def cpu() -> int: def cpu() -> int:
"""Run the `archspec cpu` subcommand.""" """Run the `archspec cpu` subcommand."""
try: print(archspec.cpu.host())
print(archspec.cpu.host())
except FileNotFoundError as exc:
print(exc)
return 1
return 0 return 0

View File

@@ -5,14 +5,10 @@
"""The "cpu" package permits to query and compare different """The "cpu" package permits to query and compare different
CPU microarchitectures. CPU microarchitectures.
""" """
from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
from .microarchitecture import TARGETS, generic_microarchitecture
from .microarchitecture import version_components
from .detect import host from .detect import host
from .microarchitecture import (
TARGETS,
Microarchitecture,
UnsupportedMicroarchitecture,
generic_microarchitecture,
version_components,
)
__all__ = [ __all__ = [
"Microarchitecture", "Microarchitecture",

View File

@@ -4,17 +4,15 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures""" """Detection of CPU microarchitectures"""
import collections import collections
import functools
import os import os
import platform import platform
import re import re
import struct
import subprocess import subprocess
import warnings import warnings
from typing import Dict, List, Optional, Set, Tuple, Union
from ..vendor.cpuid.cpuid import CPUID from .microarchitecture import generic_microarchitecture, TARGETS
from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture from .schema import TARGETS_JSON
from .schema import CPUID_JSON, TARGETS_JSON
#: Mapping from operating systems to chain of commands #: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu #: to obtain a dictionary of raw info on the current cpu
@@ -24,46 +22,43 @@
#: functions checking the compatibility of the host with a given target #: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {} COMPATIBILITY_CHECKS = {}
# Constants for commonly used architectures
X86_64 = "x86_64"
AARCH64 = "aarch64"
PPC64LE = "ppc64le"
PPC64 = "ppc64"
RISCV64 = "riscv64"
def info_dict(operating_system):
def detection(operating_system: str): """Decorator to mark functions that are meant to return raw info on
"""Decorator to mark functions that are meant to return partial information on the current cpu. the current cpu.
Args: Args:
operating_system: operating system where this function can be used. operating_system (str or tuple): operating system for which the marked
function is a viable factory of raw info dictionaries.
""" """
def decorator(factory): def decorator(factory):
INFO_FACTORY[operating_system].append(factory) INFO_FACTORY[operating_system].append(factory)
return factory
@functools.wraps(factory)
def _impl():
info = factory()
# Check that info contains a few mandatory fields
msg = 'field "{0}" is missing from raw info dictionary'
assert "vendor_id" in info, msg.format("vendor_id")
assert "flags" in info, msg.format("flags")
assert "model" in info, msg.format("model")
assert "model_name" in info, msg.format("model_name")
return info
return _impl
return decorator return decorator
def partial_uarch( @info_dict(operating_system="Linux")
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0 def proc_cpuinfo():
) -> Microarchitecture: """Returns a raw info dictionary by parsing the first entry of
"""Construct a partial microarchitecture, from information gathered during system scan.""" ``/proc/cpuinfo``
return Microarchitecture( """
name=name, info = {}
parents=[],
vendor=vendor,
features=features or set(),
compilers={},
generation=generation,
)
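partial_uarch gives the scanners a uniform return type: a Microarchitecture carrying whatever fields the scan could fill, with neutral defaults elsewhere. A small usage sketch, assuming archspec 0.2.3 is importable; the vendor and feature values are illustrative:

```python
from archspec.cpu.detect import partial_uarch

host_info = partial_uarch(vendor="GenuineIntel", features={"sse2", "avx2"})
print(host_info.vendor)               # GenuineIntel
print("avx2" in host_info.features)   # True
```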
@detection(operating_system="Linux")
def proc_cpuinfo() -> Microarchitecture:
"""Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``"""
data = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
for line in file: for line in file:
key, separator, value = line.partition(":") key, separator, value = line.partition(":")
@@ -75,96 +70,11 @@ def proc_cpuinfo() -> Microarchitecture:
# #
# we are on a blank line separating two cpus. Exit early as # we are on a blank line separating two cpus. Exit early as
# we want to read just the first entry in /proc/cpuinfo # we want to read just the first entry in /proc/cpuinfo
if separator != ":" and data: if separator != ":" and info:
break break
data[key.strip()] = value.strip() info[key.strip()] = value.strip()
return info
architecture = _machine()
if architecture == X86_64:
return partial_uarch(
vendor=data.get("vendor_id", "generic"), features=_feature_set(data, key="flags")
)
if architecture == AARCH64:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
)
if architecture in (PPC64LE, PPC64):
generation_match = re.search(r"POWER(\d+)", data.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
return partial_uarch(generation=generation)
if architecture == RISCV64:
if data.get("uarch") == "sifive,u74-mc":
data["uarch"] = "u74mc"
return partial_uarch(name=data.get("uarch", RISCV64))
return generic_microarchitecture(architecture)
class CpuidInfoCollector:
"""Collects the information we need on the host CPU from cpuid"""
# pylint: disable=too-few-public-methods
def __init__(self):
self.cpuid = CPUID()
registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
self.highest_basic_support = registers.eax
self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
"utf-8"
)
registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
self.highest_extension_support = registers.eax
self.features = self._features()
def _features(self):
result = set()
def check_features(data):
registers = self.cpuid.registers_for(**data["input"])
for feature_check in data["bits"]:
current = getattr(registers, feature_check["register"])
if self._is_bit_set(current, feature_check["bit"]):
result.add(feature_check["name"])
for call_data in CPUID_JSON["flags"]:
if call_data["input"]["eax"] > self.highest_basic_support:
continue
check_features(call_data)
for call_data in CPUID_JSON["extension-flags"]:
if call_data["input"]["eax"] > self.highest_extension_support:
continue
check_features(call_data)
return result
def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit
return register & mask > 0
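Each entry in cpuid.json names a register and a bit position, and _is_bit_set reduces the feature test to a single mask. For example, AVX is bit 28 of ECX under leaf eax=1:

```python
# Standalone version of the bit test used by CpuidInfoCollector.
def is_bit_set(register: int, bit: int) -> bool:
    return register & (1 << bit) > 0

ecx = 1 << 28              # pretend cpuid returned only the AVX bit
assert is_bit_set(ecx, 28)
assert not is_bit_set(ecx, 27)
```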
@detection(operating_system="Windows")
def cpuid_info():
"""Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
architecture = _machine()
if architecture == X86_64:
data = CpuidInfoCollector()
return partial_uarch(vendor=data.vendor, features=data.features)
return generic_microarchitecture(architecture)
def _check_output(args, env): def _check_output(args, env):
@@ -173,25 +83,14 @@ def _check_output(args, env):
return str(output.decode("utf-8")) return str(output.decode("utf-8"))
WINDOWS_MAPPING = {
"AMD64": "x86_64",
"ARM64": "aarch64",
}
def _machine(): def _machine():
"""Return the machine architecture we are on""" """ "Return the machine architecture we are on"""
operating_system = platform.system() operating_system = platform.system()
# If we are not on Darwin or Windows, trust what Python tells us # If we are not on Darwin, trust what Python tells us
if operating_system not in ("Darwin", "Windows"): if operating_system != "Darwin":
return platform.machine() return platform.machine()
# Normalize windows specific names
if operating_system == "Windows":
platform_machine = platform.machine()
return WINDOWS_MAPPING.get(platform_machine, platform_machine)
# On Darwin it might happen that we are on M1, but using an interpreter # On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we # built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that. # need to fix that.
@@ -204,47 +103,54 @@ def _machine():
if "Apple" in output: if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return # Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter. # "arm64" instead of "aarch64". Here we normalize to the latter.
return AARCH64 return "aarch64"
return X86_64 return "x86_64"
@detection(operating_system="Darwin") @info_dict(operating_system="Darwin")
def sysctl_info() -> Microarchitecture: def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl.""" """Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path() child_environment = _ensure_bin_usrbin_in_path()
def sysctl(*args: str) -> str: def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip() return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == X86_64: if _machine() == "x86_64":
features = ( flags = (
f'{sysctl("-n", "machdep.cpu.features").lower()} ' sysctl("-n", "machdep.cpu.features").lower()
f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}' + " "
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
) )
features = set(features.split()) info = {
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
"flags": flags,
"model": sysctl("-n", "machdep.cpu.model"),
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
else:
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
# Flags detected on Darwin turned to their linux counterpart info = {
for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items(): "vendor_id": "Apple",
if darwin_flag in features: "flags": [],
features.update(linux_flag.split()) "model": model,
"CPU implementer": "Apple",
return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features) "model name": sysctl("-n", "machdep.cpu.brand_string"),
}
model = "unknown" return info
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
return partial_uarch(name=model, vendor="Apple")
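The x86_64 branch above rewrites Darwin feature names to their Linux spellings using the conversions table in microarchitectures.json. A sketch with an invented two-entry table (the real mapping lives under TARGETS_JSON["conversions"]["darwin_flags"]):

```python
# Hedged sketch of the darwin -> linux flag translation.
DARWIN_TO_LINUX = {"avx1.0": "avx", "rdrand": "rdrnd"}  # example entries

features = {"avx1.0", "sse4.2", "rdrand"}
for darwin_flag, linux_flag in DARWIN_TO_LINUX.items():
    if darwin_flag in features:
        features.update(linux_flag.split())
print(sorted(features))  # both spellings present, e.g. 'avx' and 'avx1.0'
```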
def _ensure_bin_usrbin_in_path(): def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there # Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items()) child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep) search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"): for additional_path in ("/sbin", "/usr/sbin"):
@@ -254,10 +160,22 @@ def _ensure_bin_usrbin_in_path():
return child_environment return child_environment
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str: def adjust_raw_flags(info):
"""Adjust the vendor field to make it human-readable""" """Adjust the flags detected on the system to homogenize
if "CPU implementer" not in data: slightly different representations.
return "generic" """
# Flags detected on Darwin turned to their linux counterpart
flags = info.get("flags", [])
d2l = TARGETS_JSON["conversions"]["darwin_flags"]
for darwin_flag, linux_flag in d2l.items():
if darwin_flag in flags:
info["flags"] += " " + linux_flag
def adjust_raw_vendor(info):
"""Adjust the vendor field to make it human readable"""
if "CPU implementer" not in info:
return
# Mapping numeric codes to vendor (ARM). This list is a merge from # Mapping numeric codes to vendor (ARM). This list is a merge from
# different sources: # different sources:
@@ -267,37 +185,43 @@ def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
# https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def # https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
# https://patchwork.kernel.org/patch/10524949/ # https://patchwork.kernel.org/patch/10524949/
arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"] arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
arm_code = data["CPU implementer"] arm_code = info["CPU implementer"]
return arm_vendors.get(arm_code, arm_code) if arm_code in arm_vendors:
info["CPU implementer"] = arm_vendors[arm_code]
def _feature_set(data: Dict[str, str], key: str) -> Set[str]: def raw_info_dictionary():
return set(data.get(key, "").split()) """Returns a dictionary with information on the cpu of the current host.
This function calls all the viable factories one after the other until
def detected_info() -> Microarchitecture: there's one that is able to produce the requested information.
"""Returns a partial Microarchitecture with information on the CPU of the current host.
This function calls all the viable factories one after the other until there's one that is
able to produce the requested information. Falls-back to a generic microarchitecture, if none
of the calls succeed.
""" """
# pylint: disable=broad-except # pylint: disable=broad-except
info = {}
for factory in INFO_FACTORY[platform.system()]: for factory in INFO_FACTORY[platform.system()]:
try: try:
return factory() info = factory()
except Exception as exc: except Exception as exc:
warnings.warn(str(exc)) warnings.warn(str(exc))
return generic_microarchitecture(_machine()) if info:
adjust_raw_flags(info)
adjust_raw_vendor(info)
break
return info
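detected_info (left column) tries each registered factory in order, demotes any failure to a warning, and only falls back to a generic microarchitecture when every factory fails. The pattern in isolation, with invented factories:

```python
import warnings

def failing():
    raise OSError("no /proc/cpuinfo")   # stand-in for a scan that cannot run

def working():
    return "zen3"                       # stand-in for a successful scan

def first_working(factories, fallback):
    for factory in factories:
        try:
            return factory()
        except Exception as exc:        # broad catch on purpose, as above
            warnings.warn(str(exc))
    return fallback()

print(first_working([failing, working], fallback=lambda: "x86_64"))  # zen3
```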
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]: def compatible_microarchitectures(info):
"""Returns an unordered list of known micro-architectures that are compatible with the """Returns an unordered list of known micro-architectures that are
partial Microarchitecture passed as input. compatible with the info dictionary passed as argument.
Args:
info (dict): dictionary containing information on the host cpu
""" """
architecture_family = _machine() architecture_family = _machine()
# If a tester is not registered, assume no known target is compatible with the host # If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False) tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
return [x for x in TARGETS.values() if tester(info, x)] or [ return [x for x in TARGETS.values() if tester(info, x)] or [
generic_microarchitecture(architecture_family) generic_microarchitecture(architecture_family)
@@ -306,8 +230,8 @@ def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitec
def host(): def host():
"""Detects the host micro-architecture and returns it.""" """Detects the host micro-architecture and returns it."""
# Retrieve information on the host's cpu # Retrieve a dictionary with raw information on the host's cpu
info = detected_info() info = raw_info_dictionary()
# Get a list of possible candidates for this micro-architecture # Get a list of possible candidates for this micro-architecture
candidates = compatible_microarchitectures(info) candidates = compatible_microarchitectures(info)
@@ -334,15 +258,16 @@ def sorting_fn(item):
return max(candidates, key=sorting_fn) return max(candidates, key=sorting_fn)
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]): def compatibility_check(architecture_family):
"""Decorator to register a function as a proper compatibility check. """Decorator to register a function as a proper compatibility check.
A compatibility check function takes a partial Microarchitecture object as a first argument, A compatibility check function takes the raw info dictionary as a first
and an arbitrary target Microarchitecture as the second argument. It returns True if the argument and an arbitrary target as the second argument. It returns True
target is compatible with first argument, False otherwise. if the target is compatible with the info dictionary, False otherwise.
Args: Args:
architecture_family: architecture family for which this test can be used architecture_family (str or tuple): architecture family for which
this test can be used, e.g. x86_64 or ppc64le etc.
""" """
# Turn the argument into something iterable # Turn the argument into something iterable
if isinstance(architecture_family, str): if isinstance(architecture_family, str):
@@ -355,57 +280,86 @@ def decorator(func):
return decorator return decorator
@compatibility_check(architecture_family=(PPC64LE, PPC64)) @compatibility_check(architecture_family=("ppc64le", "ppc64"))
def compatibility_check_for_power(info, target): def compatibility_check_for_power(info, target):
"""Compatibility check for PPC64 and PPC64LE architectures.""" """Compatibility check for PPC64 and PPC64LE architectures."""
basename = platform.machine()
generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
# We can use a target if it descends from our machine type and our # We can use a target if it descends from our machine type and our
# generation (9 for POWER9, etc) is at least its generation. # generation (9 for POWER9, etc) is at least its generation.
arch_root = TARGETS[_machine()] arch_root = TARGETS[basename]
return ( return (
target == arch_root or arch_root in target.ancestors target == arch_root or arch_root in target.ancestors
) and target.generation <= info.generation ) and target.generation <= generation
@compatibility_check(architecture_family=X86_64) @compatibility_check(architecture_family="x86_64")
def compatibility_check_for_x86_64(info, target): def compatibility_check_for_x86_64(info, target):
"""Compatibility check for x86_64 architectures.""" """Compatibility check for x86_64 architectures."""
basename = "x86_64"
vendor = info.get("vendor_id", "generic")
features = set(info.get("flags", "").split())
# We can use a target if it descends from our machine type, is from our # We can use a target if it descends from our machine type, is from our
# vendor, and we have all of its features # vendor, and we have all of its features
arch_root = TARGETS[X86_64] arch_root = TARGETS[basename]
return ( return (
(target == arch_root or arch_root in target.ancestors) (target == arch_root or arch_root in target.ancestors)
and target.vendor in (info.vendor, "generic") and target.vendor in (vendor, "generic")
and target.features.issubset(info.features) and target.features.issubset(features)
) )
@compatibility_check(architecture_family=AARCH64) @compatibility_check(architecture_family="aarch64")
def compatibility_check_for_aarch64(info, target): def compatibility_check_for_aarch64(info, target):
"""Compatibility check for AARCH64 architectures.""" """Compatibility check for AARCH64 architectures."""
# At the moment, it's not clear how to detect compatibility with basename = "aarch64"
features = set(info.get("Features", "").split())
vendor = info.get("CPU implementer", "generic")
# At the moment it's not clear how to detect compatibility with
# a specific version of the architecture # a specific version of the architecture
if target.vendor == "generic" and target.name != AARCH64: if target.vendor == "generic" and target.name != "aarch64":
return False return False
arch_root = TARGETS[AARCH64] arch_root = TARGETS[basename]
arch_root_and_vendor = arch_root == target.family and target.vendor in ( arch_root_and_vendor = arch_root == target.family and target.vendor in (
info.vendor, vendor,
"generic", "generic",
) )
# On macOS it seems impossible to get all the CPU features # On macOS it seems impossible to get all the CPU features
# with syctl info, but for ARM we can get the exact model # with syctl info, but for ARM we can get the exact model
if platform.system() == "Darwin": if platform.system() == "Darwin":
model = TARGETS[info.name] model_key = info.get("model", basename)
model = TARGETS[model_key]
return arch_root_and_vendor and (target == model or target in model.ancestors) return arch_root_and_vendor and (target == model or target in model.ancestors)
return arch_root_and_vendor and target.features.issubset(info.features) return arch_root_and_vendor and target.features.issubset(features)
@compatibility_check(architecture_family=RISCV64) @compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target): def compatibility_check_for_riscv64(info, target):
"""Compatibility check for riscv64 architectures.""" """Compatibility check for riscv64 architectures."""
arch_root = TARGETS[RISCV64] basename = "riscv64"
uarch = info.get("uarch")
# sifive unmatched board
if uarch == "sifive,u74-mc":
uarch = "u74mc"
# catch-all for unknown uarchs
else:
uarch = "riscv64"
arch_root = TARGETS[basename]
return (target == arch_root or arch_root in target.ancestors) and ( return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic" target == uarch or target.vendor == "generic"
) )
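All four checks reduce to the same shape: the target descends from the host's architecture family, the vendor matches (or is generic), and the target's features are a subset of the host's. They ultimately back the public entry point; output below depends on the machine this runs on, and the `<=` comparison assumes the usual partial order archspec defines on targets:

```python
import archspec.cpu

host = archspec.cpu.host()
print(host.name, host.vendor)    # e.g. 'zen3 AuthenticAMD'
print(str(host.family))          # e.g. 'x86_64'
lineage = [t for t in archspec.cpu.TARGETS.values() if t <= host]
print(len(lineage), "known targets in this host's lineage")
```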

View File

@@ -13,7 +13,6 @@
import archspec import archspec
import archspec.cpu.alias import archspec.cpu.alias
import archspec.cpu.schema import archspec.cpu.schema
from .alias import FEATURE_ALIASES from .alias import FEATURE_ALIASES
from .schema import LazyDictionary from .schema import LazyDictionary
@@ -48,7 +47,7 @@ class Microarchitecture:
which has "broadwell" as a parent, supports running binaries which has "broadwell" as a parent, supports running binaries
optimized for "broadwell". optimized for "broadwell".
vendor (str): vendor of the micro-architecture vendor (str): vendor of the micro-architecture
features (set of str): supported CPU flags. Note that the semantic features (list of str): supported CPU flags. Note that the semantic
of the flags in this field might vary among architectures, if of the flags in this field might vary among architectures, if
at all present. For instance x86_64 processors will list all at all present. For instance x86_64 processors will list all
the flags supported by a given CPU while Arm processors will the flags supported by a given CPU while Arm processors will
@@ -181,28 +180,24 @@ def generic(self):
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"] generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
return max(generics, key=lambda x: len(x.ancestors)) return max(generics, key=lambda x: len(x.ancestors))
def to_dict(self): def to_dict(self, return_list_of_items=False):
"""Returns a dictionary representation of this object.""" """Returns a dictionary representation of this object.
return {
"name": str(self.name),
"vendor": str(self.vendor),
"features": sorted(str(x) for x in self.features),
"generation": self.generation,
"parents": [str(x) for x in self.parents],
"compilers": self.compilers,
}
@staticmethod Args:
def from_dict(data) -> "Microarchitecture": return_list_of_items (bool): if True returns an ordered list of
"""Construct a microarchitecture from a dictionary representation.""" items instead of the dictionary
return Microarchitecture( """
name=data["name"], list_of_items = [
parents=[TARGETS[x] for x in data["parents"]], ("name", str(self.name)),
vendor=data["vendor"], ("vendor", str(self.vendor)),
features=set(data["features"]), ("features", sorted(str(x) for x in self.features)),
compilers=data.get("compilers", {}), ("generation", self.generation),
generation=data.get("generation", 0), ("parents", [str(x) for x in self.parents]),
) ]
if return_list_of_items:
return list_of_items
return dict(list_of_items)
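The new to_dict/from_dict pair (left column) round-trips through plain JSON-friendly types; from_dict resolves parent names back into TARGETS entries, so the result should compare equal to the original. A quick check, assuming archspec 0.2.3:

```python
import archspec.cpu
from archspec.cpu.microarchitecture import Microarchitecture

uarch = archspec.cpu.TARGETS["haswell"]
data = uarch.to_dict()
print(data["name"])                              # haswell
print(Microarchitecture.from_dict(data) == uarch)  # True
```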
def optimization_flags(self, compiler, version): def optimization_flags(self, compiler, version):
"""Returns a string containing the optimization flags that needs """Returns a string containing the optimization flags that needs
@@ -276,7 +271,9 @@ def tuplify(ver):
flags = flags_fmt.format(**compiler_entry) flags = flags_fmt.format(**compiler_entry)
return flags return flags
msg = "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}" msg = (
"cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
)
if compiler_info: if compiler_info:
versions = [x["versions"] for x in compiler_info] versions = [x["versions"] for x in compiler_info]
msg += f' [supported compiler versions are {", ".join(versions)}]' msg += f' [supported compiler versions are {", ".join(versions)}]'
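For context, optimization_flags is the consumer of those compiler entries; the exact string depends on the tables shipped in microarchitectures.json, so the output shown is only indicative:

```python
import archspec.cpu

uarch = archspec.cpu.TARGETS["icelake"]
print(uarch.optimization_flags("gcc", "11.2.0"))
# e.g. '-march=icelake-client -mtune=icelake-client'
```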
@@ -292,7 +289,9 @@ def generic_microarchitecture(name):
Args: Args:
name (str): name of the micro-architecture name (str): name of the micro-architecture
""" """
return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={}) return Microarchitecture(
name, parents=[], vendor="generic", features=[], compilers={}
)
def version_components(version): def version_components(version):
@@ -346,7 +345,9 @@ def fill_target_from_dict(name, data, targets):
compilers = values.get("compilers", {}) compilers = values.get("compilers", {})
generation = values.get("generation", 0) generation = values.get("generation", 0)
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation) targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation
)
known_targets = {} known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"] data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]

View File

@@ -7,9 +7,7 @@
""" """
import collections.abc import collections.abc
import json import json
import os import os.path
import pathlib
from typing import Tuple
class LazyDictionary(collections.abc.MutableMapping): class LazyDictionary(collections.abc.MutableMapping):
@@ -48,65 +46,21 @@ def __len__(self):
return len(self.data) return len(self.data)
#: Environment variable that might point to a directory with a user defined JSON file def _load_json_file(json_file):
DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR" json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
json_dir = os.path.abspath(json_dir)
#: Environment variable that might point to a directory with extensions to JSON files def _factory():
EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR" filename = os.path.join(json_dir, json_file)
with open(filename, "r", encoding="utf-8") as file:
return json.load(file)
return _factory
def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]:
"""Given a filename, returns the absolute path for the main JSON file, and an
optional absolute path for an extension JSON file.
Args:
filename: filename for the JSON file
allow_custom: if True, allows overriding the location where the file resides
"""
json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
json_dir = json_dir.absolute()
json_file = json_dir / filename
extension_file = None
if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
extension_dir.absolute()
extension_file = extension_dir / filename
return json_file, extension_file
def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
with open(json_file, "r", encoding="utf-8") as file:
data = json.load(file)
if not extension_file or not extension_file.exists():
return data
with open(extension_file, "r", encoding="utf-8") as file:
extension_data = json.load(file)
top_level_sections = list(data.keys())
for key in top_level_sections:
if key not in extension_data:
continue
data[key].update(extension_data[key])
return data
#: In memory representation of the data in microarchitectures.json, #: In memory representation of the data in microarchitectures.json,
#: loaded on first access #: loaded on first access
TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True)) TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
#: JSON schema for microarchitectures.json, loaded on first access #: JSON schema for microarchitectures.json, loaded on first access
TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json")) SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
#: Information on how to call 'cpuid' to get information on the HOST CPU
CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))
#: JSON schema for cpuid.json, loaded on first access
CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
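The two new environment variables let users relocate or extend the JSON data. A sketch of the override path; note the variable must be set before archspec.cpu.schema is first imported, since _json_file runs at import time, and the minimal JSON content here is only a placeholder:

```python
import json, os, pathlib, tempfile

custom = pathlib.Path(tempfile.mkdtemp())
(custom / "microarchitectures.json").write_text(
    json.dumps({"microarchitectures": {}, "feature_aliases": {}, "conversions": {}})
)
os.environ["ARCHSPEC_CPU_DIR"] = str(custom)

import archspec.cpu.schema  # first import after setting the variable

print(list(archspec.cpu.schema.TARGETS_JSON.keys()))
```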

View File

@@ -9,11 +9,11 @@ language specific APIs.
Currently the repository contains the following JSON files: Currently the repository contains the following JSON files:
```console ```console
cpu/ .
├── cpuid.json # Contains information on CPUID calls to retrieve vendor and features on x86_64 ├── COPYRIGHT
├── cpuid_schema.json # Schema for the file above └── cpu
├── microarchitectures.json # Contains information on CPU microarchitectures     ├── microarchitectures.json # Contains information on CPU microarchitectures
└── microarchitectures_schema.json # Schema for the file above     └── microarchitectures_schema.json # Schema for the file above
``` ```

File diff suppressed because it is too large

View File

@@ -1,134 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for microarchitecture definitions and feature aliases",
"type": "object",
"additionalProperties": false,
"properties": {
"vendor": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"highest_extension_support": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
},
"extension-flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
}
}
}
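For orientation, here is a minimal document with the shape this schema accepts, describing a single AVX flag read from leaf eax=1; all values are illustrative, not copied from the real cpuid.json:

```python
import json

cpuid_doc = {
    "vendor": {"description": "CPU vendor string", "input": {"eax": 0, "ecx": 0}},
    "highest_extension_support": {
        "description": "Highest extended leaf",
        "input": {"eax": 2147483648, "ecx": 0},  # 0x80000000
    },
    "flags": [
        {
            "description": "Features in leaf 1",
            "input": {"eax": 1, "ecx": 0},
            "bits": [{"name": "avx", "register": "ecx", "bit": 28}],
        }
    ],
    "extension-flags": [],
}
print(json.dumps(cpuid_doc, indent=2))
```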

View File

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Anders Høst
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,76 +0,0 @@
cpuid.py
========
Now, this is silly!
Pure Python library for accessing information about x86 processors
by querying the [CPUID](http://en.wikipedia.org/wiki/CPUID)
instruction. Well, not exactly pure Python...
It works by allocating a small piece of virtual memory, copying
a raw x86 function to that memory, giving the memory execute
permissions and then calling the memory as a function. The injected
function executes the CPUID instruction and copies the result back
to a ctypes.Structure where it can be read by Python.
It should work fine on both 32 and 64 bit versions of Windows and Linux
running x86 processors. Apple OS X and other BSD systems should also work,
not tested though...
Why?
----
For poops and giggles. Plus, having access to a low-level feature
without having to compile a C wrapper is pretty neat.
Examples
--------
Getting info with eax=0:
import cpuid
q = cpuid.CPUID()
eax, ebx, ecx, edx = q(0)
Running the files:
$ python example.py
Vendor ID : GenuineIntel
CPU name : Intel(R) Xeon(R) CPU W3550 @ 3.07GHz
Vector instructions supported:
SSE : Yes
SSE2 : Yes
SSE3 : Yes
SSSE3 : Yes
SSE4.1 : Yes
SSE4.2 : Yes
SSE4a : --
AVX : --
AVX2 : --
$ python cpuid.py
CPUID A B C D
00000000 0000000b 756e6547 6c65746e 49656e69
00000001 000106a5 00100800 009ce3bd bfebfbff
00000002 55035a01 00f0b2e4 00000000 09ca212c
00000003 00000000 00000000 00000000 00000000
00000004 00000000 00000000 00000000 00000000
00000005 00000040 00000040 00000003 00001120
00000006 00000003 00000002 00000001 00000000
00000007 00000000 00000000 00000000 00000000
00000008 00000000 00000000 00000000 00000000
00000009 00000000 00000000 00000000 00000000
0000000a 07300403 00000044 00000000 00000603
0000000b 00000000 00000000 00000095 00000000
80000000 80000008 00000000 00000000 00000000
80000001 00000000 00000000 00000001 28100800
80000002 65746e49 2952286c 6f655820 2952286e
80000003 55504320 20202020 20202020 57202020
80000004 30353533 20402020 37302e33 007a4847
80000005 00000000 00000000 00000000 00000000
80000006 00000000 00000000 01006040 00000000
80000007 00000000 00000000 00000000 00000100
80000008 00003024 00000000 00000000 00000000

View File

@@ -1,172 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import platform
import os
import ctypes
from ctypes import c_uint32, c_long, c_ulong, c_size_t, c_void_p, POINTER, CFUNCTYPE
# Posix x86_64:
# Three first call registers : RDI, RSI, RDX
# Volatile registers : RAX, RCX, RDX, RSI, RDI, R8-11
# Windows x86_64:
# Three first call registers : RCX, RDX, R8
# Volatile registers : RAX, RCX, RDX, R8-11
# cdecl 32 bit:
# Three first call registers : Stack (%esp)
# Volatile registers : EAX, ECX, EDX
_POSIX_64_OPC = [
0x53, # push %rbx
0x89, 0xf0, # mov %esi,%eax
0x89, 0xd1, # mov %edx,%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%rdi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%rdi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%rdi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%rdi)
0x5b, # pop %rbx
0xc3 # retq
]
_WINDOWS_64_OPC = [
0x53, # push %rbx
0x89, 0xd0, # mov %edx,%eax
0x49, 0x89, 0xc9, # mov %rcx,%r9
0x44, 0x89, 0xc1, # mov %r8d,%ecx
0x0f, 0xa2, # cpuid
0x41, 0x89, 0x01, # mov %eax,(%r9)
0x41, 0x89, 0x59, 0x04, # mov %ebx,0x4(%r9)
0x41, 0x89, 0x49, 0x08, # mov %ecx,0x8(%r9)
0x41, 0x89, 0x51, 0x0c, # mov %edx,0xc(%r9)
0x5b, # pop %rbx
0xc3 # retq
]
_CDECL_32_OPC = [
0x53, # push %ebx
0x57, # push %edi
0x8b, 0x7c, 0x24, 0x0c, # mov 0xc(%esp),%edi
0x8b, 0x44, 0x24, 0x10, # mov 0x10(%esp),%eax
0x8b, 0x4c, 0x24, 0x14, # mov 0x14(%esp),%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%edi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%edi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%edi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%edi)
0x5f, # pop %edi
0x5b, # pop %ebx
0xc3 # ret
]
is_windows = os.name == "nt"
is_64bit = ctypes.sizeof(ctypes.c_voidp) == 8
class CPUID_struct(ctypes.Structure):
_register_names = ("eax", "ebx", "ecx", "edx")
_fields_ = [(r, c_uint32) for r in _register_names]
def __getitem__(self, item):
if item not in self._register_names:
raise KeyError(item)
return getattr(self, item)
def __repr__(self):
return "eax=0x{:x}, ebx=0x{:x}, ecx=0x{:x}, edx=0x{:x}".format(self.eax, self.ebx, self.ecx, self.edx)
class CPUID(object):
def __init__(self):
if platform.machine() not in ("AMD64", "x86_64", "x86", "i686"):
raise SystemError("Only available for x86")
if is_windows:
if is_64bit:
# VirtualAlloc seems to fail under some weird
# circumstances when ctypes.windll.kernel32 is
# used under 64 bit Python. CDLL fixes this.
self.win = ctypes.CDLL("kernel32.dll")
opc = _WINDOWS_64_OPC
else:
# Here ctypes.windll.kernel32 is needed to get the
# right DLL. Otherwise it will fail when running
# 32 bit Python on 64 bit Windows.
self.win = ctypes.windll.kernel32
opc = _CDECL_32_OPC
else:
opc = _POSIX_64_OPC if is_64bit else _CDECL_32_OPC
size = len(opc)
code = (ctypes.c_ubyte * size)(*opc)
if is_windows:
self.win.VirtualAlloc.restype = c_void_p
self.win.VirtualAlloc.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong]
self.addr = self.win.VirtualAlloc(None, size, 0x1000, 0x40)
if not self.addr:
raise MemoryError("Could not allocate RWX memory")
ctypes.memmove(self.addr, code, size)
else:
from mmap import (
mmap,
MAP_PRIVATE,
MAP_ANONYMOUS,
PROT_WRITE,
PROT_READ,
PROT_EXEC,
)
self.mm = mmap(
-1,
size,
flags=MAP_PRIVATE | MAP_ANONYMOUS,
prot=PROT_WRITE | PROT_READ | PROT_EXEC,
)
self.mm.write(code)
self.addr = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
func_type = CFUNCTYPE(None, POINTER(CPUID_struct), c_uint32, c_uint32)
self.func_ptr = func_type(self.addr)
def __call__(self, eax, ecx=0):
struct = self.registers_for(eax=eax, ecx=ecx)
return struct.eax, struct.ebx, struct.ecx, struct.edx
def registers_for(self, eax, ecx=0):
"""Calls cpuid with eax and ecx set as the input arguments, and returns a structure
containing eax, ebx, ecx, and edx.
"""
struct = CPUID_struct()
self.func_ptr(struct, eax, ecx)
return struct
def __del__(self):
if is_windows:
self.win.VirtualFree.restype = c_long
self.win.VirtualFree.argtypes = [c_void_p, c_size_t, c_ulong]
self.win.VirtualFree(self.addr, 0, 0x8000)
else:
self.mm.close()
if __name__ == "__main__":
def valid_inputs():
cpuid = CPUID()
for eax in (0x0, 0x80000000):
highest, _, _, _ = cpuid(eax)
while eax <= highest:
regs = cpuid(eax)
yield (eax, regs)
eax += 1
print(" ".join(x.ljust(8) for x in ("CPUID", "A", "B", "C", "D")).strip())
for eax, regs in valid_inputs():
print("%08x" % eax, " ".join("%08x" % reg for reg in regs))

View File

@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import struct
import cpuid
def cpu_vendor(cpu):
_, b, c, d = cpu(0)
return struct.pack("III", b, d, c).decode("utf-8")
def cpu_name(cpu):
name = "".join((struct.pack("IIII", *cpu(0x80000000 + i)).decode("utf-8")
for i in range(2, 5)))
return name.split('\x00', 1)[0]
def is_set(cpu, leaf, subleaf, reg_idx, bit):
"""
@param {leaf} %eax
@param {subleaf} %ecx, 0 in most cases
@param {reg_idx} idx of [%eax, %ebx, %ecx, %edx], 0-based
@param {bit} bit of reg selected by {reg_idx}, 0-based
"""
regs = cpu(leaf, subleaf)
if (1 << bit) & regs[reg_idx]:
return "Yes"
else:
return "--"
if __name__ == "__main__":
cpu = cpuid.CPUID()
print("Vendor ID : %s" % cpu_vendor(cpu))
print("CPU name : %s" % cpu_name(cpu))
print()
print("Vector instructions supported:")
print("SSE : %s" % is_set(cpu, 1, 0, 3, 25))
print("SSE2 : %s" % is_set(cpu, 1, 0, 3, 26))
print("SSE3 : %s" % is_set(cpu, 1, 0, 2, 0))
print("SSSE3 : %s" % is_set(cpu, 1, 0, 2, 9))
print("SSE4.1 : %s" % is_set(cpu, 1, 0, 2, 19))
print("SSE4.2 : %s" % is_set(cpu, 1, 0, 2, 20))
print("SSE4a : %s" % is_set(cpu, 0x80000001, 0, 2, 6))
print("AVX : %s" % is_set(cpu, 1, 0, 2, 28))
print("AVX2 : %s" % is_set(cpu, 7, 0, 1, 5))
print("BMI1 : %s" % is_set(cpu, 7, 0, 1, 3))
print("BMI2 : %s" % is_set(cpu, 7, 0, 1, 8))
# Intel RDT CMT/MBM
print("L3 Monitoring : %s" % is_set(cpu, 0xf, 0, 3, 1))
print("L3 Occupancy : %s" % is_set(cpu, 0xf, 1, 3, 0))
print("L3 Total BW : %s" % is_set(cpu, 0xf, 1, 3, 1))
print("L3 Local BW : %s" % is_set(cpu, 0xf, 1, 3, 2))

View File

@@ -1,13 +0,0 @@
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/comments.py b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
index 1badeda585..892c868af3 100644
--- a/lib/spack/external/_vendoring/ruamel/yaml/comments.py
+++ b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]:
if hasattr(self, a):
if memo is not None:
- setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
+ setattr(t, a, copy.deepcopy(getattr(self, a), memo))
else:
setattr(t, a, getattr(self, a))
# fmt: on
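The one-character move matters because in the removed line memo is parsed as getattr's default value, so deepcopy never sees the memo table and already-copied objects get duplicated instead of shared. A standalone demonstration of the difference:

```python
import copy

class Node:
    pass

n = Node()
n.payload = [1, 2, 3]

memo = {}
shared = copy.deepcopy(n.payload, memo)             # registers payload in memo

fixed = copy.deepcopy(getattr(n, "payload"), memo)  # memo threaded through
buggy = copy.deepcopy(getattr(n, "payload", memo))  # memo is just a default here

assert fixed is shared      # memo respected: same copy reused
assert buggy is not shared  # fresh, disconnected copy
```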

View File

@@ -42,6 +42,11 @@ def convert_to_posix_path(path: str) -> str:
return format_os_path(path, mode=Path.unix) return format_os_path(path, mode=Path.unix)
def convert_to_windows_path(path: str) -> str:
"""Converts the input path to Windows style."""
return format_os_path(path, mode=Path.windows)
def convert_to_platform_path(path: str) -> str: def convert_to_platform_path(path: str) -> str:
"""Converts the input path to the current platform's native style.""" """Converts the input path to the current platform's native style."""
return format_os_path(path, mode=Path.platform_path) return format_os_path(path, mode=Path.platform_path)

View File

@@ -12,7 +12,7 @@
# Archive extensions allowed in Spack # Archive extensions allowed in Spack
PREFIX_EXTENSIONS = ("tar", "TAR") PREFIX_EXTENSIONS = ("tar", "TAR")
EXTENSIONS = ("gz", "bz2", "xz", "Z") EXTENSIONS = ("gz", "bz2", "xz", "Z")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz", "whl") NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz")
# Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz # Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = ( ALLOWED_ARCHIVE_TYPES = (
@@ -357,8 +357,10 @@ def strip_version_suffixes(path_or_url: str) -> str:
r"i[36]86", r"i[36]86",
r"ppc64(le)?", r"ppc64(le)?",
r"armv?(7l|6l|64)?", r"armv?(7l|6l|64)?",
# PyPI wheels # PyPI
r"-(?:py|cp)[23].*", r"[._-]py[23].*\.whl",
r"[._-]cp[23].*\.whl",
r"[._-]win.*\.exe",
] ]
for regex in suffix_regexes: for regex in suffix_regexes:
@@ -401,7 +403,7 @@ def expand_contracted_extension_in_path(
def compression_ext_from_compressed_archive(extension: str) -> Optional[str]: def compression_ext_from_compressed_archive(extension: str) -> Optional[str]:
"""Returns compression extension for a compressed archive""" """Returns compression extension for a compressed archive"""
extension = expand_contracted_extension(extension) extension = expand_contracted_extension(extension)
for ext in EXTENSIONS: for ext in [*EXTENSIONS]:
if ext in extension: if ext in extension:
return ext return ext
return None return None
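The ordering comment above is load-bearing: composite extensions must be tried before their components, or foo.tar.gz would classify as plain gz. A self-contained sketch of the idea (the tuple below mirrors the ordering rule, not Spack's exact list):

ALLOWED = ("tar.gz", "tar.bz2", "tar.xz", "tgz", "zip", "tar", "gz", "bz2", "xz")

def extension_of(path: str):
    return next((e for e in ALLOWED if path.endswith("." + e)), None)

print(extension_of("hdf5-1.14.3.tar.gz"))  # tar.gz, found before the bare "gz"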

View File

@@ -198,32 +198,15 @@ def getuid():
return os.getuid() return os.getuid()
def _win_rename(src, dst):
# os.replace will still fail if on Windows (but not POSIX) if the dst
# is a symlink to a directory (all other cases have parity Windows <-> Posix)
if os.path.islink(dst) and os.path.isdir(os.path.realpath(dst)):
if os.path.samefile(src, dst):
# src and dst are the same
# do nothing and exit early
return
# If dst exists and is a symlink to a directory
# we need to remove dst and then perform rename/replace
# this is safe to do as there's no chance src == dst now
os.remove(dst)
os.replace(src, dst)
@system_path_filter @system_path_filter
def rename(src, dst): def rename(src, dst):
# On Windows, os.rename will fail if the destination file already exists # On Windows, os.rename will fail if the destination file already exists
# os.replace is the same as os.rename on POSIX and is MoveFileExW w/
# the MOVEFILE_REPLACE_EXISTING flag on Windows
# Windows invocation is abstracted behind additional logic handling
# remaining cases of divergent behavior across platforms
if sys.platform == "win32": if sys.platform == "win32":
_win_rename(src, dst) # Windows path existence checks will sometimes fail on junctions/links/symlinks
else: # so check for that case
os.replace(src, dst) if os.path.exists(dst) or islink(dst):
os.remove(dst)
os.rename(src, dst)
@system_path_filter @system_path_filter
@@ -254,6 +237,16 @@ def _get_mime_type():
return file_command("-b", "-h", "--mime-type") return file_command("-b", "-h", "--mime-type")
@memoized
def _get_mime_type_compressed():
"""Same as _get_mime_type but attempts to check for
compression first
"""
mime_uncompressed = _get_mime_type()
mime_uncompressed.add_default_arg("-Z")
return mime_uncompressed
def mime_type(filename): def mime_type(filename):
"""Returns the mime type and subtype of a file. """Returns the mime type and subtype of a file.
@@ -269,6 +262,21 @@ def mime_type(filename):
return type, subtype return type, subtype
def compressed_mime_type(filename):
"""Same as mime_type but checks for type that has been compressed
Args:
filename (str): file to be analyzed
Returns:
Tuple containing the MIME type and subtype
"""
output = _get_mime_type_compressed()(filename, output=str, error=str).strip()
tty.debug("==> " + output)
type, _, subtype = output.partition("/")
return type, subtype
#: This generates the library filenames that may appear on any OS. #: This generates the library filenames that may appear on any OS.
library_extensions = ["a", "la", "so", "tbd", "dylib"] library_extensions = ["a", "la", "so", "tbd", "dylib"]
@@ -300,6 +308,13 @@ def paths_containing_libs(paths, library_names):
return rpaths_to_include return rpaths_to_include
@system_path_filter
def same_path(path1, path2):
norm1 = os.path.abspath(path1).rstrip(os.path.sep)
norm2 = os.path.abspath(path2).rstrip(os.path.sep)
return norm1 == norm2
def filter_file( def filter_file(
regex: str, regex: str,
repl: Union[str, Callable[[Match], str]], repl: Union[str, Callable[[Match], str]],
@@ -894,6 +909,17 @@ def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK) return os.path.isfile(path) and os.access(path, os.X_OK)
@system_path_filter
def get_filetype(path_name):
"""
Return the output of file path_name as a string to identify file type.
"""
file = Executable("file")
file.add_default_env("LC_ALL", "C")
output = file("-b", "-h", "%s" % path_name, output=str, error=str)
return output.strip()
def has_shebang(path): def has_shebang(path):
"""Returns whether a path has a shebang line. Returns False if the file cannot be opened.""" """Returns whether a path has a shebang line. Returns False if the file cannot be opened."""
try: try:
@@ -1143,6 +1169,20 @@ def write_tmp_and_move(filename):
shutil.move(tmp, filename) shutil.move(tmp, filename)
@contextmanager
@system_path_filter
def open_if_filename(str_or_file, mode="r"):
"""Takes either a path or a file object, and opens it if it is a path.
If it's a file object, just yields the file object.
"""
if isinstance(str_or_file, str):
with open(str_or_file, mode) as f:
yield f
else:
yield str_or_file
@system_path_filter @system_path_filter
def touch(path): def touch(path):
"""Creates an empty file at the specified path.""" """Creates an empty file at the specified path."""
@@ -1234,12 +1274,10 @@ def windows_sfn(path: os.PathLike):
import ctypes import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True) k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# Method with null values returns size of short path name
sz = k32.GetShortPathNameW(path, None, 0)
# stub Windows types TCHAR[LENGTH] # stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * sz TCHAR_arr = ctypes.c_wchar * len(path)
ret_str = TCHAR_arr() ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ctypes.byref(ret_str), sz) k32.GetShortPathNameW(path, ret_str, len(path))
return ret_str.value return ret_str.value
@@ -1257,6 +1295,19 @@ def temp_cwd():
shutil.rmtree(tmp_dir, **kwargs) shutil.rmtree(tmp_dir, **kwargs)
@contextmanager
@system_path_filter
def temp_rename(orig_path, temp_path):
same_path = os.path.realpath(orig_path) == os.path.realpath(temp_path)
if not same_path:
shutil.move(orig_path, temp_path)
try:
yield
finally:
if not same_path:
shutil.move(temp_path, orig_path)
@system_path_filter @system_path_filter
def can_access(file_name): def can_access(file_name):
"""True if we have read/write access to the file.""" """True if we have read/write access to the file."""

View File

@@ -98,6 +98,36 @@ def caller_locals():
del stack del stack
def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
enclosing module's name.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
try:
# Make sure locals contain __module__
caller_locals = stack[2][0].f_locals
finally:
del stack
if "__module__" not in caller_locals:
raise RuntimeError(
"Must invoke get_calling_module_name() " "from inside a class definition!"
)
module_name = caller_locals["__module__"]
base_name = module_name.split(".")[-1]
return base_name
def attr_required(obj, attr_name):
"""Ensure that a class has a required attribute."""
if not hasattr(obj, attr_name):
raise RequiredAttributeError(
"No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
)
def attr_setdefault(obj, name, value): def attr_setdefault(obj, name, value):
"""Like dict.setdefault, but for objects.""" """Like dict.setdefault, but for objects."""
if not hasattr(obj, name): if not hasattr(obj, name):
@@ -483,6 +513,42 @@ def copy(self):
return clone return clone
def in_function(function_name):
"""True if the caller was called from some function with
the supplied name, False otherwise."""
stack = inspect.stack()
try:
for elt in stack[2:]:
if elt[3] == function_name:
return True
return False
finally:
del stack
def check_kwargs(kwargs, fun):
"""Helper for making functions with kwargs. Checks whether the kwargs
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
Example::
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
# This raises a TypeError:
foo(w='bad kwarg')
"""
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for function %s()."
% (next(iter(kwargs)), fun.__name__)
)
def match_predicate(*args): def match_predicate(*args):
"""Utility function for making string matching predicates. """Utility function for making string matching predicates.
@@ -698,6 +764,11 @@ def pretty_seconds(seconds):
return pretty_seconds_formatter(seconds)(seconds) return pretty_seconds_formatter(seconds)(seconds)
class RequiredAttributeError(ValueError):
def __init__(self, message):
super().__init__(message)
class ObjectWrapper: class ObjectWrapper:
"""Base class that wraps an object. Derived classes can add new behavior """Base class that wraps an object. Derived classes can add new behavior
while staying undercover. while staying undercover.
@@ -772,30 +843,6 @@ def __repr__(self):
return repr(self.instance) return repr(self.instance)
def get_entry_points(*, group: str):
"""Wrapper for ``importlib.metadata.entry_points``
Args:
group: entry points to select
Returns:
EntryPoints for ``group`` or empty list if unsupported
"""
try:
import importlib.metadata # type: ignore # novermin
except ImportError:
return []
try:
return importlib.metadata.entry_points(group=group)
except TypeError:
# Prior to Python 3.10, entry_points accepted no parameters and always
# returned a dictionary of entry points, keyed by group. See
# https://docs.python.org/3/library/importlib.metadata.html#entry-points
return importlib.metadata.entry_points().get(group, [])
def load_module_from_file(module_name, module_path): def load_module_from_file(module_name, module_path):
"""Loads a python module from the path of the corresponding file. """Loads a python module from the path of the corresponding file.
@@ -864,6 +911,25 @@ def uniq(sequence):
return uniq_list return uniq_list
def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args):
return func(*args)
return _wrapper
class Devnull:
"""Null stream with less overhead than ``os.devnull``.
See https://stackoverflow.com/a/2929954.
"""
def write(self, *_):
pass
def elide_list(line_list, max_num=10): def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements, """Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example:: replacing intervening elements with '...'. For example::
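A quick sketch of the star helper defined above: it adapts a multi-argument function for the multiprocessing map functions, which deliver one tuple per task (plain map is used here so the sketch runs anywhere):

def add(a, b):
    return a + b

print(list(map(star(add), [(1, 2), (3, 4)])))  # [3, 7]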

View File

@@ -815,6 +815,10 @@ def __init__(self, path):
super().__init__(msg) super().__init__(msg)
class LockLimitError(LockError):
"""Raised when exceed maximum attempts to acquire a lock."""
class LockTimeoutError(LockError): class LockTimeoutError(LockError):
"""Raised when an attempt to acquire a lock times out.""" """Raised when an attempt to acquire a lock times out."""

View File

@@ -12,7 +12,7 @@
import traceback import traceback
from datetime import datetime from datetime import datetime
from sys import platform as _platform from sys import platform as _platform
from typing import Any, NoReturn from typing import NoReturn
if _platform != "win32": if _platform != "win32":
import fcntl import fcntl
@@ -44,6 +44,10 @@ def is_debug(level=1):
return _debug >= level return _debug >= level
def is_stacktrace():
return _stacktrace
def set_debug(level=0): def set_debug(level=0):
global _debug global _debug
assert level >= 0, "Debug level must be a positive value" assert level >= 0, "Debug level must be a positive value"
@@ -158,22 +162,21 @@ def get_timestamp(force=False):
return "" return ""
def msg(message: Any, *args: Any, newline: bool = True) -> None: def msg(message, *args, **kwargs):
if not msg_enabled(): if not msg_enabled():
return return
if isinstance(message, Exception): if isinstance(message, Exception):
message = f"{message.__class__.__name__}: {message}" message = "%s: %s" % (message.__class__.__name__, str(message))
else:
message = str(message)
newline = kwargs.get("newline", True)
st_text = "" st_text = ""
if _stacktrace: if _stacktrace:
st_text = process_stacktrace(2) st_text = process_stacktrace(2)
if newline:
nl = "\n" if newline else "" cprint("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
cwrite(f"@*b{{{st_text}==>}} {get_timestamp()}{cescape(_output_filter(message))}{nl}") else:
cwrite("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
for arg in args: for arg in args:
print(indent + _output_filter(str(arg))) print(indent + _output_filter(str(arg)))
@@ -249,6 +252,37 @@ def die(message, *args, **kwargs) -> NoReturn:
sys.exit(1) sys.exit(1)
def get_number(prompt, **kwargs):
default = kwargs.get("default", None)
abort = kwargs.get("abort", None)
if default is not None and abort is not None:
prompt += " (default is %s, %s to abort) " % (default, abort)
elif default is not None:
prompt += " (default is %s) " % default
elif abort is not None:
prompt += " (%s to abort) " % abort
number = None
while number is None:
msg(prompt, newline=False)
ans = input()
if ans == str(abort):
return None
if ans:
try:
number = int(ans)
if number < 1:
msg("Please enter a valid number.")
number = None
except ValueError:
msg("Please enter a valid number.")
elif default is not None:
number = default
return number
def get_yes_or_no(prompt, **kwargs): def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get("default", None) default_value = kwargs.get("default", None)
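A hypothetical interactive session with the get_number helper above, showing how default and abort interact with the validation loop:

n = get_number("How many jobs?", default=4, abort="q")
# prompt shown: "How many jobs? (default is 4, q to abort) "
#   <Enter>  -> returns 4 (the default)
#   "q"      -> returns None (aborted)
#   "0"      -> "Please enter a valid number." and re-prompts (must be >= 1)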

View File

@@ -237,6 +237,7 @@ def transpose():
def colified( def colified(
elts: List[Any], elts: List[Any],
cols: int = 0, cols: int = 0,
output: Optional[IO] = None,
indent: int = 0, indent: int = 0,
padding: int = 2, padding: int = 2,
tty: Optional[bool] = None, tty: Optional[bool] = None,

View File

@@ -62,7 +62,6 @@
import re import re
import sys import sys
from contextlib import contextmanager from contextlib import contextmanager
from typing import Optional
class ColorParseError(Exception): class ColorParseError(Exception):
@@ -96,7 +95,7 @@ def __init__(self, message):
} # white } # white
# Regex to be used for color formatting # Regex to be used for color formatting
COLOR_RE = re.compile(r"@(?:(@)|(\.)|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)") color_re = r"@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)"
# Mapping from color arguments to values for tty.set_color # Mapping from color arguments to values for tty.set_color
color_when_values = {"always": True, "auto": None, "never": False} color_when_values = {"always": True, "auto": None, "never": False}
@@ -204,64 +203,77 @@ def color_when(value):
set_color_when(old_value) set_color_when(old_value)
def _escape(s: str, color: bool, enclose: bool, zsh: bool) -> str: class match_to_ansi:
"""Returns a TTY escape sequence for a color""" def __init__(self, color=True, enclose=False, zsh=False):
if color: self.color = _color_when_value(color)
if zsh: self.enclose = enclose
result = rf"\e[0;{s}m" self.zsh = zsh
def escape(self, s):
"""Returns a TTY escape sequence for a color"""
if self.color:
if self.zsh:
result = rf"\e[0;{s}m"
else:
result = f"\033[{s}m"
if self.enclose:
result = rf"\[{result}\]"
return result
else: else:
result = f"\033[{s}m" return ""
if enclose: def __call__(self, match):
result = rf"\[{result}\]" """Convert a match object generated by ``color_re`` into an ansi
color code. This can be used as a handler in ``re.sub``.
"""
style, color, text = match.groups()
m = match.group(0)
return result if m == "@@":
else: return "@"
return "" elif m == "@.":
return self.escape(0)
elif m == "@":
raise ColorParseError("Incomplete color format: '%s' in %s" % (m, match.string))
string = styles[style]
if color:
if color not in colors:
raise ColorParseError(
"Invalid color specifier: '%s' in '%s'" % (color, match.string)
)
string += ";" + str(colors[color])
colored_text = ""
if text:
colored_text = text + self.escape(0)
return self.escape(string) + colored_text
def colorize( def colorize(string, **kwargs):
string: str, color: Optional[bool] = None, enclose: bool = False, zsh: bool = False
) -> str:
"""Replace all color expressions in a string with ANSI control codes. """Replace all color expressions in a string with ANSI control codes.
Args: Args:
string: The string to replace string (str): The string to replace
Returns: Returns:
The filtered string str: The filtered string
Keyword Arguments: Keyword Arguments:
color: If False, output will be plain text without control codes, for output to color (bool): If False, output will be plain text without control
non-console devices (default: automatically choose color or not) codes, for output to non-console devices.
enclose: If True, enclose ansi color sequences with enclose (bool): If True, enclose ansi color sequences with
square brackets to prevent misestimation of terminal width. square brackets to prevent misestimation of terminal width.
zsh: If True, use zsh ansi codes instead of bash ones (for variables like PS1) zsh (bool): If True, use zsh ansi codes instead of bash ones (for variables like PS1)
""" """
color = color if color is not None else get_color_when() color = _color_when_value(kwargs.get("color", get_color_when()))
zsh = kwargs.get("zsh", False)
def match_to_ansi(match): string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose")), string, zsh)
"""Convert a match object generated by ``COLOR_RE`` into an ansi string = string.replace("}}", "}")
color code. This can be used as a handler in ``re.sub``. return string
"""
escaped_at, dot, style, color_code, text = match.groups()
if escaped_at:
return "@"
elif dot:
return _escape(0, color, enclose, zsh)
elif not (style or color_code):
raise ColorParseError(
f"Incomplete color format: '{match.group(0)}' in '{match.string}'"
)
ansi_code = _escape(f"{styles[style]};{colors.get(color_code, '')}", color, enclose, zsh)
if text:
return f"{ansi_code}{text}{_escape(0, color, enclose, zsh)}"
else:
return ansi_code
return COLOR_RE.sub(match_to_ansi, string).replace("}}", "}")
def clen(string): def clen(string):
@@ -293,7 +305,7 @@ def cprint(string, stream=None, color=None):
cwrite(string + "\n", stream, color) cwrite(string + "\n", stream, color)
def cescape(string: str) -> str: def cescape(string):
"""Escapes special characters needed for color codes. """Escapes special characters needed for color codes.
Replaces the following symbols with their equivalent literal forms: Replaces the following symbols with their equivalent literal forms:
@@ -309,7 +321,10 @@ def cescape(string: str) -> str:
Returns: Returns:
(str): the string with color codes escaped (str): the string with color codes escaped
""" """
return string.replace("@", "@@").replace("}", "}}") string = str(string)
string = string.replace("@", "@@")
string = string.replace("}", "}}")
return string
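A sketch of the markup grammar these helpers implement, per COLOR_RE above: @@ is a literal @, @. resets, and @*b{text} applies bold (*) blue (b) to text; cescape pre-escapes untrusted strings first:

print(colorize("@*b{==>} @g{42 installed} @r{3 failed}"))
print(cescape("100% @ {braces}"))  # -> "100% @@ {braces}}", safe to pass to colorize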
class ColorStream: class ColorStream:

View File

@@ -1111,76 +1111,4 @@ def _test_detection_by_executable(pkgs, error_cls):
details = [msg.format(s, idx) for s in sorted(not_expected)] details = [msg.format(s, idx) for s in sorted(not_expected)]
errors.append(error_cls(summary=summary, details=details)) errors.append(error_cls(summary=summary, details=details))
matched_detection = []
for candidate in expected_specs:
try:
idx = specs.index(candidate)
except (AttributeError, ValueError):
pass
matched_detection.append((candidate, specs[idx]))
def _compare_extra_attribute(_expected, _detected, *, _spec):
result = []
# Check items are of the same type
if not isinstance(_detected, type(_expected)):
_summary = f'{pkg_name}: error when trying to detect "{_expected}"'
_details = [f"{_detected} was detected instead"]
return [error_cls(summary=_summary, details=_details)]
# If they are string expected is a regex
if isinstance(_expected, str):
try:
_regex = re.compile(_expected)
except re.error:
_summary = f'{pkg_name}: illegal regex in "{_spec}" extra attributes'
_details = [f"{_expected} is not a valid regex"]
return [error_cls(summary=_summary, details=_details)]
if not _regex.match(_detected):
_summary = (
f'{pkg_name}: error when trying to match "{_expected}" '
f"in extra attributes"
)
_details = [f"{_detected} does not match the regex"]
return [error_cls(summary=_summary, details=_details)]
if isinstance(_expected, dict):
_not_detected = set(_expected.keys()) - set(_detected.keys())
if _not_detected:
_summary = f"{pkg_name}: cannot detect some attributes for spec {_spec}"
_details = [
f'"{_expected}" was expected',
f'"{_detected}" was detected',
] + [f'attribute "{s}" was not detected' for s in sorted(_not_detected)]
result.append(error_cls(summary=_summary, details=_details))
_common = set(_expected.keys()) & set(_detected.keys())
for _key in _common:
result.extend(
_compare_extra_attribute(_expected[_key], _detected[_key], _spec=_spec)
)
return result
for expected, detected in matched_detection:
# We might not want to test all attributes, so avoid not_expected
not_detected = set(expected.extra_attributes) - set(detected.extra_attributes)
if not_detected:
summary = f"{pkg_name}: cannot detect some attributes for spec {expected}"
details = [
f'"{s}" was not detected [test_id={idx}]' for s in sorted(not_detected)
]
errors.append(error_cls(summary=summary, details=details))
common = set(expected.extra_attributes) & set(detected.extra_attributes)
for key in common:
errors.extend(
_compare_extra_attribute(
expected.extra_attributes[key],
detected.extra_attributes[key],
_spec=expected,
)
)
return errors return errors
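The removed checker encodes a small recursive matching language: an expected string is treated as a regex against the detected value, and an expected dict recurses key by key. An illustrative input pair (names taken from the removed helper):

expected = {"compilers": {"c": r".*/gcc-\d+$"}}
detected = {"compilers": {"c": "/usr/bin/gcc-12"}}
# _compare_extra_attribute(expected, detected, _spec=spec) -> [] (no errors):
# the dict recurses into "compilers", then "c" matches the regex.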

View File

@@ -17,6 +17,7 @@
import tarfile import tarfile
import tempfile import tempfile
import time import time
import traceback
import urllib.error import urllib.error
import urllib.parse import urllib.parse
import urllib.request import urllib.request
@@ -110,6 +111,10 @@ def __init__(self, errors):
super().__init__(self.message) super().__init__(self.message)
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class BinaryCacheIndex: class BinaryCacheIndex:
""" """
The BinaryCacheIndex tracks what specs are available on (usually remote) The BinaryCacheIndex tracks what specs are available on (usually remote)
@@ -536,6 +541,83 @@ def binary_index_location():
BINARY_INDEX: BinaryCacheIndex = llnl.util.lang.Singleton(BinaryCacheIndex) # type: ignore BINARY_INDEX: BinaryCacheIndex = llnl.util.lang.Singleton(BinaryCacheIndex) # type: ignore
class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super().__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super().__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super().__init__(msg)
class InvalidMetadataFile(spack.error.SpackError):
pass
class UnsignedPackageException(spack.error.SpackError):
"""
Raised if installation of unsigned package is attempted without
the use of ``--no-check-signature``.
"""
def compute_hash(data): def compute_hash(data):
if isinstance(data, str): if isinstance(data, str):
data = data.encode("utf-8") data = data.encode("utf-8")
@@ -910,10 +992,15 @@ def url_read_method(url):
if entry.endswith("spec.json") or entry.endswith("spec.json.sig") if entry.endswith("spec.json") or entry.endswith("spec.json.sig")
] ]
read_fn = url_read_method read_fn = url_read_method
except KeyError as inst:
msg = "No packages at {0}: {1}".format(cache_prefix, inst)
tty.warn(msg)
except Exception as err: except Exception as err:
# If we got some kind of S3 (access denied or other connection error), the first non # If we got some kind of S3 (access denied or other connection
# boto-specific class in the exception is Exception. Just print a warning and return # error), the first non boto-specific class in the exception
tty.warn(f"Encountered problem listing packages at {cache_prefix}: {err}") # hierarchy is Exception. Just print a warning and return
msg = "Encountered problem listing packages at {0}: {1}".format(cache_prefix, err)
tty.warn(msg)
return file_list, read_fn return file_list, read_fn
@@ -960,10 +1047,11 @@ def generate_package_index(cache_prefix, concurrency=32):
""" """
try: try:
file_list, read_fn = _spec_files_from_cache(cache_prefix) file_list, read_fn = _spec_files_from_cache(cache_prefix)
except ListMirrorSpecsError as e: except ListMirrorSpecsError as err:
raise GenerateIndexError(f"Unable to generate package index: {e}") from e tty.error("Unable to generate package index, {0}".format(err))
return
tty.debug(f"Retrieving spec descriptor files from {cache_prefix} to build index") tty.debug("Retrieving spec descriptor files from {0} to build index".format(cache_prefix))
tmpdir = tempfile.mkdtemp() tmpdir = tempfile.mkdtemp()
@@ -973,22 +1061,27 @@ def generate_package_index(cache_prefix, concurrency=32):
try: try:
_read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency) _read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency)
except Exception as e: except Exception as err:
raise GenerateIndexError( msg = "Encountered problem pushing package index to {0}: {1}".format(cache_prefix, err)
f"Encountered problem pushing package index to {cache_prefix}: {e}" tty.warn(msg)
) from e tty.debug("\n" + traceback.format_exc())
finally: finally:
shutil.rmtree(tmpdir, ignore_errors=True) shutil.rmtree(tmpdir)
def generate_key_index(key_prefix, tmpdir=None): def generate_key_index(key_prefix, tmpdir=None):
"""Create the key index page. """Create the key index page.
Creates (or replaces) the "index.json" page at the location given in key_prefix. This page Creates (or replaces) the "index.json" page at the location given in
contains an entry for each key (.pub) under key_prefix. key_prefix. This page contains an entry for each key (.pub) under
key_prefix.
""" """
tty.debug(f"Retrieving key.pub files from {url_util.format(key_prefix)} to build key index") tty.debug(
" ".join(
("Retrieving key.pub files from", url_util.format(key_prefix), "to build key index")
)
)
try: try:
fingerprints = ( fingerprints = (
@@ -996,8 +1089,17 @@ def generate_key_index(key_prefix, tmpdir=None):
for entry in web_util.list_url(key_prefix, recursive=False) for entry in web_util.list_url(key_prefix, recursive=False)
if entry.endswith(".pub") if entry.endswith(".pub")
) )
except Exception as e: except KeyError as inst:
raise CannotListKeys(f"Encountered problem listing keys at {key_prefix}: {e}") from e msg = "No keys at {0}: {1}".format(key_prefix, inst)
tty.warn(msg)
return
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = "Encountered problem listing keys at {0}: {1}".format(key_prefix, err)
tty.warn(msg)
return
remove_tmpdir = False remove_tmpdir = False
@@ -1022,13 +1124,12 @@ def generate_key_index(key_prefix, tmpdir=None):
keep_original=False, keep_original=False,
extra_args={"ContentType": "application/json"}, extra_args={"ContentType": "application/json"},
) )
except Exception as e: except Exception as err:
raise GenerateIndexError( msg = "Encountered problem pushing key index to {0}: {1}".format(key_prefix, err)
f"Encountered problem pushing key index to {key_prefix}: {e}" tty.warn(msg)
) from e
finally: finally:
if remove_tmpdir: if remove_tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True) shutil.rmtree(tmpdir)
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None: def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
@@ -1099,8 +1200,7 @@ def push_or_raise(spec: Spec, out_url: str, options: PushOptions):
used at the mirror (following <tarball_directory_name>). used at the mirror (following <tarball_directory_name>).
This method raises :py:class:`NoOverwriteException` when ``force=False`` and the tarball or This method raises :py:class:`NoOverwriteException` when ``force=False`` and the tarball or
spec.json file already exist in the buildcache. It raises :py:class:`PushToBuildCacheError` spec.json file already exist in the buildcache.
when the tarball or spec.json file cannot be pushed to the buildcache.
""" """
if not spec.concrete: if not spec.concrete:
raise ValueError("spec must be concrete to build tarball") raise ValueError("spec must be concrete to build tarball")
@@ -1178,18 +1278,13 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
key = select_signing_key(options.key) key = select_signing_key(options.key)
sign_specfile(key, options.force, specfile_path) sign_specfile(key, options.force, specfile_path)
try: # push tarball and signed spec json to remote mirror
# push tarball and signed spec json to remote mirror web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False) web_util.push_to_url(
web_util.push_to_url( signed_specfile_path if not options.unsigned else specfile_path,
signed_specfile_path if not options.unsigned else specfile_path, remote_signed_specfile_path if not options.unsigned else remote_specfile_path,
remote_signed_specfile_path if not options.unsigned else remote_specfile_path, keep_original=False,
keep_original=False, )
)
except Exception as e:
raise PushToBuildCacheError(
f"Encountered problem pushing binary {remote_spackfile_path}: {e}"
) from e
# push the key to the build cache's _pgp directory so it can be # push the key to the build cache's _pgp directory so it can be
# imported # imported
@@ -1201,6 +1296,8 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
if options.regenerate_index: if options.regenerate_index:
generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir))) generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir)))
return None
class NotInstalledError(spack.error.SpackError): class NotInstalledError(spack.error.SpackError):
"""Raised when a spec is not installed but picked to be packaged.""" """Raised when a spec is not installed but picked to be packaged."""
@@ -1255,6 +1352,28 @@ def specs_to_be_packaged(
return [s for s in itertools.chain(roots, deps) if not s.external] return [s for s in itertools.chain(roots, deps) if not s.external]
def push(spec: Spec, mirror_url: str, options: PushOptions):
"""Create and push binary package for a single spec to the specified
mirror url.
Args:
spec: Spec to package and push
mirror_url: Desired destination url for binary package
options:
Returns:
True if package was pushed, False otherwise.
"""
try:
push_or_raise(spec, mirror_url, options)
except NoOverwriteException as e:
warnings.warn(str(e))
return False
return True
def try_verify(specfile_path): def try_verify(specfile_path):
"""Utility function to attempt to verify a local file. Assumes the """Utility function to attempt to verify a local file. Assumes the
file is a clearsigned signature file. file is a clearsigned signature file.
@@ -2587,96 +2706,3 @@ def conditional_fetch(self) -> FetchIndexResult:
raise FetchIndexError(f"Remote index {url_manifest} is invalid") raise FetchIndexError(f"Remote index {url_manifest} is invalid")
return FetchIndexResult(etag=None, hash=index_digest.digest, data=result, fresh=False) return FetchIndexResult(etag=None, hash=index_digest.digest, data=result, fresh=False)
class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super().__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super().__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super().__init__(msg)
class InvalidMetadataFile(spack.error.SpackError):
pass
class UnsignedPackageException(spack.error.SpackError):
"""
Raised if installation of unsigned package is attempted without
the use of ``--no-check-signature``.
"""
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class GenerateIndexError(spack.error.SpackError):
"""Raised when unable to generate key or package index for mirror"""
class CannotListKeys(GenerateIndexError):
"""Raised when unable to list keys when generating key index"""
class PushToBuildCacheError(spack.error.SpackError):
"""Raised when unable to push objects to binary mirror"""

View File

@@ -213,6 +213,9 @@ def _root_spec(spec_str: str) -> str:
platform = str(spack.platforms.host()) platform = str(spack.platforms.host())
if platform == "darwin": if platform == "darwin":
spec_str += " %apple-clang" spec_str += " %apple-clang"
elif platform == "windows":
# TODO (johnwparent): Remove version constraint when clingo patch is up
spec_str += " %msvc@:19.37"
elif platform == "linux": elif platform == "linux":
spec_str += " %gcc" spec_str += " %gcc"
elif platform == "freebsd": elif platform == "freebsd":

View File

@@ -147,7 +147,7 @@ def _add_compilers_if_missing() -> None:
mixed_toolchain=sys.platform == "darwin" mixed_toolchain=sys.platform == "darwin"
) )
if new_compilers: if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers) spack.compilers.add_compilers_to_config(new_compilers, init_config=False)
@contextlib.contextmanager @contextlib.contextmanager

View File

@@ -173,14 +173,35 @@ def _read_metadata(self, package_name: str) -> Any:
return data return data
def _install_by_hash( def _install_by_hash(
self, pkg_hash: str, pkg_sha256: str, bincache_platform: spack.platforms.Platform self,
pkg_hash: str,
pkg_sha256: str,
index: List[spack.spec.Spec],
bincache_platform: spack.platforms.Platform,
) -> None: ) -> None:
index_spec = next(x for x in index if x.dag_hash() == pkg_hash)
# Reconstruct the compiler that we need to use for bootstrapping
compiler_entry = {
"modules": [],
"operating_system": str(index_spec.os),
"paths": {
"cc": "/dev/null",
"cxx": "/dev/null",
"f77": "/dev/null",
"fc": "/dev/null",
},
"spec": str(index_spec.compiler),
"target": str(index_spec.target.family),
}
with spack.platforms.use_platform(bincache_platform): with spack.platforms.use_platform(bincache_platform):
query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True) with spack.config.override("compilers", [{"compiler": compiler_entry}]):
for match in spack.store.find([f"/{pkg_hash}"], multiple=False, query_fn=query): spec_str = "/" + pkg_hash
spack.binary_distribution.install_root_node( query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
match, unsigned=True, force=True, sha256=pkg_sha256 matches = spack.store.find([spec_str], multiple=False, query_fn=query)
) for match in matches:
spack.binary_distribution.install_root_node(
match, unsigned=True, force=True, sha256=pkg_sha256
)
def _install_and_test( def _install_and_test(
self, self,
@@ -211,7 +232,7 @@ def _install_and_test(
continue continue
for _, pkg_hash, pkg_sha256 in item["binaries"]: for _, pkg_hash, pkg_sha256 in item["binaries"]:
self._install_by_hash(pkg_hash, pkg_sha256, bincache_platform) self._install_by_hash(pkg_hash, pkg_sha256, index, bincache_platform)
info: ConfigDictionary = {} info: ConfigDictionary = {}
if test_fn(query_spec=abstract_spec, query_info=info): if test_fn(query_spec=abstract_spec, query_info=info):

View File

@@ -43,7 +43,7 @@
from collections import defaultdict from collections import defaultdict
from enum import Flag, auto from enum import Flag, auto
from itertools import chain from itertools import chain
from typing import List, Set, Tuple from typing import List, Tuple
import llnl.util.tty as tty import llnl.util.tty as tty
from llnl.string import plural from llnl.string import plural
@@ -57,10 +57,8 @@
import spack.build_systems.meson import spack.build_systems.meson
import spack.build_systems.python import spack.build_systems.python
import spack.builder import spack.builder
import spack.compilers
import spack.config import spack.config
import spack.deptypes as dt import spack.deptypes as dt
import spack.error
import spack.main import spack.main
import spack.package_base import spack.package_base
import spack.paths import spack.paths
@@ -68,7 +66,6 @@
import spack.repo import spack.repo
import spack.schema.environment import spack.schema.environment
import spack.spec import spack.spec
import spack.stage
import spack.store import spack.store
import spack.subprocess_context import spack.subprocess_context
import spack.user_environment import spack.user_environment
@@ -81,7 +78,7 @@
from spack.installer import InstallError from spack.installer import InstallError
from spack.util.cpus import determine_number_of_jobs from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import ( from spack.util.environment import (
SYSTEM_DIR_CASE_ENTRY, SYSTEM_DIRS,
EnvironmentModifications, EnvironmentModifications,
env_flag, env_flag,
filter_system_paths, filter_system_paths,
@@ -104,13 +101,9 @@
# Spack's compiler wrappers. # Spack's compiler wrappers.
# #
SPACK_ENV_PATH = "SPACK_ENV_PATH" SPACK_ENV_PATH = "SPACK_ENV_PATH"
SPACK_MANAGED_DIRS = "SPACK_MANAGED_DIRS"
SPACK_INCLUDE_DIRS = "SPACK_INCLUDE_DIRS" SPACK_INCLUDE_DIRS = "SPACK_INCLUDE_DIRS"
SPACK_LINK_DIRS = "SPACK_LINK_DIRS" SPACK_LINK_DIRS = "SPACK_LINK_DIRS"
SPACK_RPATH_DIRS = "SPACK_RPATH_DIRS" SPACK_RPATH_DIRS = "SPACK_RPATH_DIRS"
SPACK_STORE_INCLUDE_DIRS = "SPACK_STORE_INCLUDE_DIRS"
SPACK_STORE_LINK_DIRS = "SPACK_STORE_LINK_DIRS"
SPACK_STORE_RPATH_DIRS = "SPACK_STORE_RPATH_DIRS"
SPACK_RPATH_DEPS = "SPACK_RPATH_DEPS" SPACK_RPATH_DEPS = "SPACK_RPATH_DEPS"
SPACK_LINK_DEPS = "SPACK_LINK_DEPS" SPACK_LINK_DEPS = "SPACK_LINK_DEPS"
SPACK_PREFIX = "SPACK_PREFIX" SPACK_PREFIX = "SPACK_PREFIX"
@@ -423,7 +416,7 @@ def set_compiler_environment_variables(pkg, env):
env.set("SPACK_COMPILER_SPEC", str(spec.compiler)) env.set("SPACK_COMPILER_SPEC", str(spec.compiler))
env.set("SPACK_SYSTEM_DIRS", SYSTEM_DIR_CASE_ENTRY) env.set("SPACK_SYSTEM_DIRS", ":".join(SYSTEM_DIRS))
compiler.setup_custom_environment(pkg, env) compiler.setup_custom_environment(pkg, env)
@@ -551,26 +544,9 @@ def update_compiler_args_for_dep(dep):
include_dirs = list(dedupe(filter_system_paths(include_dirs))) include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs))) rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
# Spack managed directories include the stage, store and upstream stores. We extend this with env.set(SPACK_LINK_DIRS, ":".join(link_dirs))
# their real paths to make it more robust (e.g. /tmp vs /private/tmp on macOS). env.set(SPACK_INCLUDE_DIRS, ":".join(include_dirs))
spack_managed_dirs: Set[str] = { env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))
spack.stage.get_stage_root(),
spack.store.STORE.db.root,
*(db.root for db in spack.store.STORE.db.upstream_dbs),
}
spack_managed_dirs.update([os.path.realpath(p) for p in spack_managed_dirs])
env.set(SPACK_MANAGED_DIRS, "|".join(f'"{p}/"*' for p in sorted(spack_managed_dirs)))
is_spack_managed = lambda p: any(p.startswith(store) for store in spack_managed_dirs)
link_dirs_spack, link_dirs_system = stable_partition(link_dirs, is_spack_managed)
include_dirs_spack, include_dirs_system = stable_partition(include_dirs, is_spack_managed)
rpath_dirs_spack, rpath_dirs_system = stable_partition(rpath_dirs, is_spack_managed)
env.set(SPACK_LINK_DIRS, ":".join(link_dirs_system))
env.set(SPACK_INCLUDE_DIRS, ":".join(include_dirs_system))
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs_system))
env.set(SPACK_STORE_LINK_DIRS, ":".join(link_dirs_spack))
env.set(SPACK_STORE_INCLUDE_DIRS, ":".join(include_dirs_spack))
env.set(SPACK_STORE_RPATH_DIRS, ":".join(rpath_dirs_spack))
def set_package_py_globals(pkg, context: Context = Context.BUILD): def set_package_py_globals(pkg, context: Context = Context.BUILD):
@@ -607,22 +583,10 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
# Put spack compiler paths in module scope. (Some packages use it # Put spack compiler paths in module scope. (Some packages use it
# in setup_run_environment etc, so don't put it context == build) # in setup_run_environment etc, so don't put it context == build)
link_dir = spack.paths.build_env_path link_dir = spack.paths.build_env_path
pkg_compiler = None module.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
try: module.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
pkg_compiler = pkg.compiler module.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
except spack.compilers.NoCompilerForSpecError as e: module.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])
tty.debug(f"cannot set 'spack_cc': {str(e)}")
if pkg_compiler is not None:
module.spack_cc = os.path.join(link_dir, pkg_compiler.link_paths["cc"])
module.spack_cxx = os.path.join(link_dir, pkg_compiler.link_paths["cxx"])
module.spack_f77 = os.path.join(link_dir, pkg_compiler.link_paths["f77"])
module.spack_fc = os.path.join(link_dir, pkg_compiler.link_paths["fc"])
else:
module.spack_cc = None
module.spack_cxx = None
module.spack_f77 = None
module.spack_fc = None
# Useful directories within the prefix are encapsulated in # Useful directories within the prefix are encapsulated in
# a Prefix object. # a Prefix object.
@@ -825,7 +789,7 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
for mod in ["cray-mpich", "cray-libsci"]: for mod in ["cray-mpich", "cray-libsci"]:
module("unload", mod) module("unload", mod)
if target and target.module_name: if target.module_name:
load_module(target.module_name) load_module(target.module_name)
load_external_modules(pkg) load_external_modules(pkg)

View File

@@ -434,6 +434,11 @@ def _do_patch_libtool(self):
r"crtendS\.o", r"crtendS\.o",
]: ]:
x.filter(regex=(rehead + o), repl="") x.filter(regex=(rehead + o), repl="")
elif self.pkg.compiler.name == "dpcpp":
# Hack to filter out spurious predep_objects when building with Intel dpcpp
# (see https://github.com/spack/spack/issues/32863):
x.filter(regex=r"^(predep_objects=.*)/tmp/conftest-[0-9A-Fa-f]+\.o", repl=r"\1")
x.filter(regex=r"^(predep_objects=.*)/tmp/a-[0-9A-Fa-f]+\.o", repl=r"\1")
elif self.pkg.compiler.name == "nag": elif self.pkg.compiler.name == "nag":
for tag in ["fc", "f77"]: for tag in ["fc", "f77"]:
marker = markers[tag] marker = markers[tag]
@@ -536,7 +541,7 @@ def autoreconf(self, pkg, spec, prefix):
if os.path.exists(self.configure_abs_path): if os.path.exists(self.configure_abs_path):
return return
# Else try to regenerate it, which requires a few build dependencies # Else try to regenerate it, which reuquires a few build dependencies
ensure_build_dependencies_or_raise( ensure_build_dependencies_or_raise(
spec=spec, spec=spec,
dependencies=["autoconf", "automake", "libtool"], dependencies=["autoconf", "automake", "libtool"],

View File

@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections.abc import collections.abc
import os import os
import re
from typing import Tuple from typing import Tuple
import llnl.util.filesystem as fs import llnl.util.filesystem as fs
@@ -16,12 +15,6 @@
from .cmake import CMakeBuilder, CMakePackage from .cmake import CMakeBuilder, CMakePackage
def spec_uses_toolchain(spec):
gcc_toolchain_regex = re.compile(".*gcc-toolchain.*")
using_toolchain = list(filter(gcc_toolchain_regex.match, spec.compiler_flags["cxxflags"]))
return using_toolchain
def cmake_cache_path(name, value, comment="", force=False): def cmake_cache_path(name, value, comment="", force=False):
"""Generate a string for a cmake cache variable""" """Generate a string for a cmake cache variable"""
force_str = " FORCE" if force else "" force_str = " FORCE" if force else ""
@@ -220,7 +213,7 @@ def initconfig_mpi_entries(self):
else: else:
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC # vs the older versions which expect MPIEXEC
if spec["cmake"].satisfies("@3.10:"): if self.pkg.spec["cmake"].satisfies("@3.10:"):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec)) entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec))
else: else:
entries.append(cmake_cache_path("MPIEXEC", mpiexec)) entries.append(cmake_cache_path("MPIEXEC", mpiexec))
@@ -255,17 +248,12 @@ def initconfig_hardware_entries(self):
# Include the deprecated CUDA_TOOLKIT_ROOT_DIR for supporting BLT packages # Include the deprecated CUDA_TOOLKIT_ROOT_DIR for supporting BLT packages
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir)) entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir))
# CUDA_FLAGS archs = spec.variants["cuda_arch"].value
cuda_flags = [] if archs[0] != "none":
arch_str = ";".join(archs)
if not spec.satisfies("cuda_arch=none"): entries.append(
cuda_archs = ";".join(spec.variants["cuda_arch"].value) cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", "{0}".format(arch_str))
entries.append(cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", cuda_archs)) )
if spec_uses_toolchain(spec):
cuda_flags.append("-Xcompiler {}".format(spec_uses_toolchain(spec)[0]))
entries.append(cmake_cache_string("CMAKE_CUDA_FLAGS", " ".join(cuda_flags)))
if "+rocm" in spec: if "+rocm" in spec:
entries.append("#------------------{0}".format("-" * 30)) entries.append("#------------------{0}".format("-" * 30))
@@ -274,6 +262,9 @@ def initconfig_hardware_entries(self):
# Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary # Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary
entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix))) entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix)))
entries.append(
cmake_cache_path("HIP_CXX_COMPILER", "{0}".format(self.spec["hip"].hipcc))
)
llvm_bin = spec["llvm-amdgpu"].prefix.bin llvm_bin = spec["llvm-amdgpu"].prefix.bin
llvm_prefix = spec["llvm-amdgpu"].prefix llvm_prefix = spec["llvm-amdgpu"].prefix
# Some ROCm systems seem to point to /<path>/rocm-<ver>/ and # Some ROCm systems seem to point to /<path>/rocm-<ver>/ and
@@ -286,9 +277,11 @@ def initconfig_hardware_entries(self):
archs = self.spec.variants["amdgpu_target"].value archs = self.spec.variants["amdgpu_target"].value
if archs[0] != "none": if archs[0] != "none":
arch_str = ";".join(archs) arch_str = ";".join(archs)
entries.append(cmake_cache_string("CMAKE_HIP_ARCHITECTURES", arch_str)) entries.append(
entries.append(cmake_cache_string("AMDGPU_TARGETS", arch_str)) cmake_cache_string("CMAKE_HIP_ARCHITECTURES", "{0}".format(arch_str))
entries.append(cmake_cache_string("GPU_TARGETS", arch_str)) )
entries.append(cmake_cache_string("AMDGPU_TARGETS", "{0}".format(arch_str)))
entries.append(cmake_cache_string("GPU_TARGETS", "{0}".format(arch_str)))
return entries return entries

View File

@@ -16,7 +16,7 @@
class CargoPackage(spack.package_base.PackageBase): class CargoPackage(spack.package_base.PackageBase):
"""Specialized class for packages built using cargo.""" """Specialized class for packages built using a Makefiles."""
#: This attribute is used in UI queries that need to know the build #: This attribute is used in UI queries that need to know the build
#: system base class #: system base class

View File

@@ -21,7 +21,7 @@
class MakefilePackage(spack.package_base.PackageBase): class MakefilePackage(spack.package_base.PackageBase):
"""Specialized class for packages built using Makefiles.""" """Specialized class for packages built using a Makefiles."""
#: This attribute is used in UI queries that need to know the build #: This attribute is used in UI queries that need to know the build
#: system base class #: system base class

View File

@@ -14,7 +14,7 @@
from llnl.util.link_tree import LinkTree from llnl.util.link_tree import LinkTree
from spack.build_environment import dso_suffix from spack.build_environment import dso_suffix
from spack.directives import conflicts, license, redistribute, variant from spack.directives import conflicts, variant
from spack.package_base import InstallError from spack.package_base import InstallError
from spack.util.environment import EnvironmentModifications from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable from spack.util.executable import Executable
@@ -26,11 +26,10 @@ class IntelOneApiPackage(Package):
"""Base class for Intel oneAPI packages.""" """Base class for Intel oneAPI packages."""
homepage = "https://software.intel.com/oneapi" homepage = "https://software.intel.com/oneapi"
license("https://intel.ly/393CijO")
# oneAPI license does not allow mirroring outside of the # oneAPI license does not allow mirroring outside of the
# organization (e.g. University/Company). # organization (e.g. University/Company).
redistribute(source=False, binary=False) redistribute_source = False
for c in [ for c in [
"target=ppc64:", "target=ppc64:",

View File

@@ -4,15 +4,12 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect import inspect
import os import os
from typing import Iterable
from llnl.util.filesystem import filter_file, find from llnl.util.filesystem import filter_file
from llnl.util.lang import memoized
import spack.builder import spack.builder
import spack.package_base import spack.package_base
from spack.directives import build_system, extends from spack.directives import build_system, extends
from spack.install_test import SkipTest, test_part
from spack.util.executable import Executable from spack.util.executable import Executable
from ._checks import BaseBuilder, execute_build_time_tests from ._checks import BaseBuilder, execute_build_time_tests
@@ -31,58 +28,6 @@ class PerlPackage(spack.package_base.PackageBase):
extends("perl", when="build_system=perl") extends("perl", when="build_system=perl")
@property
@memoized
def _platform_dir(self):
"""Name of platform-specific module subdirectory."""
perl = self.spec["perl"].command
options = "-E", "use Config; say $Config{archname}"
out = perl(*options, output=str.split, error=str.split)
return out.strip()
@property
def use_modules(self) -> Iterable[str]:
"""Names of the package's perl modules."""
module_files = find(self.prefix.lib, ["*.pm"], recursive=True)
# Drop the platform directory, if present
if self._platform_dir:
platform_dir = self._platform_dir + os.sep
module_files = [m.replace(platform_dir, "") for m in module_files]
# Drop the extension and library path
prefix = self.prefix.lib + os.sep
modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files]
# Drop the perl subdirectory as well
return ["::".join(m.split(os.sep)[1:]) for m in modules]
@property
def skip_modules(self) -> Iterable[str]:
"""Names of modules that should be skipped when running tests.
These are a subset of use_modules.
Returns:
List of strings of module names.
"""
return []
def test_use(self):
"""Test 'use module'"""
if not self.use_modules:
raise SkipTest("Test requires use_modules package property.")
perl = self.spec["perl"].command
for module in self.use_modules:
if module in self.skip_modules:
continue
with test_part(self, f"test_use-{module}", purpose=f"checking use of {module}"):
options = ["-we", f'use strict; use {module}; print("OK\n")']
out = perl(*options, output=str.split, error=str.split)
assert "OK" in out
@spack.builder.builder("perl") @spack.builder.builder("perl")
class PerlBuilder(BaseBuilder): class PerlBuilder(BaseBuilder):
@@ -107,7 +52,7 @@ class PerlBuilder(BaseBuilder):
phases = ("configure", "build", "install") phases = ("configure", "build", "install")
#: Names associated with package methods in the old build-system format #: Names associated with package methods in the old build-system format
legacy_methods = ("configure_args", "check", "test_use") legacy_methods = ("configure_args", "check")
#: Names associated with package attributes in the old build-system format #: Names associated with package attributes in the old build-system format
legacy_attributes = () legacy_attributes = ()

View File

@@ -27,7 +27,7 @@
import spack.package_base import spack.package_base
import spack.spec import spack.spec
import spack.store import spack.store
from spack.directives import build_system, depends_on, extends from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import test_part from spack.install_test import test_part
from spack.spec import Spec from spack.spec import Spec
@@ -56,6 +56,8 @@ def _flatten_dict(dictionary: Mapping[str, object]) -> Iterable[str]:
class PythonExtension(spack.package_base.PackageBase): class PythonExtension(spack.package_base.PackageBase):
maintainers("adamjstewart")
@property @property
def import_modules(self) -> Iterable[str]: def import_modules(self) -> Iterable[str]:
"""Names of modules that the Python package provides. """Names of modules that the Python package provides.

View File

@@ -75,12 +75,9 @@
# does not like its directory structure. # does not like its directory structure.
# #
import os
import spack.variant import spack.variant
from spack.directives import conflicts, depends_on, variant from spack.directives import conflicts, depends_on, variant
from spack.package_base import PackageBase from spack.package_base import PackageBase
from spack.util.environment import EnvironmentModifications
class ROCmPackage(PackageBase): class ROCmPackage(PackageBase):
@@ -157,25 +154,6 @@ def hip_flags(amdgpu_target):
archs = ",".join(amdgpu_target) archs = ",".join(amdgpu_target)
return "--amdgpu-target={0}".format(archs) return "--amdgpu-target={0}".format(archs)
def asan_on(self, env: EnvironmentModifications):
llvm_path = self.spec["llvm-amdgpu"].prefix
env.set("CC", llvm_path + "/bin/clang")
env.set("CXX", llvm_path + "/bin/clang++")
env.set("ASAN_OPTIONS", "detect_leaks=0")
for root, _, files in os.walk(llvm_path):
if "libclang_rt.asan-x86_64.so" in files:
asan_lib_path = root
env.prepend_path("LD_LIBRARY_PATH", asan_lib_path)
if "rhel" in self.spec.os or "sles" in self.spec.os:
SET_DWARF_VERSION_4 = "-gdwarf-4"
else:
SET_DWARF_VERSION_4 = ""
env.set("CFLAGS", f"-fsanitize=address -shared-libasan -g {SET_DWARF_VERSION_4}")
env.set("CXXFLAGS", f"-fsanitize=address -shared-libasan -g {SET_DWARF_VERSION_4}")
env.set("LDFLAGS", "-Wl,--enable-new-dtags -fuse-ld=lld -fsanitize=address -g -Wl,")
# HIP version vs Architecture # HIP version vs Architecture
# TODO: add a bunch of lines like: # TODO: add a bunch of lines like:

View File

@@ -9,8 +9,6 @@
import inspect import inspect
from typing import List, Optional, Tuple from typing import List, Optional, Tuple
from llnl.util import lang
import spack.build_environment import spack.build_environment
#: Builder classes, as registered by the "builder" decorator #: Builder classes, as registered by the "builder" decorator
@@ -233,27 +231,24 @@ def __new__(mcs, name, bases, attr_dict):
for temporary_stage in (_RUN_BEFORE, _RUN_AFTER): for temporary_stage in (_RUN_BEFORE, _RUN_AFTER):
staged_callbacks = temporary_stage.callbacks staged_callbacks = temporary_stage.callbacks
# Here we have an adapter from an old-style package. This means there is no # We don't have callbacks in this class, move on
# hierarchy of builders, and every callback that had to be combined between if not staged_callbacks:
# *Package and *Builder has been combined already by _PackageAdapterMeta
if name == "Adapter":
continue continue
# If we are here we have callbacks. To get a complete list, we accumulate all the # If we are here we have callbacks. To get a complete list, get first what
# callbacks from base classes, we deduplicate them, then prepend what we have # was attached to parent classes, then prepend what we have registered here.
# registered here.
# #
# The order should be: # The order should be:
# 1. Callbacks are registered in order within the same class # 1. Callbacks are registered in order within the same class
# 2. Callbacks defined in derived classes precede those defined in base # 2. Callbacks defined in derived classes precede those defined in base
# classes # classes
callbacks_from_base = []
for base in bases: for base in bases:
current_callbacks = getattr(base, temporary_stage.attribute_name, None) callbacks_from_base = getattr(base, temporary_stage.attribute_name, None)
if not current_callbacks: if callbacks_from_base:
continue break
callbacks_from_base.extend(current_callbacks) else:
callbacks_from_base = list(lang.dedupe(callbacks_from_base)) callbacks_from_base = []
# Set the callbacks in this class and flush the temporary stage # Set the callbacks in this class and flush the temporary stage
attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base
del temporary_stage.callbacks[:] del temporary_stage.callbacks[:]
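A toy illustration of the ordering rule spelled out in the comments above (plain Python, not Spack's actual metaclass; the callback names are invented):

def dedupe(seq):
    seen = set()
    return [x for x in seq if not (x in seen or seen.add(x))]

# callbacks accumulated from base classes, in registration order
callbacks_from_base = ["base_setup", "base_check"]
# callbacks registered on the class currently being defined
staged_callbacks = ["derived_check"]

# derived-class callbacks precede deduplicated base-class callbacks
print(staged_callbacks[:] + dedupe(callbacks_from_base))
# ['derived_check', 'base_setup', 'base_check']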

File diff suppressed because it is too large

View File

@@ -334,7 +334,8 @@ def display_specs(specs, args=None, **kwargs):
variants (bool): Show variants with specs variants (bool): Show variants with specs
indent (int): indent each line this much indent (int): indent each line this much
groups (bool): display specs grouped by arch/compiler (default True) groups (bool): display specs grouped by arch/compiler (default True)
decorator (typing.Callable): function to call to decorate specs decorators (dict): dictionary mapping specs to decorators
header_callback (typing.Callable): called at start of arch/compiler groups
all_headers (bool): show headers even when arch/compiler aren't defined all_headers (bool): show headers even when arch/compiler aren't defined
output (typing.IO): A file object to write to. Default is ``sys.stdout`` output (typing.IO): A file object to write to. Default is ``sys.stdout``
@@ -383,13 +384,15 @@ def get_arg(name, default=None):
vfmt = "{variants}" if variants else "" vfmt = "{variants}" if variants else ""
format_string = nfmt + "{@version}" + ffmt + vfmt format_string = nfmt + "{@version}" + ffmt + vfmt
transform = {"package": decorator, "fullpackage": decorator}
def fmt(s, depth=0): def fmt(s, depth=0):
"""Formatter function for all output specs""" """Formatter function for all output specs"""
string = "" string = ""
if hashes: if hashes:
string += gray_hash(s, hlen) + " " string += gray_hash(s, hlen) + " "
string += depth * " " string += depth * " "
string += decorator(s, s.cformat(format_string)) string += s.cformat(format_string, transform=transform)
return string return string
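For reference, a decorator here is just a callable taking a spec and its pre-formatted string; a minimal sketch, with a made-up highlighting condition:

import llnl.util.tty.color as color

def bold_zlib(spec, fmt):
    # wrap the formatted string in bold markup for specs we care about
    if spec.name == "zlib":  # stand-in condition for illustration
        return color.colorize("@*{%s}" % fmt)
    return fmt

# cmd.display_specs(results, args, decorator=bold_zlib)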
def format_list(specs): def format_list(specs):
@@ -448,7 +451,7 @@ def filter_loaded_specs(specs):
return [x for x in specs if x.dag_hash() in hashes] return [x for x in specs if x.dag_hash() in hashes]
def print_how_many_pkgs(specs, pkg_type="", suffix=""): def print_how_many_pkgs(specs, pkg_type=""):
"""Given a list of specs, this will print a message about how many """Given a list of specs, this will print a message about how many
specs are in that list. specs are in that list.
@@ -459,7 +462,7 @@ def print_how_many_pkgs(specs, pkg_type="", suffix=""):
category, e.g. if pkg_type is "installed" then the message category, e.g. if pkg_type is "installed" then the message
would be "3 installed packages" would be "3 installed packages"
""" """
tty.msg("%s" % llnl.string.plural(len(specs), pkg_type + " package") + suffix) tty.msg("%s" % llnl.string.plural(len(specs), pkg_type + " package"))
def spack_is_git_repo(): def spack_is_git_repo():

View File

@@ -133,11 +133,6 @@ def setup_parser(subparser: argparse.ArgumentParser):
help="when pushing to an OCI registry, tag an image containing all root specs and their " help="when pushing to an OCI registry, tag an image containing all root specs and their "
"runtime dependencies", "runtime dependencies",
) )
push.add_argument(
"--private",
action="store_true",
help="for a private mirror, include non-redistributable packages",
)
arguments.add_common_arguments(push, ["specs", "jobs"]) arguments.add_common_arguments(push, ["specs", "jobs"])
push.set_defaults(func=push_fn) push.set_defaults(func=push_fn)
@@ -280,37 +275,23 @@ def setup_parser(subparser: argparse.ArgumentParser):
# Sync buildcache entries from one mirror to another # Sync buildcache entries from one mirror to another
sync = subparsers.add_parser("sync", help=sync_fn.__doc__) sync = subparsers.add_parser("sync", help=sync_fn.__doc__)
sync.add_argument(
sync_manifest_source = sync.add_argument_group( "--manifest-glob", help="a quoted glob pattern identifying copy manifest files"
"Manifest Source",
"Specify a list of build cache objects to sync using manifest file(s)."
'This option takes the place of the "source mirror" for synchronization'
'and optionally takes a "destination mirror" ',
) )
sync_manifest_source.add_argument( sync.add_argument(
"--manifest-glob", help="a quoted glob pattern identifying CI rebuild manifest files"
)
sync_source_mirror = sync.add_argument_group(
"Named Source",
"Specify a single registered source mirror to synchronize from. This option requires"
"the specification of a destination mirror.",
)
sync_source_mirror.add_argument(
"src_mirror", "src_mirror",
metavar="source mirror", metavar="source mirror",
nargs="?",
type=arguments.mirror_name_or_url, type=arguments.mirror_name_or_url,
nargs="?",
help="source mirror name, path, or URL", help="source mirror name, path, or URL",
) )
sync.add_argument( sync.add_argument(
"dest_mirror", "dest_mirror",
metavar="destination mirror", metavar="destination mirror",
nargs="?",
type=arguments.mirror_name_or_url, type=arguments.mirror_name_or_url,
nargs="?",
help="destination mirror name, path, or URL", help="destination mirror name, path, or URL",
) )
sync.set_defaults(func=sync_fn) sync.set_defaults(func=sync_fn)
# Update buildcache index without copying any additional packages # Update buildcache index without copying any additional packages
@@ -372,25 +353,6 @@ def _make_pool() -> MaybePool:
return NoPool() return NoPool()
def _skip_no_redistribute_for_public(specs):
remaining_specs = list()
removed_specs = list()
for spec in specs:
if spec.package.redistribute_binary:
remaining_specs.append(spec)
else:
removed_specs.append(spec)
if removed_specs:
colified_output = tty.colify.colified(list(s.name for s in removed_specs), indent=4)
tty.debug(
"The following specs will not be added to the binary cache"
" because they cannot be redistributed:\n"
f"{colified_output}\n"
"You can use `--private` to include them."
)
return remaining_specs
def push_fn(args): def push_fn(args):
"""create a binary package and push it to a mirror""" """create a binary package and push it to a mirror"""
if args.spec_file: if args.spec_file:
@@ -441,8 +403,6 @@ def push_fn(args):
root="package" in args.things_to_install, root="package" in args.things_to_install,
dependencies="dependencies" in args.things_to_install, dependencies="dependencies" in args.things_to_install,
) )
if not args.private:
specs = _skip_no_redistribute_for_public(specs)
# When pushing multiple specs, print the url once ahead of time, as well as how # When pushing multiple specs, print the url once ahead of time, as well as how
# many specs are being pushed. # many specs are being pushed.
@@ -1110,17 +1070,7 @@ def sync_fn(args):
requires an active environment in order to know which specs to sync requires an active environment in order to know which specs to sync
""" """
if args.manifest_glob: if args.manifest_glob:
# Passing the args.src_mirror here because it is not possible to manifest_copy(glob.glob(args.manifest_glob))
# have the destination be required when specifying a named source
# mirror and optional for the --manifest-glob argument. In the case
# of manifest glob sync, the source mirror positional argument is the
# destination mirror if it is specified. If there are two mirrors
# specified, the second is ignored and the first is the override
# destination.
if args.dest_mirror:
tty.warn(f"Ignoring unused arguemnt: {args.dest_mirror.name}")
manifest_copy(glob.glob(args.manifest_glob), args.src_mirror)
return 0 return 0
if args.src_mirror is None or args.dest_mirror is None: if args.src_mirror is None or args.dest_mirror is None:
@@ -1171,7 +1121,7 @@ def sync_fn(args):
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)
def manifest_copy(manifest_file_list, dest_mirror=None): def manifest_copy(manifest_file_list):
"""Read manifest files containing information about specific specs to copy """Read manifest files containing information about specific specs to copy
from source to destination, remove duplicates since any binary package for from source to destination, remove duplicates since any binary package for
a given hash should be the same as any other, and copy all files specified a given hash should be the same as any other, and copy all files specified
@@ -1185,17 +1135,10 @@ def manifest_copy(manifest_file_list, dest_mirror=None):
# Last duplicate hash wins # Last duplicate hash wins
deduped_manifest[spec_hash] = copy_list deduped_manifest[spec_hash] = copy_list
build_cache_dir = bindist.build_cache_relative_path()
for spec_hash, copy_list in deduped_manifest.items(): for spec_hash, copy_list in deduped_manifest.items():
for copy_file in copy_list: for copy_file in copy_list:
dest = copy_file["dest"] tty.debug("copying {0} to {1}".format(copy_file["src"], copy_file["dest"]))
if dest_mirror: copy_buildcache_file(copy_file["src"], copy_file["dest"])
src_relative_path = os.path.join(
build_cache_dir, copy_file["src"].rsplit(build_cache_dir, 1)[1].lstrip("/")
)
dest = url_util.join(dest_mirror.push_url, src_relative_path)
tty.debug("copying {0} to {1}".format(copy_file["src"], dest))
copy_buildcache_file(copy_file["src"], dest)
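A worked example of the destination rewrite above, with made-up paths (assuming bindist.build_cache_relative_path() returns "build_cache"):

import os

build_cache_dir = "build_cache"
src = "/old-mirror/build_cache/linux-x86_64/pkg.spack"
# keep only the part of the source path below the build cache directory
src_relative_path = os.path.join(
    build_cache_dir, src.rsplit(build_cache_dir, 1)[1].lstrip("/")
)
print(src_relative_path)  # build_cache/linux-x86_64/pkg.spack
# url_util.join(dest_mirror.push_url, src_relative_path) then reproduces
# the same relative layout under the destination mirror.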
def update_index(mirror: spack.mirror.Mirror, update_keys=False): def update_index(mirror: spack.mirror.Mirror, update_keys=False):
@@ -1222,18 +1165,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
url, bindist.build_cache_relative_path(), bindist.build_cache_keys_relative_path() url, bindist.build_cache_relative_path(), bindist.build_cache_keys_relative_path()
) )
try: bindist.generate_key_index(keys_url)
bindist.generate_key_index(keys_url)
except bindist.CannotListKeys as e:
# Do not error out if listing keys went wrong. This usually means that the _gpg path
# does not exist. TODO: distinguish between this and other errors.
tty.warn(f"did not update the key index: {e}")
def update_index_fn(args): def update_index_fn(args):
"""update a buildcache index""" """update a buildcache index"""
return update_index(args.mirror, update_keys=args.keys) update_index(args.mirror, update_keys=args.keys)
def buildcache(parser, args): def buildcache(parser, args):
return args.func(args) if args.func:
args.func(args)

View File

@@ -183,7 +183,7 @@ def checksum(parser, args):
print() print()
if args.add_to_package: if args.add_to_package:
add_versions_to_package(pkg, version_lines, args.batch) add_versions_to_package(pkg, version_lines)
def print_checksum_status(pkg: PackageBase, version_hashes: dict): def print_checksum_status(pkg: PackageBase, version_hashes: dict):
@@ -229,7 +229,7 @@ def print_checksum_status(pkg: PackageBase, version_hashes: dict):
tty.die("Invalid checksums found.") tty.die("Invalid checksums found.")
def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool): def add_versions_to_package(pkg: PackageBase, version_lines: str):
""" """
Add checksumed versions to a package's instructions and open a user's Add checksumed versions to a package's instructions and open a user's
editor so they may double check the work of the function. editor so they may double check the work of the function.
@@ -282,5 +282,5 @@ def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool
tty.msg(f"Added {num_versions_added} new versions to {pkg.name}") tty.msg(f"Added {num_versions_added} new versions to {pkg.name}")
tty.msg(f"Open {filename} to review the additions.") tty.msg(f"Open {filename} to review the additions.")
if sys.stdout.isatty() and not is_batch: if sys.stdout.isatty():
editor(filename) editor(filename)

View File

@@ -14,7 +14,6 @@
import spack.binary_distribution as bindist import spack.binary_distribution as bindist
import spack.ci as spack_ci import spack.ci as spack_ci
import spack.cmd
import spack.cmd.buildcache as buildcache import spack.cmd.buildcache as buildcache
import spack.config as cfg import spack.config as cfg
import spack.environment as ev import spack.environment as ev
@@ -33,7 +32,6 @@
SPACK_COMMAND = "spack" SPACK_COMMAND = "spack"
MAKE_COMMAND = "make" MAKE_COMMAND = "make"
INSTALL_FAIL_CODE = 1 INSTALL_FAIL_CODE = 1
FAILED_CREATE_BUILDCACHE_CODE = 100
def deindent(desc): def deindent(desc):
@@ -707,9 +705,11 @@ def ci_rebuild(args):
cdash_handler.report_skipped(job_spec, reports_dir, reason=msg) cdash_handler.report_skipped(job_spec, reports_dir, reason=msg)
cdash_handler.copy_test_results(reports_dir, job_test_dir) cdash_handler.copy_test_results(reports_dir, job_test_dir)
# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure
# outside of the pipeline environment.
if install_exit_code == 0: if install_exit_code == 0:
# If the install succeeded, push it to one or more mirrors. Failure to push to any mirror
# will result in a non-zero exit code. Pushing is best-effort.
mirror_urls = [buildcache_mirror_url] mirror_urls = [buildcache_mirror_url]
# TODO: Remove this block in Spack 0.23 # TODO: Remove this block in Spack 0.23
@@ -721,12 +721,13 @@ def ci_rebuild(args):
destination_mirror_urls=mirror_urls, destination_mirror_urls=mirror_urls,
sign_binaries=spack_ci.can_sign_binaries(), sign_binaries=spack_ci.can_sign_binaries(),
): ):
if not result.success: msg = tty.msg if result.success else tty.warn
install_exit_code = FAILED_CREATE_BUILDCACHE_CODE msg(
(tty.msg if result.success else tty.error)( "{} {} to {}".format(
f'{"Pushed" if result.success else "Failed to push"} ' "Pushed" if result.success else "Failed to push",
f'{job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when())} ' job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when()),
f"to {result.url}" result.url,
)
) )
# If this is a develop pipeline, check if the spec that we just built is # If this is a develop pipeline, check if the spec that we just built is
@@ -747,22 +748,22 @@ def ci_rebuild(args):
tty.warn(msg.format(broken_spec_path, err)) tty.warn(msg.format(broken_spec_path, err))
else: else:
# If the install did not succeed, print out some instructions on how to reproduce this
# build failure outside of the pipeline environment.
tty.debug("spack install exited non-zero, will not create buildcache") tty.debug("spack install exited non-zero, will not create buildcache")
api_root_url = os.environ.get("CI_API_V4_URL") api_root_url = os.environ.get("CI_API_V4_URL")
ci_project_id = os.environ.get("CI_PROJECT_ID") ci_project_id = os.environ.get("CI_PROJECT_ID")
ci_job_id = os.environ.get("CI_JOB_ID") ci_job_id = os.environ.get("CI_JOB_ID")
repro_job_url = f"{api_root_url}/projects/{ci_project_id}/jobs/{ci_job_id}/artifacts" repro_job_url = "{0}/projects/{1}/jobs/{2}/artifacts".format(
api_root_url, ci_project_id, ci_job_id
)
# Control characters cause this to be printed in blue so it stands out # Control characters cause this to be printed in blue so it stands out
print( reproduce_msg = """
f"""
\033[34mTo reproduce this build locally, run: \033[34mTo reproduce this build locally, run:
spack ci reproduce-build {repro_job_url} [--working-dir <dir>] [--autostart] spack ci reproduce-build {0} [--working-dir <dir>] [--autostart]
If this project does not have public pipelines, you will need to first: If this project does not have public pipelines, you will need to first:
@@ -770,9 +771,12 @@ def ci_rebuild(args):
... then follow the printed instructions.\033[0;0m ... then follow the printed instructions.\033[0;0m
""" """.format(
repro_job_url
) )
print(reproduce_msg)
rebuild_timer.stop() rebuild_timer.stop()
try: try:
with open("install_timers.json", "w") as timelog: with open("install_timers.json", "w") as timelog:

View File

@@ -570,14 +570,6 @@ def add_concretizer_args(subparser):
default=None, default=None,
help="reuse installed dependencies only", help="reuse installed dependencies only",
) )
subgroup.add_argument(
"--deprecated",
action=ConfigSetAction,
dest="config:deprecated",
const=True,
default=None,
help="allow concretizer to select deprecated versions",
)
def add_connection_args(subparser, add_help): def add_connection_args(subparser, add_help):

View File

@@ -89,7 +89,7 @@ def compiler_find(args):
paths, scope=None, mixed_toolchain=args.mixed_toolchain paths, scope=None, mixed_toolchain=args.mixed_toolchain
) )
if new_compilers: if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope) spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope, init_config=False)
n = len(new_compilers) n = len(new_compilers)
s = "s" if n > 1 else "" s = "s" if n > 1 else ""

View File

@@ -19,7 +19,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["jobs", "no_checksum", "spec"]) arguments.add_common_arguments(subparser, ["jobs"])
subparser.add_argument( subparser.add_argument(
"-d", "-d",
"--source-path", "--source-path",
@@ -34,6 +34,7 @@ def setup_parser(subparser):
dest="ignore_deps", dest="ignore_deps",
help="do not try to install dependencies of requested packages", help="do not try to install dependencies of requested packages",
) )
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument( subparser.add_argument(
"--keep-prefix", "--keep-prefix",
action="store_true", action="store_true",
@@ -62,6 +63,7 @@ def setup_parser(subparser):
choices=["root", "all"], choices=["root", "all"],
help="run tests on only root packages or all packages", help="run tests on only root packages or all packages",
) )
arguments.add_common_arguments(subparser, ["spec"])
stop_group = subparser.add_mutually_exclusive_group() stop_group = subparser.add_mutually_exclusive_group()
stop_group.add_argument( stop_group.add_argument(
@@ -123,6 +125,9 @@ def dev_build(self, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
tests = False tests = False
if args.test == "all": if args.test == "all":
tests = True tests = True

View File

@@ -9,7 +9,6 @@
import shutil import shutil
import sys import sys
import tempfile import tempfile
from pathlib import Path
from typing import Optional from typing import Optional
import llnl.string as string import llnl.string as string
@@ -45,7 +44,6 @@
"deactivate", "deactivate",
"create", "create",
["remove", "rm"], ["remove", "rm"],
["rename", "mv"],
["list", "ls"], ["list", "ls"],
["status", "st"], ["status", "st"],
"loads", "loads",
@@ -474,82 +472,11 @@ def env_remove(args):
tty.msg(f"Successfully removed environment '{bad_env_name}'") tty.msg(f"Successfully removed environment '{bad_env_name}'")
#
# env rename
#
def env_rename_setup_parser(subparser):
"""rename an existing environment"""
subparser.add_argument(
"mv_from", metavar="from", help="name (or path) of existing environment"
)
subparser.add_argument(
"mv_to", metavar="to", help="new name (or path) for existing environment"
)
subparser.add_argument(
"-d",
"--dir",
action="store_true",
help="the specified arguments correspond to directory paths",
)
subparser.add_argument(
"-f", "--force", action="store_true", help="allow overwriting of an existing environment"
)
def env_rename(args):
"""Rename an environment.
This renames a managed environment or moves an anonymous environment.
"""
# Directory option has been specified
if args.dir:
if not ev.is_env_dir(args.mv_from):
tty.die("The specified path does not correspond to a valid spack environment")
from_path = Path(args.mv_from)
if not args.force:
if ev.is_env_dir(args.mv_to):
tty.die(
"The new path corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
if Path(args.mv_to).exists():
tty.die("The new path already exists; specify the --force flag to overwrite it.")
to_path = Path(args.mv_to)
# Name option being used
elif ev.exists(args.mv_from):
from_path = ev.environment.environment_dir_from_name(args.mv_from)
if not args.force and ev.exists(args.mv_to):
tty.die(
"The new name corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
to_path = ev.environment.root(args.mv_to)
# Neither
else:
tty.die("The specified name does not correspond to a managed spack environment")
# Guard against renaming from or to an active environment
active_env = ev.active_environment()
if active_env:
from_env = ev.Environment(from_path)
if from_env.path == active_env.path:
tty.die("Cannot rename active environment")
if to_path == active_env.path:
tty.die(f"{args.mv_to} is an active environment")
shutil.rmtree(to_path, ignore_errors=True)
fs.rename(from_path, to_path)
tty.msg(f"Successfully renamed environment {args.mv_from} to {args.mv_to}")
# #
# env list # env list
# #
def env_list_setup_parser(subparser): def env_list_setup_parser(subparser):
"""list managed environments""" """list available environments"""
def env_list(args): def env_list(args):

View File

@@ -18,7 +18,6 @@
import spack.cray_manifest as cray_manifest import spack.cray_manifest as cray_manifest
import spack.detection import spack.detection
import spack.error import spack.error
import spack.repo
import spack.util.environment import spack.util.environment
from spack.cmd.common import arguments from spack.cmd.common import arguments
@@ -153,9 +152,9 @@ def external_find(args):
def packages_to_search_for( def packages_to_search_for(
*, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]] *, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]]
): ):
result = list( result = []
{pkg for tag in tags for pkg in spack.repo.PATH.packages_with_tags(tag, full=True)} for current_tag in tags:
) result.extend(spack.repo.PATH.packages_with_tags(current_tag, full=True))
if names: if names:
# Match both fully qualified and unqualified # Match both fully qualified and unqualified

View File

@@ -18,7 +18,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument( subparser.add_argument(
"-m", "-m",
"--missing", "--missing",
@@ -28,7 +28,7 @@ def setup_parser(subparser):
subparser.add_argument( subparser.add_argument(
"-D", "--dependencies", action="store_true", help="also fetch all dependencies" "-D", "--dependencies", action="store_true", help="also fetch all dependencies"
) )
arguments.add_concretizer_args(subparser) arguments.add_common_arguments(subparser, ["specs"])
subparser.epilog = ( subparser.epilog = (
"With an active environment, the specs " "With an active environment, the specs "
"parameter can be omitted. In this case all (uninstalled" "parameter can be omitted. In this case all (uninstalled"
@@ -40,6 +40,9 @@ def fetch(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.specs: if args.specs:
specs = spack.cmd.parse_specs(args.specs, concretize=True) specs = spack.cmd.parse_specs(args.specs, concretize=True)
else: else:

View File

@@ -3,6 +3,7 @@
# #
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import copy
import sys import sys
import llnl.util.lang import llnl.util.lang
@@ -13,7 +14,6 @@
import spack.cmd as cmd import spack.cmd as cmd
import spack.environment as ev import spack.environment as ev
import spack.repo import spack.repo
import spack.store
from spack.cmd.common import arguments from spack.cmd.common import arguments
from spack.database import InstallStatuses from spack.database import InstallStatuses
@@ -69,12 +69,6 @@ def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["long", "very_long", "tags", "namespaces"]) arguments.add_common_arguments(subparser, ["long", "very_long", "tags", "namespaces"])
subparser.add_argument(
"-r",
"--only-roots",
action="store_true",
help="don't show full list of installed specs in an environment",
)
subparser.add_argument( subparser.add_argument(
"-c", "-c",
"--show-concretized", "--show-concretized",
@@ -146,12 +140,6 @@ def setup_parser(subparser):
subparser.add_argument( subparser.add_argument(
"--only-deprecated", action="store_true", help="show only deprecated packages" "--only-deprecated", action="store_true", help="show only deprecated packages"
) )
subparser.add_argument(
"--install-tree",
action="store",
default="all",
help="Install trees to query: 'all' (default), 'local', 'upstream', upstream name or path",
)
subparser.add_argument("--start-date", help="earliest date of installation [YYYY-MM-DD]") subparser.add_argument("--start-date", help="earliest date of installation [YYYY-MM-DD]")
subparser.add_argument("--end-date", help="latest date of installation [YYYY-MM-DD]") subparser.add_argument("--end-date", help="latest date of installation [YYYY-MM-DD]")
@@ -180,12 +168,6 @@ def query_arguments(args):
q_args = {"installed": installed, "known": known, "explicit": explicit} q_args = {"installed": installed, "known": known, "explicit": explicit}
install_tree = args.install_tree
upstreams = spack.config.get("upstreams", {})
if install_tree in upstreams.keys():
install_tree = upstreams[install_tree]["install_tree"]
q_args["install_tree"] = install_tree
# Time window of installation # Time window of installation
for attribute in ("start_date", "end_date"): for attribute in ("start_date", "end_date"):
date = getattr(args, attribute) date = getattr(args, attribute)
@@ -195,22 +177,26 @@ def query_arguments(args):
return q_args return q_args
def make_env_decorator(env): def setup_env(env):
"""Create a function for decorating specs when in an environment.""" """Create a function for decorating specs when in an environment."""
roots = set(env.roots()) def strip_build(seq):
removed = set(env.removed_specs()) return set(s.copy(deps=("link", "run")) for s in seq)
added = set(strip_build(env.added_specs()))
roots = set(strip_build(env.roots()))
removed = set(strip_build(env.removed_specs()))
def decorator(spec, fmt): def decorator(spec, fmt):
# add +/-/* to show added/removed/root specs # add +/-/* to show added/removed/root specs
if any(spec.dag_hash() == r.dag_hash() for r in roots): if any(spec.dag_hash() == r.dag_hash() for r in roots):
return color.colorize(f"@*{{{fmt}}}") return color.colorize("@*{%s}" % fmt)
elif spec in removed: elif spec in removed:
return color.colorize(f"@K{{{fmt}}}") return color.colorize("@K{%s}" % fmt)
else: else:
return fmt return "%s" % fmt
return decorator return decorator, added, roots, removed
def display_env(env, args, decorator, results): def display_env(env, args, decorator, results):
@@ -225,51 +211,28 @@ def display_env(env, args, decorator, results):
""" """
tty.msg("In environment %s" % env.name) tty.msg("In environment %s" % env.name)
num_roots = len(env.user_specs) or "No" if not env.user_specs:
tty.msg(f"{num_roots} root specs") tty.msg("No root specs")
else:
tty.msg("Root specs")
concrete_specs = { # Root specs cannot be displayed with prefixes, since those are not
root: concrete_root # set for abstract specs. Same for hashes
for root, concrete_root in zip(env.concretized_user_specs, env.concrete_roots()) root_args = copy.copy(args)
} root_args.paths = False
def root_decorator(spec, string): # Roots are displayed with variants, etc. so that we can see
"""Decorate root specs with their install status if needed""" # specifically what the user asked for.
concrete = concrete_specs.get(spec)
if concrete:
status = color.colorize(concrete.install_status().value)
hash = concrete.dag_hash()
else:
status = color.colorize(spack.spec.InstallStatus.absent.value)
hash = "-" * 32
# TODO: status has two extra spaces on the end of it, but fixing this and other spec
# TODO: space format idiosyncrasies is complicated. Fix this eventually
status = status[:-2]
if args.long or args.very_long:
hash = color.colorize(f"@K{{{hash[: 7 if args.long else None]}}}")
return f"{status} {hash} {string}"
else:
return f"{status} {string}"
with spack.store.STORE.db.read_transaction():
cmd.display_specs( cmd.display_specs(
env.user_specs, env.user_specs,
args, root_args,
# these are overrides of CLI args decorator=lambda s, f: color.colorize("@*{%s}" % f),
paths=False,
long=False,
very_long=False,
# these enforce details in the root specs to show what the user asked for
namespaces=True, namespaces=True,
show_flags=True, show_flags=True,
show_full_compiler=True, show_full_compiler=True,
decorator=root_decorator,
variants=True, variants=True,
) )
print()
print()
if args.show_concretized: if args.show_concretized:
tty.msg("Concretized roots") tty.msg("Concretized roots")
@@ -279,7 +242,7 @@ def root_decorator(spec, string):
# Display a header for the installed packages section IF there are installed # Display a header for the installed packages section IF there are installed
# packages. If there aren't any, we'll just end up printing "0 installed packages" # packages. If there aren't any, we'll just end up printing "0 installed packages"
# later. # later.
if results and not args.only_roots: if results:
tty.msg("Installed packages") tty.msg("Installed packages")
@@ -288,10 +251,9 @@ def find(parser, args):
results = args.specs(**q_args) results = args.specs(**q_args)
env = ev.active_environment() env = ev.active_environment()
if not env and args.only_roots: decorator = lambda s, f: f
tty.die("-r / --only-roots requires an active environment") if env:
decorator, _, roots, _ = setup_env(env)
decorator = make_env_decorator(env) if env else lambda s, f: f
# use groups by default except with format. # use groups by default except with format.
if args.groups is None: if args.groups is None:
@@ -318,12 +280,9 @@ def find(parser, args):
if env: if env:
display_env(env, args, decorator, results) display_env(env, args, decorator, results)
count_suffix = " (not shown)" cmd.display_specs(results, args, decorator=decorator, all_headers=True)
if not args.only_roots:
cmd.display_specs(results, args, decorator=decorator, all_headers=True)
count_suffix = ""
# print number of installed packages last (as the list may be long) # print number of installed packages last (as the list may be long)
if sys.stdout.isatty() and args.groups: if sys.stdout.isatty() and args.groups:
pkg_type = "loaded" if args.loaded else "installed" pkg_type = "loaded" if args.loaded else "installed"
spack.cmd.print_how_many_pkgs(results, pkg_type, suffix=count_suffix) spack.cmd.print_how_many_pkgs(results, pkg_type)

View File

@@ -263,8 +263,8 @@ def _fmt_name_and_default(variant):
return color.colorize(f"@c{{{variant.name}}} @C{{[{_fmt_value(variant.default)}]}}") return color.colorize(f"@c{{{variant.name}}} @C{{[{_fmt_value(variant.default)}]}}")
def _fmt_when(when: "spack.spec.Spec", indent: int): def _fmt_when(when, indent):
return color.colorize(f"{indent * ' '}@B{{when}} {color.cescape(str(when))}") return color.colorize(f"{indent * ' '}@B{{when}} {color.cescape(when)}")
def _fmt_variant_description(variant, width, indent): def _fmt_variant_description(variant, width, indent):
@@ -441,7 +441,7 @@ def get_url(version):
return "No URL" return "No URL"
url = get_url(preferred) if pkg.has_code else "" url = get_url(preferred) if pkg.has_code else ""
line = version(" {0}".format(pad(preferred))) + color.cescape(str(url)) line = version(" {0}".format(pad(preferred))) + color.cescape(url)
color.cwrite(line) color.cwrite(line)
print() print()
@@ -464,7 +464,7 @@ def get_url(version):
continue continue
for v, url in vers: for v, url in vers:
line = version(" {0}".format(pad(v))) + color.cescape(str(url)) line = version(" {0}".format(pad(v))) + color.cescape(url)
color.cprint(line) color.cprint(line)
@@ -475,7 +475,10 @@ def print_virtuals(pkg, args):
color.cprint(section_title("Virtual Packages: ")) color.cprint(section_title("Virtual Packages: "))
if pkg.provided: if pkg.provided:
for when, specs in reversed(sorted(pkg.provided.items())): for when, specs in reversed(sorted(pkg.provided.items())):
line = " %s provides %s" % (when.cformat(), ", ".join(s.cformat() for s in specs)) line = " %s provides %s" % (
when.colorized(),
", ".join(s.colorized() for s in specs),
)
print(line) print(line)
else: else:
@@ -494,9 +497,7 @@ def print_licenses(pkg, args):
pad = padder(pkg.licenses, 4) pad = padder(pkg.licenses, 4)
for when_spec in pkg.licenses: for when_spec in pkg.licenses:
license_identifier = pkg.licenses[when_spec] license_identifier = pkg.licenses[when_spec]
line = license(" {0}".format(pad(license_identifier))) + color.cescape( line = license(" {0}".format(pad(license_identifier))) + color.cescape(when_spec)
str(when_spec)
)
color.cprint(line) color.cprint(line)

View File

@@ -176,7 +176,7 @@ def setup_parser(subparser):
dest="install_source", dest="install_source",
help="install source files in prefix", help="install source files in prefix",
) )
arguments.add_common_arguments(subparser, ["no_checksum"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument( subparser.add_argument(
"-v", "-v",
"--verbose", "--verbose",
@@ -326,6 +326,9 @@ def install(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.log_file and not args.log_format: if args.log_file and not args.log_format:
msg = "the '--log-format' must be specified when using '--log-file'" msg = "the '--log-format' must be specified when using '--log-file'"
tty.die(msg) tty.die(msg)
@@ -420,9 +423,10 @@ def install_with_active_env(env: ev.Environment, args, install_kwargs, reporter_
with reporter_factory(specs_to_install): with reporter_factory(specs_to_install):
env.install_specs(specs_to_install, **install_kwargs) env.install_specs(specs_to_install, **install_kwargs)
finally: finally:
if env.views: # TODO: this is doing way too much to trigger
with env.write_transaction(): # views and modules to be generated.
env.write(regenerate=True) with env.write_transaction():
env.write(regenerate=True)
def concrete_specs_from_cli(args, install_kwargs): def concrete_specs_from_cli(args, install_kwargs):

View File

@@ -5,6 +5,8 @@
import sys import sys
import llnl.util.tty as tty
import spack.cmd import spack.cmd
import spack.cmd.find import spack.cmd.find
import spack.environment as ev import spack.environment as ev
@@ -68,6 +70,16 @@ def setup_parser(subparser):
help="load the first match if multiple packages match the spec", help="load the first match if multiple packages match the spec",
) )
subparser.add_argument(
"--only",
default="package,dependencies",
dest="things_to_load",
choices=["package", "dependencies"],
help="select whether to load the package and its dependencies\n\n"
"the default is to load the package and all dependencies. alternatively, "
"one can decide to load only the package or only the dependencies",
)
subparser.add_argument( subparser.add_argument(
"--list", "--list",
action="store_true", action="store_true",
@@ -98,6 +110,11 @@ def load(parser, args):
) )
return 1 return 1
if args.things_to_load != "package,dependencies":
tty.warn(
"The `--only` flag in spack load is deprecated and will be removed in Spack v0.22"
)
with spack.store.STORE.db.read_transaction(): with spack.store.STORE.db.read_transaction():
env_mod = uenv.environment_modifications_for_specs(*specs) env_mod = uenv.environment_modifications_for_specs(*specs)
for spec in specs: for spec in specs:

View File

@@ -53,7 +53,6 @@ def setup_parser(subparser):
"-S", "--stages", action="store_true", help="top level stage directory" "-S", "--stages", action="store_true", help="top level stage directory"
) )
directories.add_argument( directories.add_argument(
"-c",
"--source-dir", "--source-dir",
action="store_true", action="store_true",
help="source directory for a spec (requires it to be staged first)", help="source directory for a spec (requires it to be staged first)",

View File

@@ -28,7 +28,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command") sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command")
@@ -71,13 +71,7 @@ def setup_parser(subparser):
help="the number of versions to fetch for each spec, choose 'all' to" help="the number of versions to fetch for each spec, choose 'all' to"
" retrieve all versions of each package", " retrieve all versions of each package",
) )
create_parser.add_argument(
"--private",
action="store_true",
help="for a private mirror, include non-redistributable packages",
)
arguments.add_common_arguments(create_parser, ["specs"]) arguments.add_common_arguments(create_parser, ["specs"])
arguments.add_concretizer_args(create_parser)
# Destroy # Destroy
destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__) destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__)
@@ -113,11 +107,6 @@ def setup_parser(subparser):
"and source use `--type binary --type source` (default)" "and source use `--type binary --type source` (default)"
), ),
) )
add_parser.add_argument(
"--autopush",
action="store_true",
help=("set mirror to push automatically after installation"),
)
add_parser_signed = add_parser.add_mutually_exclusive_group(required=False) add_parser_signed = add_parser.add_mutually_exclusive_group(required=False)
add_parser_signed.add_argument( add_parser_signed.add_argument(
"--unsigned", "--unsigned",
@@ -185,21 +174,6 @@ def setup_parser(subparser):
), ),
) )
set_parser.add_argument("--url", help="url of mirror directory from 'spack mirror create'") set_parser.add_argument("--url", help="url of mirror directory from 'spack mirror create'")
set_parser_autopush = set_parser.add_mutually_exclusive_group(required=False)
set_parser_autopush.add_argument(
"--autopush",
help="set mirror to push automatically after installation",
action="store_true",
default=None,
dest="autopush",
)
set_parser_autopush.add_argument(
"--no-autopush",
help="set mirror to not push automatically after installation",
action="store_false",
default=None,
dest="autopush",
)
set_parser_unsigned = set_parser.add_mutually_exclusive_group(required=False) set_parser_unsigned = set_parser.add_mutually_exclusive_group(required=False)
set_parser_unsigned.add_argument( set_parser_unsigned.add_argument(
"--unsigned", "--unsigned",
@@ -243,7 +217,6 @@ def mirror_add(args):
or args.type or args.type
or args.oci_username or args.oci_username
or args.oci_password or args.oci_password
or args.autopush
or args.signed is not None or args.signed is not None
): ):
connection = {"url": args.url} connection = {"url": args.url}
@@ -260,8 +233,6 @@ def mirror_add(args):
if args.type: if args.type:
connection["binary"] = "binary" in args.type connection["binary"] = "binary" in args.type
connection["source"] = "source" in args.type connection["source"] = "source" in args.type
if args.autopush:
connection["autopush"] = args.autopush
if args.signed is not None: if args.signed is not None:
connection["signed"] = args.signed connection["signed"] = args.signed
mirror = spack.mirror.Mirror(connection, name=args.name) mirror = spack.mirror.Mirror(connection, name=args.name)
@@ -298,8 +269,6 @@ def _configure_mirror(args):
changes["access_pair"] = [args.oci_username, args.oci_password] changes["access_pair"] = [args.oci_username, args.oci_password]
if getattr(args, "signed", None) is not None: if getattr(args, "signed", None) is not None:
changes["signed"] = args.signed changes["signed"] = args.signed
if getattr(args, "autopush", None) is not None:
changes["autopush"] = args.autopush
# argparse cannot distinguish between --binary and --no-binary when same dest :( # argparse cannot distinguish between --binary and --no-binary when same dest :(
# notice that set-url does not have these args, so getattr # notice that set-url does not have these args, so getattr
@@ -364,6 +333,7 @@ def concrete_specs_from_user(args):
specs = filter_externals(specs) specs = filter_externals(specs)
specs = list(set(specs)) specs = list(set(specs))
specs.sort(key=lambda s: (s.name, s.version)) specs.sort(key=lambda s: (s.name, s.version))
specs, _ = lang.stable_partition(specs, predicate_fn=not_excluded_fn(args))
return specs return specs
@@ -408,50 +378,36 @@ def concrete_specs_from_cli_or_file(args):
return specs return specs
class IncludeFilter: def not_excluded_fn(args):
def __init__(self, args): """Return a predicate that evaluate to True if a spec was not explicitly
self.exclude_specs = [] excluded by the user.
if args.exclude_file: """
self.exclude_specs.extend(specs_from_text_file(args.exclude_file, concretize=False)) exclude_specs = []
if args.exclude_specs: if args.exclude_file:
self.exclude_specs.extend(spack.cmd.parse_specs(str(args.exclude_specs).split())) exclude_specs.extend(specs_from_text_file(args.exclude_file, concretize=False))
self.private = args.private if args.exclude_specs:
exclude_specs.extend(spack.cmd.parse_specs(str(args.exclude_specs).split()))
def __call__(self, x): def not_excluded(x):
return all([self._not_license_excluded(x), self._not_cmdline_excluded(x)]) return not any(x.satisfies(y) for y in exclude_specs)
def _not_license_excluded(self, x): return not_excluded
"""True if the spec is for a private mirror, or as long as the
package does not explicitly forbid redistributing source."""
if self.private:
return True
elif x.package_class.redistribute_source(x):
return True
else:
tty.debug(
"Skip adding {0} to mirror: the package.py file"
" indicates that a public mirror should not contain"
" it.".format(x.name)
)
return False
def _not_cmdline_excluded(self, x):
"""True if a spec was not explicitly excluded by the user."""
return not any(x.satisfies(y) for y in self.exclude_specs)
def concrete_specs_from_environment(): def concrete_specs_from_environment(selection_fn):
env = ev.active_environment() env = ev.active_environment()
assert env, "an active environment is required" assert env, "an active environment is required"
mirror_specs = env.all_specs() mirror_specs = env.all_specs()
mirror_specs = filter_externals(mirror_specs) mirror_specs = filter_externals(mirror_specs)
mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=selection_fn)
return mirror_specs return mirror_specs
def all_specs_with_all_versions(): def all_specs_with_all_versions(selection_fn):
specs = [spack.spec.Spec(n) for n in spack.repo.all_package_names()] specs = [spack.spec.Spec(n) for n in spack.repo.all_package_names()]
mirror_specs = spack.mirror.get_all_versions(specs) mirror_specs = spack.mirror.get_all_versions(specs)
mirror_specs.sort(key=lambda s: (s.name, s.version)) mirror_specs.sort(key=lambda s: (s.name, s.version))
mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=selection_fn)
return mirror_specs return mirror_specs
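The stable_partition helper used above splits a sequence by a predicate while preserving order; a minimal sketch of its semantics (assuming, as llnl.util.lang does, that the matching items come back first):

def stable_partition(seq, predicate_fn):
    true_items, false_items = [], []
    for item in seq:
        (true_items if predicate_fn(item) else false_items).append(item)
    return true_items, false_items

kept, excluded = stable_partition([1, 2, 3, 4], predicate_fn=lambda n: n % 2 == 1)
print(kept, excluded)  # [1, 3] [2, 4]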
@@ -472,6 +428,12 @@ def versions_per_spec(args):
return num_versions return num_versions
def create_mirror_for_individual_specs(mirror_specs, path, skip_unstable_versions):
present, mirrored, error = spack.mirror.create(path, mirror_specs, skip_unstable_versions)
tty.msg("Summary for mirror in {}".format(path))
process_mirror_stats(present, mirrored, error)
def process_mirror_stats(present, mirrored, error): def process_mirror_stats(present, mirrored, error):
p, m, e = len(present), len(mirrored), len(error) p, m, e = len(present), len(mirrored), len(error)
tty.msg( tty.msg(
@@ -517,28 +479,30 @@ def mirror_create(args):
# When no directory is provided, the source dir is used # When no directory is provided, the source dir is used
path = args.directory or spack.caches.fetch_cache_location() path = args.directory or spack.caches.fetch_cache_location()
mirror_specs, mirror_fn = _specs_and_action(args)
mirror_fn(mirror_specs, path=path, skip_unstable_versions=args.skip_unstable_versions)
def _specs_and_action(args):
include_fn = IncludeFilter(args)
if args.all and not ev.active_environment(): if args.all and not ev.active_environment():
mirror_specs = all_specs_with_all_versions() create_mirror_for_all_specs(
mirror_fn = create_mirror_for_all_specs path=path,
elif args.all and ev.active_environment(): skip_unstable_versions=args.skip_unstable_versions,
mirror_specs = concrete_specs_from_environment() selection_fn=not_excluded_fn(args),
mirror_fn = create_mirror_for_individual_specs )
else: return
mirror_specs = concrete_specs_from_user(args)
mirror_fn = create_mirror_for_individual_specs
mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=include_fn) if args.all and ev.active_environment():
return mirror_specs, mirror_fn create_mirror_for_all_specs_inside_environment(
path=path,
skip_unstable_versions=args.skip_unstable_versions,
selection_fn=not_excluded_fn(args),
)
return
mirror_specs = concrete_specs_from_user(args)
create_mirror_for_individual_specs(
mirror_specs, path=path, skip_unstable_versions=args.skip_unstable_versions
)
def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions): def create_mirror_for_all_specs(path, skip_unstable_versions, selection_fn):
mirror_specs = all_specs_with_all_versions(selection_fn=selection_fn)
mirror_cache, mirror_stats = spack.mirror.mirror_cache_and_stats( mirror_cache, mirror_stats = spack.mirror.mirror_cache_and_stats(
path, skip_unstable_versions=skip_unstable_versions path, skip_unstable_versions=skip_unstable_versions
) )
@@ -550,10 +514,11 @@ def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions):
process_mirror_stats(*mirror_stats.stats()) process_mirror_stats(*mirror_stats.stats())
def create_mirror_for_individual_specs(mirror_specs, path, skip_unstable_versions): def create_mirror_for_all_specs_inside_environment(path, skip_unstable_versions, selection_fn):
present, mirrored, error = spack.mirror.create(path, mirror_specs, skip_unstable_versions) mirror_specs = concrete_specs_from_environment(selection_fn=selection_fn)
tty.msg("Summary for mirror in {}".format(path)) create_mirror_for_individual_specs(
process_mirror_stats(present, mirrored, error) mirror_specs, path=path, skip_unstable_versions=skip_unstable_versions
)
def mirror_destroy(args): def mirror_destroy(args):
@@ -584,4 +549,7 @@ def mirror(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
action[args.mirror_command](args) action[args.mirror_command](args)

View File

@@ -19,7 +19,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
arguments.add_concretizer_args(subparser) arguments.add_concretizer_args(subparser)
@@ -33,6 +33,9 @@ def patch(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
specs = spack.cmd.parse_specs(args.specs, concretize=False) specs = spack.cmd.parse_specs(args.specs, concretize=False)
for spec in specs: for spec in specs:
_patch(spack.cmd.matching_spec_from_env(spec).package) _patch(spack.cmd.matching_spec_from_env(spec).package)

View File

@@ -116,38 +116,39 @@ def ipython_interpreter(args):
def python_interpreter(args): def python_interpreter(args):
"""A python interpreter is the default interpreter""" """A python interpreter is the default interpreter"""
# Fake a main python shell by setting __name__ to __main__.
console = code.InteractiveConsole({"__name__": "__main__", "spack": spack})
if "PYTHONSTARTUP" in os.environ:
startup_file = os.environ["PYTHONSTARTUP"]
if os.path.isfile(startup_file):
with open(startup_file) as startup:
console.runsource(startup.read(), startup_file, "exec")
if args.python_args and not args.python_command: if args.python_command:
propagate_exceptions_from(console)
console.runsource(args.python_command)
elif args.python_args:
propagate_exceptions_from(console)
sys.argv = args.python_args sys.argv = args.python_args
runpy.run_path(args.python_args[0], run_name="__main__") with open(args.python_args[0]) as file:
console.runsource(file.read(), args.python_args[0], "exec")
else: else:
# Fake a main python shell by setting __name__ to __main__. # Provides readline support, allowing user to use arrow keys
console = code.InteractiveConsole({"__name__": "__main__", "spack": spack}) console.push("import readline")
if "PYTHONSTARTUP" in os.environ: # Provide tabcompletion
startup_file = os.environ["PYTHONSTARTUP"] console.push("from rlcompleter import Completer")
if os.path.isfile(startup_file): console.push("readline.set_completer(Completer(locals()).complete)")
with open(startup_file) as startup: console.push('readline.parse_and_bind("tab: complete")')
console.runsource(startup.read(), startup_file, "exec")
if args.python_command:
propagate_exceptions_from(console)
console.runsource(args.python_command)
else:
# Provides readline support, allowing user to use arrow keys
console.push("import readline")
# Provide tabcompletion
console.push("from rlcompleter import Completer")
console.push("readline.set_completer(Completer(locals()).complete)")
console.push('readline.parse_and_bind("tab: complete")')
console.interact( console.interact(
"Spack version %s\nPython %s, %s %s" "Spack version %s\nPython %s, %s %s"
% ( % (
spack.spack_version, spack.spack_version,
platform.python_version(), platform.python_version(),
platform.system(), platform.system(),
platform.machine(), platform.machine(),
)
) )
)
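A self-contained sketch of the runpy-based branch above; the script is created on the fly purely so the example runs:

import runpy
import sys
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write('import sys; print("name:", __name__, "argv:", sys.argv)\n')
    script = f.name

sys.argv = [script, "--demo-flag"]
# executes the file as if it were the main program (__name__ == "__main__")
runpy.run_path(script, run_name="__main__")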
def propagate_exceptions_from(console): def propagate_exceptions_from(console):

View File

@@ -91,6 +91,7 @@ def setup_parser(subparser):
def _process_result(result, show, required_format, kwargs): def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat()
opt, _, _ = min(result.answers) opt, _, _ = min(result.answers)
if ("opt" in show) and (not required_format): if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels) tty.msg("Best of %d considered solutions." % result.nmodels)

View File

@@ -22,7 +22,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
subparser.add_argument( subparser.add_argument(
"-p", "--path", dest="path", help="path to stage package, does not add to spack tree" "-p", "--path", dest="path", help="path to stage package, does not add to spack tree"
) )
@@ -33,6 +33,9 @@ def stage(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if not args.specs: if not args.specs:
env = ev.active_environment() env = ev.active_environment()
if not env: if not env:

View File

@@ -228,7 +228,7 @@ def create_reporter(args, specs_to_test, test_suite):
def test_list(args): def test_list(args):
"""list installed packages with available tests""" """list installed packages with available tests"""
tagged = spack.repo.PATH.packages_with_tags(*args.tag) if args.tag else set() tagged = set(spack.repo.PATH.packages_with_tags(*args.tag)) if args.tag else set()
def has_test_and_tags(pkg_class): def has_test_and_tags(pkg_class):
tests = spack.install_test.test_functions(pkg_class) tests = spack.install_test.test_functions(pkg_class)

View File

@@ -34,13 +34,6 @@ def setup_parser(subparser):
default=False, default=False,
help="show full pytest help, with advanced options", help="show full pytest help, with advanced options",
) )
subparser.add_argument(
"-n",
"--numprocesses",
type=int,
default=1,
help="run tests in parallel up to this wide, default 1 for sequential",
)
# extra spack arguments to list tests # extra spack arguments to list tests
list_group = subparser.add_argument_group("listing tests") list_group = subparser.add_argument_group("listing tests")
@@ -236,16 +229,6 @@ def unit_test(parser, args, unknown_args):
if args.extension: if args.extension:
pytest_root = spack.extensions.load_extension(args.extension) pytest_root = spack.extensions.load_extension(args.extension)
if args.numprocesses is not None and args.numprocesses > 1:
pytest_args.extend(
[
"--dist",
"loadfile",
"--tx",
f"{args.numprocesses}*popen//python=spack-tmpconfig spack python",
]
)
# pytest.ini lives in the root of the spack repository. # pytest.ini lives in the root of the spack repository.
with llnl.util.filesystem.working_dir(pytest_root): with llnl.util.filesystem.working_dir(pytest_root):
if args.list: if args.list:

View File

@@ -8,7 +8,6 @@
 import os
 import platform
 import re
-import shlex
 import shutil
 import sys
 import tempfile
@@ -23,7 +22,6 @@
 import spack.error
 import spack.spec
 import spack.util.executable
-import spack.util.libc
 import spack.util.module_cmd
 import spack.version
 from spack.util.environment import filter_system_paths
@@ -109,6 +107,7 @@ def _parse_link_paths(string):
     """
     lib_search_paths = False
     raw_link_dirs = []
+    tty.debug("parsing implicit link info")
     for line in string.splitlines():
         if lib_search_paths:
             if line.startswith("\t"):
@@ -123,7 +122,7 @@ def _parse_link_paths(string):
             continue
         if _LINKER_LINE_IGNORE.match(line):
             continue
-        tty.debug(f"implicit link dirs: link line: {line}")
+        tty.debug("linker line: %s" % line)

         next_arg = False
         for arg in line.split():
@@ -139,12 +138,15 @@ def _parse_link_paths(string):
             link_dir_arg = _LINK_DIR_ARG.match(arg)
             if link_dir_arg:
                 link_dir = link_dir_arg.group("dir")
+                tty.debug("linkdir: %s" % link_dir)
                 raw_link_dirs.append(link_dir)

             link_dir_arg = _LIBPATH_ARG.match(arg)
             if link_dir_arg:
                 link_dir = link_dir_arg.group("dir")
+                tty.debug("libpath: %s", link_dir)
                 raw_link_dirs.append(link_dir)

+    tty.debug("found raw link dirs: %s" % ", ".join(raw_link_dirs))
     implicit_link_dirs = list()
     visited = set()
@@ -154,7 +156,7 @@ def _parse_link_paths(string):
             implicit_link_dirs.append(normalized_path)
             visited.add(normalized_path)

-    tty.debug(f"implicit link dirs: result: {', '.join(implicit_link_dirs)}")
+    tty.debug("found link dirs: %s" % ", ".join(implicit_link_dirs))
     return implicit_link_dirs
@@ -182,21 +184,6 @@ def _parse_non_system_link_dirs(string: str) -> List[str]:
     return list(p for p in link_dirs if not in_system_subdirectory(p))

-def _parse_dynamic_linker(output: str):
-    """Parse -dynamic-linker /path/to/ld.so from compiler output"""
-    for line in reversed(output.splitlines()):
-        if "-dynamic-linker" not in line:
-            continue
-        args = shlex.split(line)
-        for idx in reversed(range(1, len(args))):
-            arg = args[idx]
-            if arg == "-dynamic-linker" or args == "--dynamic-linker":
-                return args[idx + 1]
-            elif arg.startswith("--dynamic-linker=") or arg.startswith("-dynamic-linker="):
-                return arg.split("=", 1)[1]
 def in_system_subdirectory(path):
     system_dirs = [
         "/lib/",
@@ -347,40 +334,6 @@ def __init__(
         # used for version checks for API, e.g. C++11 flag
         self._real_version = None

-    def __eq__(self, other):
-        return (
-            self.cc == other.cc
-            and self.cxx == other.cxx
-            and self.fc == other.fc
-            and self.f77 == other.f77
-            and self.spec == other.spec
-            and self.operating_system == other.operating_system
-            and self.target == other.target
-            and self.flags == other.flags
-            and self.modules == other.modules
-            and self.environment == other.environment
-            and self.extra_rpaths == other.extra_rpaths
-            and self.enable_implicit_rpaths == other.enable_implicit_rpaths
-        )
-
-    def __hash__(self):
-        return hash(
-            (
-                self.cc,
-                self.cxx,
-                self.fc,
-                self.f77,
-                self.spec,
-                self.operating_system,
-                self.target,
-                str(self.flags),
-                str(self.modules),
-                str(self.environment),
-                str(self.extra_rpaths),
-                self.enable_implicit_rpaths,
-            )
-        )

     def verify_executables(self):
         """Raise an error if any of the compiler executables is not valid.
@@ -430,35 +383,18 @@ def real_version(self):
             self._real_version = self.version
         return self._real_version

-    def implicit_rpaths(self) -> List[str]:
+    def implicit_rpaths(self):
         if self.enable_implicit_rpaths is False:
             return []

-        output = self.compiler_verbose_output
-
-        if not output:
-            return []
-
-        link_dirs = _parse_non_system_link_dirs(output)
+        # Put CXX first since it has the most linking issues
+        # And because it has flags that affect linking
+        exe_paths = [x for x in [self.cxx, self.cc, self.fc, self.f77] if x]
+        link_dirs = self._get_compiler_link_paths(exe_paths)

         all_required_libs = list(self.required_libs) + Compiler._all_compiler_rpath_libraries
         return list(paths_containing_libs(link_dirs, all_required_libs))

-    @property
-    def default_libc(self) -> Optional["spack.spec.Spec"]:
-        """Determine libc targeted by the compiler from link line"""
-        output = self.compiler_verbose_output
-
-        if not output:
-            return None
-
-        dynamic_linker = _parse_dynamic_linker(output)
-
-        if not dynamic_linker:
-            return None
-
-        return spack.util.libc.libc_from_dynamic_linker(dynamic_linker)
-
     @property
     def required_libs(self):
         """For executables created with this compiler, the compiler libraries
@@ -467,41 +403,52 @@ def required_libs(self):
         # By default every compiler returns the empty list
         return []

-    @property
-    def compiler_verbose_output(self) -> Optional[str]:
-        """Verbose output from compiling a dummy C source file. Output is cached."""
-        if not hasattr(self, "_compile_c_source_output"):
-            self._compile_c_source_output = self._compile_dummy_c_source()
-        return self._compile_c_source_output
-
-    def _compile_dummy_c_source(self) -> Optional[str]:
-        cc = self.cc if self.cc else self.cxx
-        if not cc or not self.verbose_flag:
-            return None
+    def _get_compiler_link_paths(self, paths):
+        first_compiler = next((c for c in paths if c), None)
+        if not first_compiler:
+            return []
+        if not self.verbose_flag:
+            # In this case there is no mechanism to learn what link directories
+            # are used by the compiler
+            return []
+
+        # What flag types apply to first_compiler, in what order
+        flags = ["cppflags", "ldflags"]
+        if first_compiler == self.cc:
+            flags = ["cflags"] + flags
+        elif first_compiler == self.cxx:
+            flags = ["cxxflags"] + flags
+        else:
+            flags.append("fflags")

         try:
             tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
             fout = os.path.join(tmpdir, "output")
             fin = os.path.join(tmpdir, "main.c")

-            with open(fin, "w") as csource:
+            with open(fin, "w+") as csource:
                 csource.write(
-                    "int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }\n"
+                    "int main(int argc, char* argv[]) { " "(void)argc; (void)argv; return 0; }\n"
                 )
-            cc_exe = spack.util.executable.Executable(cc)
-            for flag_type in ["cflags" if cc == self.cc else "cxxflags", "cppflags", "ldflags"]:
-                cc_exe.add_default_arg(*self.flags.get(flag_type, []))
+            compiler_exe = spack.util.executable.Executable(first_compiler)
+            for flag_type in flags:
+                for flag in self.flags.get(flag_type, []):
+                    compiler_exe.add_default_arg(flag)

+            output = ""
             with self.compiler_environment():
-                return cc_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
+                output = str(
+                    compiler_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
+                )  # str for py2
+            return _parse_non_system_link_dirs(output)
         except spack.util.executable.ProcessError as pe:
             tty.debug("ProcessError: Command exited with non-zero status: " + pe.long_message)
-            return None
+            return []
         finally:
             shutil.rmtree(tmpdir, ignore_errors=True)

     @property
-    def verbose_flag(self) -> Optional[str]:
+    def verbose_flag(self):
         """
         This property should be overridden in the compiler subclass if a
         verbose flag is available.
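Note: both versions above ultimately feed the compiler's verbose output into _parse_non_system_link_dirs. A hedged sample of that input (paths invented; per in_system_subdirectory, directories under system prefixes such as /usr/lib/ are filtered out):

verbose_output = (
    "LIBRARY_PATH=/opt/gcc/12.3.0/lib64:/usr/lib/x86_64-linux-gnu\n"
    "collect2 -L/opt/gcc/12.3.0/lib64 -L/usr/lib/x86_64-linux-gnu main.o -lc\n"
)
# _parse_non_system_link_dirs(verbose_output) would be expected to keep only
# the non-system entry, e.g. ['/opt/gcc/12.3.0/lib64'].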

lib/spack/spack/compilers/__init__.py

@@ -10,7 +10,6 @@
 import itertools
 import multiprocessing.pool
 import os
-import warnings
 from typing import Dict, List, Optional, Tuple

 import archspec.cpu
@@ -110,128 +109,29 @@ def _to_dict(compiler):
     return {"compiler": d}

-def get_compiler_config(
-    configuration: "spack.config.Configuration",
-    *,
-    scope: Optional[str] = None,
-    init_config: bool = False,
-) -> List[Dict]:
+def get_compiler_config(scope=None, init_config=True):
     """Return the compiler configuration for the specified architecture."""
-    config = configuration.get("compilers", scope=scope) or []
+    config = spack.config.get("compilers", scope=scope) or []
     if config or not init_config:
         return config

-    merged_config = configuration.get("compilers")
+    merged_config = spack.config.get("compilers")
     if merged_config:
+        # Config is empty for this scope
+        # Do not init config because there is a non-empty scope
         return config

-    _init_compiler_config(configuration, scope=scope)
-    config = configuration.get("compilers", scope=scope)
+    _init_compiler_config(scope=scope)
+    config = spack.config.get("compilers", scope=scope)
     return config

-def get_compiler_config_from_packages(
-    configuration: "spack.config.Configuration", *, scope: Optional[str] = None
-) -> List[Dict]:
-    """Return the compiler configuration from packages.yaml"""
-    config = configuration.get("packages", scope=scope)
-    if not config:
-        return []
-
-    packages = []
-    compiler_package_names = supported_compilers() + list(package_name_to_compiler_name.keys())
-    for name, entry in config.items():
-        if name not in compiler_package_names:
-            continue
-        externals_config = entry.get("externals", None)
-        if not externals_config:
-            continue
-        packages.extend(_compiler_config_from_package_config(externals_config))
-
-    return packages
-
-def _compiler_config_from_package_config(config):
-    compilers = []
-    for entry in config:
-        compiler = _compiler_config_from_external(entry)
-        if compiler:
-            compilers.append(compiler)
-
-    return compilers
-
-def _compiler_config_from_external(config):
-    spec = spack.spec.parse_with_version_concrete(config["spec"])
-    # use str(spec.versions) to allow `@x.y.z` instead of `@=x.y.z`
-    compiler_spec = spack.spec.CompilerSpec(
-        package_name_to_compiler_name.get(spec.name, spec.name), spec.version
-    )
-
-    extra_attributes = config.get("extra_attributes", {})
-    prefix = config.get("prefix", None)
-    compiler_class = class_for_compiler_name(compiler_spec.name)
-
-    paths = extra_attributes.get("paths", {})
-    compiler_langs = ["cc", "cxx", "fc", "f77"]
-    for lang in compiler_langs:
-        if paths.setdefault(lang, None):
-            continue
-
-        if not prefix:
-            continue
-
-        # Check for files that satisfy the naming scheme for this compiler
-        bindir = os.path.join(prefix, "bin")
-        for f, regex in itertools.product(os.listdir(bindir), compiler_class.search_regexps(lang)):
-            if regex.match(f):
-                paths[lang] = os.path.join(bindir, f)
-
-    if all(v is None for v in paths.values()):
-        return None
-
-    if not spec.architecture:
-        host_platform = spack.platforms.host()
-        operating_system = host_platform.operating_system("default_os")
-        target = host_platform.target("default_target").microarchitecture
-    else:
-        target = spec.target
-        if not target:
-            host_platform = spack.platforms.host()
-            target = host_platform.target("default_target").microarchitecture
-
-        operating_system = spec.os
-        if not operating_system:
-            host_platform = spack.platforms.host()
-            operating_system = host_platform.operating_system("default_os")
-
-    compiler_entry = {
-        "compiler": {
-            "spec": str(compiler_spec),
-            "paths": paths,
-            "flags": extra_attributes.get("flags", {}),
-            "operating_system": str(operating_system),
-            "target": str(target.family),
-            "modules": config.get("modules", []),
-            "environment": extra_attributes.get("environment", {}),
-            "extra_rpaths": extra_attributes.get("extra_rpaths", []),
-            "implicit_rpaths": extra_attributes.get("implicit_rpaths", None),
-        }
-    }
-
-    return compiler_entry
-
-def _init_compiler_config(
-    configuration: "spack.config.Configuration", *, scope: Optional[str]
-) -> None:
+def _init_compiler_config(*, scope):
     """Compiler search used when Spack has no compilers."""
     compilers = find_compilers()
     compilers_dict = []
     for compiler in compilers:
         compilers_dict.append(_to_dict(compiler))
-    configuration.set("compilers", compilers_dict, scope=scope)
+    spack.config.set("compilers", compilers_dict, scope=scope)

 def compiler_config_files():
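Note: the deleted get_compiler_config_from_packages path consumed external package entries shaped roughly like the dict below (a sketch; the keys follow the deleted code, the concrete values are invented):

# Hypothetical packages.yaml external entry, as the parsed dict the deleted
# _compiler_config_from_external received:
external_entry = {
    "spec": "gcc@12.3.0",
    "prefix": "/opt/gcc/12.3.0",
    "modules": [],
    "extra_attributes": {
        "paths": {"cc": "/opt/gcc/12.3.0/bin/gcc", "cxx": "/opt/gcc/12.3.0/bin/g++"},
        "flags": {},
        "environment": {},
        "extra_rpaths": [],
    },
}
# Languages missing from "paths" (fc/f77 here) were filled by matching files in
# <prefix>/bin against the compiler class's search_regexps, per the deleted loop.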
@@ -242,22 +142,17 @@ def compiler_config_files():
         compiler_config = config.get("compilers", scope=name)
         if compiler_config:
             config_files.append(config.get_config_filename(name, "compilers"))
-        compiler_config_from_packages = get_compiler_config_from_packages(config, scope=name)
-        if compiler_config_from_packages:
-            config_files.append(config.get_config_filename(name, "packages"))
     return config_files

-def add_compilers_to_config(compilers, scope=None):
+def add_compilers_to_config(compilers, scope=None, init_config=True):
     """Add compilers to the config for the specified architecture.

     Arguments:
         compilers: a list of Compiler objects.
         scope: configuration scope to modify.
     """
-    compiler_config = get_compiler_config(
-        configuration=spack.config.CONFIG, scope=scope, init_config=False
-    )
+    compiler_config = get_compiler_config(scope, init_config)
     for compiler in compilers:
         if not compiler.cc:
             tty.debug(f"{compiler.spec} does not have a C compiler")
@@ -289,9 +184,6 @@ def remove_compiler_from_config(compiler_spec, scope=None):
     for current_scope in candidate_scopes:
         removal_happened |= _remove_compiler_from_scope(compiler_spec, scope=current_scope)

-    msg = "`spack compiler remove` will not remove compilers defined in packages.yaml"
-    msg += "\nTo remove these compilers, either edit the config or use `spack external remove`"
-    tty.debug(msg)
     return removal_happened

@@ -306,9 +198,7 @@ def _remove_compiler_from_scope(compiler_spec, scope):
         True if one or more compiler entries were actually removed, False otherwise
     """
     assert scope is not None, "a specific scope is needed when calling this function"
-    compiler_config = get_compiler_config(
-        configuration=spack.config.CONFIG, scope=scope, init_config=False
-    )
+    compiler_config = get_compiler_config(scope)
     filtered_compiler_config = [
         compiler_entry
         for compiler_entry in compiler_config
@@ -323,36 +213,22 @@ def _remove_compiler_from_scope(compiler_spec, scope):
     # We need to preserve the YAML type for comments, hence we are copying the
     # items in the list that has just been retrieved
     compiler_config[:] = filtered_compiler_config
-    spack.config.CONFIG.set("compilers", compiler_config, scope=scope)
+    spack.config.set("compilers", compiler_config, scope=scope)
     return True

-def all_compilers_config(
-    configuration: "spack.config.Configuration",
-    *,
-    scope: Optional[str] = None,
-    init_config: bool = True,
-) -> List["spack.compiler.Compiler"]:
+def all_compilers_config(scope=None, init_config=True):
     """Return a set of specs for all the compiler versions currently
     available to build with. These are instances of CompilerSpec.
     """
-    from_packages_yaml = get_compiler_config_from_packages(configuration, scope=scope)
-    if from_packages_yaml:
-        init_config = False
-    from_compilers_yaml = get_compiler_config(configuration, scope=scope, init_config=init_config)
-
-    result = from_compilers_yaml + from_packages_yaml
-    # Dedupe entries by the compiler they represent
-    # If the entry is invalid, treat it as unique for deduplication
-    key = lambda c: _compiler_from_config_entry(c["compiler"]) or id(c)
-    return list(llnl.util.lang.dedupe(result, key=key))
+    return get_compiler_config(scope, init_config)

 def all_compiler_specs(scope=None, init_config=True):
     # Return compiler specs from the merged config.
     return [
         spack.spec.parse_with_version_concrete(s["compiler"]["spec"], compiler=True)
-        for s in all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config)
+        for s in all_compilers_config(scope, init_config)
     ]

@@ -512,20 +388,11 @@ def find_specs_by_arch(compiler_spec, arch_spec, scope=None, init_config=True):

 def all_compilers(scope=None, init_config=True):
-    return all_compilers_from(
-        configuration=spack.config.CONFIG, scope=scope, init_config=init_config
-    )
-
-def all_compilers_from(configuration, scope=None, init_config=True):
-    compilers = []
-    for items in all_compilers_config(
-        configuration=configuration, scope=scope, init_config=init_config
-    ):
+    config = get_compiler_config(scope, init_config=init_config)
+    compilers = list()
+    for items in config:
         items = items["compiler"]
-        compiler = _compiler_from_config_entry(items)  # can be None in error case
-        if compiler:
-            compilers.append(compiler)
+        compilers.append(_compiler_from_config_entry(items))
     return compilers

@@ -536,7 +403,10 @@ def compilers_for_spec(
     """This gets all compilers that satisfy the supplied CompilerSpec.
     Returns an empty list if none are found.
     """
-    config = all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config)
+    if use_cache:
+        config = all_compilers_config(scope, init_config)
+    else:
+        config = get_compiler_config(scope, init_config)

     matches = set(find(compiler_spec, scope, init_config))
     compilers = []
@@ -546,7 +416,7 @@ def compilers_for_spec(

 def compilers_for_arch(arch_spec, scope=None):
-    config = all_compilers_config(spack.config.CONFIG, scope=scope)
+    config = all_compilers_config(scope)
     return list(get_compilers(config, arch_spec=arch_spec))

@@ -632,10 +502,7 @@ def _compiler_from_config_entry(items):
     compiler = _compiler_cache.get(config_id, None)

     if compiler is None:
-        try:
-            compiler = compiler_from_dict(items)
-        except UnknownCompilerError as e:
-            warnings.warn(e.message)
+        compiler = compiler_from_dict(items)
         _compiler_cache[config_id] = compiler

     return compiler

@@ -688,9 +555,7 @@ def get_compilers(config, cspec=None, arch_spec=None):
                 raise ValueError(msg)
             continue

-        compiler = _compiler_from_config_entry(items)
-        if compiler:
-            compilers.append(compiler)
+        compilers.append(_compiler_from_config_entry(items))

     return compilers

@@ -718,7 +583,9 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
     scope_to_compilers = {}
     for scope in config.scopes:
-        compilers = compilers_for_spec(compiler_spec, arch_spec=arch_spec, scope=scope)
+        compilers = compilers_for_spec(
+            compiler_spec, arch_spec=arch_spec, scope=scope, use_cache=False
+        )
         if compilers:
             scope_to_compilers[scope] = compilers

@@ -967,11 +834,10 @@ def _default_make_compilers(cmp_id, paths):
     make_mixed_toolchain(flat_compilers)

     # Finally, create the compiler list
-    compilers: List["spack.compiler.Compiler"] = []
+    compilers = []
     for compiler_id, _, compiler in flat_compilers:
         make_compilers = getattr(compiler_id.os, "make_compilers", _default_make_compilers)
-        candidates = make_compilers(compiler_id, compiler)
-        compilers.extend(x for x in candidates if x.cc is not None)
+        compilers.extend(make_compilers(compiler_id, compiler))

     return compilers

lib/spack/spack/compilers/clang.py

@@ -38,10 +38,10 @@ class Clang(Compiler):
     cxx_names = ["clang++"]

     # Subclasses use possible names of Fortran 77 compiler
-    f77_names = ["flang-new", "flang"]
+    f77_names = ["flang"]

     # Subclasses use possible names of Fortran 90 compiler
-    fc_names = ["flang-new", "flang"]
+    fc_names = ["flang"]

     version_argument = "--version"
@@ -171,11 +171,10 @@ def extract_version_from_output(cls, output):
         match = re.search(
             # Normal clang compiler versions are left as-is
-            r"(?:clang|flang-new) version ([^ )\n]+)-svn[~.\w\d-]*|"
+            r"clang version ([^ )\n]+)-svn[~.\w\d-]*|"
             # Don't include hyphenated patch numbers in the version
             # (see https://github.com/spack/spack/pull/14365 for details)
-            r"(?:clang|flang-new) version ([^ )\n]+?)-[~.\w\d-]*|"
-            r"(?:clang|flang-new) version ([^ )\n]+)",
+            r"clang version ([^ )\n]+?)-[~.\w\d-]*|" r"clang version ([^ )\n]+)",
             output,
         )
         if match:
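Note: the widened pattern accepts flang-new version banners that the clang-only pattern rejected. A quick check (the version string is illustrative):

import re

new_pattern = (
    r"(?:clang|flang-new) version ([^ )\n]+)-svn[~.\w\d-]*|"
    r"(?:clang|flang-new) version ([^ )\n]+?)-[~.\w\d-]*|"
    r"(?:clang|flang-new) version ([^ )\n]+)"
)
# Matched by the newer pattern but not by the older, clang-only one:
m = re.search(new_pattern, "flang-new version 18.1.0")
print(next(g for g in m.groups() if g))  # 18.1.0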

Some files were not shown because too many files have changed in this diff.