Compare commits
7 commits: packages/m...cws/pumiFi

| Author | SHA1 | Date |
|---|---|---|
| | 9dead10d70 | |
| | 63ba7db2d2 | |
| | 19c0208c1a | |
| | 9682347254 | |
| | f4f7309504 | |
| | 071a34df27 | |
| | 8d35a8498b | |
.devcontainer/devcontainer.json (deleted)
@@ -1,4 +0,0 @@
-{
-    "image": "ghcr.io/spack/ubuntu20.04-runner-amd64-gcc-11.4:2023.08.01",
-    "postCreateCommand": "./.devcontainer/postCreateCommand.sh"
-}
.devcontainer/postCreateCommand.sh (deleted)
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Load spack environment at terminal startup
-cat <<EOF >> /root/.bashrc
-. /workspaces/spack/share/spack/setup-env.sh
-EOF
-
-# Load spack environment in this script
-. /workspaces/spack/share/spack/setup-env.sh
-
-# Ensure generic targets for maximum matching with buildcaches
-spack config --scope site add "packages:all:require:[target=x86_64_v3]"
-spack config --scope site add "concretizer:targets:granularity:generic"
-
-# Find compiler and install gcc-runtime
-spack compiler find --scope site
-
-# Setup buildcaches
-spack mirror add --scope site develop https://binaries.spack.io/develop
-spack buildcache keys --install --trust
.github/pull_request_template.md (6 changes)
@@ -1,6 +0,0 @@
-<!--
-Remember that `spackbot` can help with your PR in multiple ways:
-- `@spackbot help` shows all the commands that are currently available
-- `@spackbot fix style` tries to push a commit to fix style issues in this PR
-- `@spackbot re-run pipeline` runs the pipelines again, if you have write access to the repository
--->
.github/workflows/audit.yaml (8 changes)
@@ -22,8 +22,8 @@ jobs:
       matrix:
         operating_system: ["ubuntu-latest", "macos-latest"]
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
       with:
         python-version: ${{inputs.python_version}}
     - name: Install Python packages
@@ -43,9 +43,7 @@ jobs:
         . share/spack/setup-env.sh
         $(which spack) audit packages
-        $(which spack) audit externals
-    - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+    - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
       if: ${{ inputs.with_coverage == 'true' }}
       with:
         flags: unittests,audits
-        token: ${{ secrets.CODECOV_TOKEN }}
         verbose: true
.github/workflows/bootstrap.yml (24 changes)
@@ -24,7 +24,7 @@ jobs:
           make patch unzip which xz python3 python3-devel tree \
           cmake bison bison-devel libstdc++-static
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
     - name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
           make patch unzip xz-utils python3 python3-dev tree \
           cmake bison
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
     - name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
           bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
           make patch unzip xz-utils python3 python3-dev tree
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
     - name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
           make patch unzip which xz python3 python3-devel tree \
           cmake bison
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
     - name: Setup repo
@@ -158,8 +158,8 @@ jobs:
       run: |
         brew install cmake bison@2.7 tree
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
       with:
         python-version: "3.12"
    - name: Bootstrap clingo
@@ -182,7 +182,7 @@ jobs:
       run: |
         brew install tree
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
     - name: Bootstrap clingo
       run: |
         set -ex
@@ -207,7 +207,7 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
     - name: Setup repo
@@ -250,7 +250,7 @@ jobs:
           bzip2 curl file g++ gcc patchelf gfortran git gzip \
           make patch unzip xz-utils python3 python3-dev tree
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
     - name: Setup non-root user
@@ -287,7 +287,7 @@ jobs:
           make patch unzip xz-utils python3 python3-dev tree \
           gawk
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
     - name: Setup non-root user
@@ -320,7 +320,7 @@ jobs:
         # Remove GnuPG since we want to bootstrap it
         sudo rm -rf /usr/local/bin/gpg
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
     - name: Bootstrap GnuPG
       run: |
         source share/spack/setup-env.sh
@@ -338,7 +338,7 @@ jobs:
         # Remove GnuPG since we want to bootstrap it
         sudo rm -rf /usr/local/bin/gpg
     - name: Checkout
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
     - name: Bootstrap GnuPG
       run: |
         source share/spack/setup-env.sh
.github/workflows/build-containers.yml (14 changes)
@@ -55,9 +55,9 @@ jobs:
     if: github.repository == 'spack/spack'
     steps:
       - name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2

-      - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
+      - uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c
         id: docker_meta
         with:
           images: |
@@ -96,10 +96,10 @@ jobs:
         uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20
+        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226

       - name: Log in to GitHub Container Registry
-        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
@@ -107,16 +107,18 @@ jobs:

       - name: Log in to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-        uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0
+        uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56
         with:
           context: dockerfiles/${{ matrix.dockerfile[0] }}
           platforms: ${{ matrix.dockerfile[1] }}
           push: ${{ github.event_name != 'pull_request' }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/ci.yaml (8 changes)
@@ -18,7 +18,6 @@ jobs:
   prechecks:
     needs: [ changes ]
     uses: ./.github/workflows/valid-style.yml
-    secrets: inherit
     with:
       with_coverage: ${{ needs.changes.outputs.core }}
   all-prechecks:
@@ -36,12 +35,12 @@ jobs:
       core: ${{ steps.filter.outputs.core }}
       packages: ${{ steps.filter.outputs.packages }}
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
      if: ${{ github.event_name == 'push' }}
      with:
        fetch-depth: 0
      # For pull requests it's not necessary to checkout the code
-    - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
+    - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50
      id: filter
      with:
        # See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
@@ -71,17 +70,14 @@ jobs:
     if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
     needs: [ prechecks, changes ]
     uses: ./.github/workflows/bootstrap.yml
-    secrets: inherit
   unit-tests:
     if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
     needs: [ prechecks, changes ]
     uses: ./.github/workflows/unit_tests.yaml
-    secrets: inherit
   windows:
     if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
     needs: [ prechecks ]
     uses: ./.github/workflows/windows_python.yml
-    secrets: inherit
   all:
     needs: [ windows, unit-tests, bootstrap ]
     runs-on: ubuntu-latest
.github/workflows/nightly-win-builds.yml (4 changes)
@@ -14,10 +14,10 @@ jobs:
   build-paraview-deps:
     runs-on: windows-latest
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
       with:
         python-version: 3.9
     - name: Install Python packages
.github/workflows/style/requirements.txt (4 changes)
@@ -1,5 +1,5 @@
-black==24.3.0
-clingo==5.7.1
+black==23.12.1
+clingo==5.6.2
 flake8==7.0.0
 isort==5.13.2
 mypy==1.8.0
.github/workflows/unit_tests.yaml (37 changes)
@@ -51,10 +51,10 @@ jobs:
         on_develop: false

     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
       with:
         fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
       with:
         python-version: ${{ matrix.python-version }}
     - name: Install System packages
@@ -91,19 +91,17 @@ jobs:
         UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
       run: |
         share/spack/qa/run-unit-tests
-    - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+    - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
       with:
         flags: unittests,linux,${{ matrix.concretizer }}
-        token: ${{ secrets.CODECOV_TOKEN }}
-        verbose: true
   # Test shell integration
   shell:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
       with:
         fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
       with:
         python-version: '3.11'
     - name: Install System packages
@@ -124,11 +122,9 @@ jobs:
         COVERAGE: true
       run: |
         share/spack/qa/run-shell-tests
-    - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+    - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
       with:
         flags: shelltests,linux
-        token: ${{ secrets.CODECOV_TOKEN }}
-        verbose: true

   # Test RHEL8 UBI with platform Python. This job is run
   # only on PRs modifying core Spack
@@ -141,7 +137,7 @@ jobs:
         dnf install -y \
             bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
             make patch tcl unzip which xz
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
     - name: Setup repo and non-root user
       run: |
         git --version
@@ -160,10 +156,10 @@ jobs:
   clingo-cffi:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
       with:
         fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
       with:
         python-version: '3.11'
     - name: Install System packages
@@ -185,23 +181,20 @@ jobs:
         SPACK_TEST_SOLVER: clingo
       run: |
         share/spack/qa/run-unit-tests
-    - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+    - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
       with:
         flags: unittests,linux,clingo
-        token: ${{ secrets.CODECOV_TOKEN }}
-        verbose: true
   # Run unit tests on MacOS
   macos:
-    runs-on: ${{ matrix.os }}
+    runs-on: macos-latest
     strategy:
       matrix:
-        os: [macos-latest, macos-14]
         python-version: ["3.11"]
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
       with:
         fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
       with:
         python-version: ${{ matrix.python-version }}
     - name: Install Python packages
@@ -223,8 +216,6 @@ jobs:
         $(which spack) solve zlib
         common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
         $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
-    - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+    - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
       with:
         flags: unittests,macos
-        token: ${{ secrets.CODECOV_TOKEN }}
-        verbose: true
.github/workflows/valid-style.yml (11 changes)
@@ -18,8 +18,8 @@ jobs:
   validate:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
       with:
         python-version: '3.11'
         cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
   style:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
       with:
         fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
       with:
         python-version: '3.11'
         cache: 'pip'
@@ -56,7 +56,6 @@ jobs:
         share/spack/qa/run-style-tests
   audit:
     uses: ./.github/workflows/audit.yaml
-    secrets: inherit
     with:
       with_coverage: ${{ inputs.with_coverage }}
       python_version: '3.11'
@@ -70,7 +69,7 @@ jobs:
         dnf install -y \
             bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
             make patch tcl unzip which xz
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
     - name: Setup repo and non-root user
       run: |
         git --version
.github/workflows/windows_python.yml (20 changes)
@@ -15,10 +15,10 @@ jobs:
   unit-tests:
     runs-on: windows-latest
     steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
      with:
        fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
      with:
        python-version: 3.9
    - name: Install Python packages
@@ -33,18 +33,16 @@ jobs:
        ./share/spack/qa/validate_last_exit.ps1
        coverage combine -a
        coverage xml
-    - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+    - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
      with:
        flags: unittests,windows
-        token: ${{ secrets.CODECOV_TOKEN }}
-        verbose: true
  unit-tests-cmd:
    runs-on: windows-latest
    steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
      with:
        fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
      with:
        python-version: 3.9
    - name: Install Python packages
@@ -59,18 +57,16 @@ jobs:
        ./share/spack/qa/validate_last_exit.ps1
        coverage combine -a
        coverage xml
-    - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+    - uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
      with:
        flags: unittests,windows
-        token: ${{ secrets.CODECOV_TOKEN }}
-        verbose: true
  build-abseil:
    runs-on: windows-latest
    steps:
-    - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+    - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
      with:
        fetch-depth: 0
-    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
      with:
        python-version: 3.9
    - name: Install Python packages
@@ -42,8 +42,3 @@ concretizer:
     # "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
     # "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
     strategy: minimal
-  # Option to specify compatiblity between operating systems for reuse of compilers and packages
-  # Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
-  # it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
-  # requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
-  os_compatible: {}
@@ -101,12 +101,6 @@ config:
   verify_ssl: true

-
-  # This is where custom certs for proxy/firewall are stored.
-  # It can be a path or environment variable. To match ssl env configuration
-  # the default is the environment variable SSL_CERT_FILE
-  ssl_certs: $SSL_CERT_FILE
-
   # Suppress gpg warnings from binary package verification
   # Only suppresses warnings, gpg failure will still fail the install
   # Potential rationale to set True: users have already explicitly trusted the
@@ -24,7 +24,6 @@ packages:
     elf: [elfutils]
     fftw-api: [fftw, amdfftw]
     flame: [libflame, amdlibflame]
-    fortran-rt: [gcc-runtime, intel-oneapi-runtime]
     fuse: [libfuse]
     gl: [glx, osmesa]
     glu: [mesa-glu, openglu]
@@ -35,9 +34,7 @@ packages:
     java: [openjdk, jdk, ibm-java]
     jpeg: [libjpeg-turbo, libjpeg]
     lapack: [openblas, amdlibflame]
-    libgfortran: [ gcc-runtime ]
     libglx: [mesa+glx, mesa18+glx]
-    libifcore: [ intel-oneapi-runtime ]
     libllvm: [llvm]
     libosmesa: [mesa+osmesa, mesa18+osmesa]
     lua-lang: [lua, lua-luajit-openresty, lua-luajit]
@@ -1119,9 +1119,6 @@ and ``3.4.2``. Similarly, ``@4.2:`` means any version above and including
 ``4.2``. As a short-hand, ``@3`` is equivalent to the range ``@3:3`` and
 includes any version with major version ``3``.

-Versions are ordered lexicograpically by its components. For more details
-on the order, see :ref:`the packaging guide <version-comparison>`.
-
 Notice that you can distinguish between the specific version ``@=3.2`` and
 the range ``@3.2``. This is useful for packages that follow a versioning
 scheme that omits the zero patch version number: ``3.2``, ``3.2.1``,
@@ -1133,10 +1130,6 @@ A version specifier can also be a list of ranges and specific versions,
 separated by commas. For example, ``@1.0:1.5,=1.7.1`` matches any version
 in the range ``1.0:1.5`` and the specific version ``1.7.1``.

 ^^^^^^^^^^^^
 Git versions
 ^^^^^^^^^^^^

 For packages with a ``git`` attribute, ``git`` references
 may be specified instead of a numerical version i.e. branches, tags
 and commits. Spack will stage and build based off the ``git``
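As an aside for readers of this compare, the range semantics quoted in the hunk above are easy to get wrong. Below is a small, self-contained sketch of those semantics written for this page; it is a toy matcher, not Spack's ``spack.version`` implementation, and every function name in it is made up.

```python
# Toy illustration of the version-range semantics described above; not
# Spack's actual spack.version code (all names here are hypothetical).

def parse_version(v):
    """Split "3.4.2" into a comparable tuple (3, 4, 2)."""
    return tuple(int(part) for part in v.split("."))

def in_range(version, lo=None, hi=None):
    """True if version lies in [lo, hi], where a bound acts as a prefix.

    "@4.2:" -> lo="4.2", hi=None; "@3" -> lo="3", hi="3" (matches any 3.x,
    because the candidate is trimmed to the bound's length before comparing).
    """
    v = parse_version(version)
    if lo is not None and v[: len(parse_version(lo))] < parse_version(lo):
        return False
    if hi is not None and v[: len(parse_version(hi))] > parse_version(hi):
        return False
    return True

def satisfies(version, ranges=(), exact=()):
    """A list specifier like "@1.0:1.5,=1.7.1" is a union of its parts."""
    return version in exact or any(in_range(version, lo, hi) for lo, hi in ranges)

assert in_range("3.4.2", lo="4.2") is False       # @4.2: excludes 3.4.2
assert in_range("4.3", lo="4.2")                   # @4.2: includes 4.3
assert in_range("3.2.1", lo="3", hi="3")           # @3 == @3:3, any 3.x
assert satisfies("1.7.1", ranges=[("1.0", "1.5")], exact=["1.7.1"])
```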
@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:

 .. code-block:: console

-   % spack -b find
+   % spack find -b
    ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
    ==> 11 installed packages
    -- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrapping store with:

    % spack clean -b
    ==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"

-   % spack -b find
+   % spack find -b
    ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
    ==> 0 installed packages
@@ -175,4 +175,4 @@ bootstrapping.

 This command needs to be run on a machine with internet access and the resulting folder
 has to be moved over to the air-gapped system. Once the local sources are added using the
-commands suggested at the prompt, they can be used to bootstrap Spack.
+commands suggested at the prompt, they can be used to bootstrap Spack.
@@ -250,7 +250,7 @@ generator is Ninja. To switch to the Ninja generator, simply add:

 .. code-block:: python

-   generator("ninja")
+   generator = "Ninja"


 ``CMakePackage`` defaults to "Unix Makefiles". If you switch to the
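The two spellings in this hunk come from different vintages of the ``CMakePackage`` API: the newer ``generator("ninja")`` directive on the removed line, and the older ``generator = "Ninja"`` class attribute on the added line. A hedged sketch of a package using the older attribute form follows; the package name, URL, and checksum are invented placeholders.

```python
# Illustrative package.py using the older class-attribute spelling from this
# hunk. The package itself is made up; the checksum is a placeholder.
from spack.package import *


class Mylib(CMakePackage):
    """Example library built with CMake using the Ninja generator."""

    homepage = "https://example.com/mylib"
    url = "https://example.com/mylib-1.0.tar.gz"

    version("1.0", sha256="0" * 64)  # placeholder checksum

    generator = "Ninja"              # older spelling (the added line above)
    depends_on("ninja", type="build")
```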
@@ -173,72 +173,6 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding
    ]

-^^^^^^^
-Testing
-^^^^^^^
-
-``PerlPackage`` provides a simple stand-alone test of the successfully
-installed package to confirm that installed perl module(s) can be used.
-These tests can be performed any time after the installation using
-``spack -v test run``. (For more information on the command, see
-:ref:`cmd-spack-test-run`.)
-
-The base class automatically detects perl modules based on the presence
-of ``*.pm`` files under the package's library directory. For example,
-the files under ``perl-bignum``'s perl library are:
-
-.. code-block:: console
-
-   $ find . -name "*.pm"
-   ./bigfloat.pm
-   ./bigrat.pm
-   ./Math/BigFloat/Trace.pm
-   ./Math/BigInt/Trace.pm
-   ./Math/BigRat/Trace.pm
-   ./bigint.pm
-   ./bignum.pm
-
-which results in the package having the ``use_modules`` property containing:
-
-.. code-block:: python
-
-   use_modules = [
-       "bigfloat",
-       "bigrat",
-       "Math::BigFloat::Trace",
-       "Math::BigInt::Trace",
-       "Math::BigRat::Trace",
-       "bigint",
-       "bignum",
-   ]
-
-.. note::
-
-   This list can often be used to catch missing dependencies.
-
-If the list is somehow wrong, you can provide the names of the modules
-yourself by overriding ``use_modules`` like so:
-
-.. code-block:: python
-
-   use_modules = ["bigfloat", "bigrat", "bigint", "bignum"]
-
-If you only want a subset of the automatically detected modules to be
-tested, you could instead define the ``skip_modules`` property on the
-package. So, instead of overriding ``use_modules`` as shown above, you
-could define the following:
-
-.. code-block:: python
-
-   skip_modules = [
-       "Math::BigFloat::Trace",
-       "Math::BigInt::Trace",
-       "Math::BigRat::Trace",
-   ]
-
-for the same use tests.
-
 ^^^^^^^^^^^^^^^^^^^^^
 Alternatives to Spack
 ^^^^^^^^^^^^^^^^^^^^^
@@ -199,7 +199,6 @@ def setup(sphinx):
     ("py:class", "contextlib.contextmanager"),
     ("py:class", "module"),
     ("py:class", "_io.BufferedReader"),
-    ("py:class", "_io.BytesIO"),
     ("py:class", "unittest.case.TestCase"),
     ("py:class", "_frozen_importlib_external.SourceFileLoader"),
     ("py:class", "clingo.Control"),
@@ -216,7 +215,6 @@ def setup(sphinx):
     ("py:class", "spack.spec.InstallStatus"),
     ("py:class", "spack.spec.SpecfileReaderBase"),
     ("py:class", "spack.install_test.Pb"),
-    ("py:class", "spack.filesystem_view.SimpleFilesystemView"),
 ]

 # The reST default role (used for this markup: `text`) to use for all documents.
@@ -145,22 +145,6 @@ hosts when making ``ssl`` connections. Set to ``false`` to disable, and
 tools like ``curl`` will use their ``--insecure`` options. Disabling
 this can expose you to attacks. Use at your own risk.

---------------------
-``ssl_certs``
---------------------
-
-Path to custom certificats for SSL verification. The value can be a
-filesytem path, or an environment variable that expands to a file path.
-The default value is set to the environment variable ``SSL_CERT_FILE``
-to use the same syntax used by many other applications that automatically
-detect custom certificates.
-When ``url_fetch_method:curl`` the ``config:ssl_certs`` should resolve to
-a single file. Spack will then set the environment variable ``CURL_CA_BUNDLE``
-in the subprocess calling ``curl``.
-If ``url_fetch_method:urllib`` then files and directories are supported i.e.
-``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
-will work.
-
 --------------------
 ``checksum``
 --------------------
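The removed ``ssl_certs`` paragraph describes behavior that is easy to restate as pseudocode. Below is a sketch of the described logic only; it is not the actual Spack source, and the function name is an assumption made for illustration.

```python
# Sketch of the ssl_certs resolution described above; illustrative only.
import os

def cert_environment(url_fetch_method, ssl_certs="$SSL_CERT_FILE"):
    """Resolve config:ssl_certs into env vars for the fetch subprocess."""
    path = os.path.expandvars(ssl_certs)   # value may be an env var reference
    if not path or path.startswith("$"):   # variable was unset: nothing to do
        return {}
    if url_fetch_method == "curl":
        # curl needs a single bundle file, passed via CURL_CA_BUNDLE
        return {"CURL_CA_BUNDLE": path} if os.path.isfile(path) else {}
    # urllib accepts both a file and a directory
    if os.path.isfile(path):
        return {"SSL_CERT_FILE": path}
    if os.path.isdir(path):
        return {"SSL_CERT_DIR": path}
    return {}

print(cert_environment("curl"))  # {} unless $SSL_CERT_FILE points at a file
```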
@@ -73,12 +73,9 @@ are six configuration scopes. From lowest to highest:
    Spack instance per project) or for site-wide settings on a multi-user
    machine (e.g., for a common Spack instance).

-#. **plugin**: Read from a Python project's entry points. Settings here affect
-   all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes.
-
 #. **user**: Stored in the home directory: ``~/.spack/``. These settings
    affect all instances of Spack and take higher precedence than site,
-   system, plugin, or defaults scopes.
+   system, or defaults scopes.

 #. **custom**: Stored in a custom directory specified by ``--config-scope``.
    If multiple scopes are listed on the command line, they are ordered
@@ -199,45 +196,6 @@ with MPICH. You can create different configuration scopes for use with
       mpi: [mpich]

-.. _plugin-scopes:
-
-^^^^^^^^^^^^^
-Plugin scopes
-^^^^^^^^^^^^^
-
-.. note::
-   Python version >= 3.8 is required to enable plugin configuration.
-
-Spack can be made aware of configuration scopes that are installed as part of a python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:
-
-.. code-block:: console
-
-   my-package/
-   ├── src
-   │   ├── my_package
-   │   │   ├── __init__.py
-   │   │   └── spack/
-   │   │   │   └── config.yaml
-   └── pyproject.toml
-
-adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:
-
-.. code-block:: toml
-
-   [project.entry_points."spack.config"]
-   my_package = "my_package:get_config_path"
-
-The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
-
-.. code-block:: python
-
-   import importlib.resources
-
-   def get_config_path():
-       dirname = importlib.resources.files("my_package").joinpath("spack")
-       if dirname.exists():
-           return str(dirname)

 .. _platform-scopes:
@@ -357,23 +357,91 @@ If there is a hook that you would like and is missing, you can propose to add a
 ``pre_install(spec)``
 """""""""""""""""""""

-A ``pre_install`` hook is run within the install subprocess, directly before the install starts.
-It expects a single argument of a spec.
+A ``pre_install`` hook is run within an install subprocess, directly before
+the install starts. It expects a single argument of a spec, and is run in
+a multiprocessing subprocess. Note that if you see ``pre_install`` functions associated with packages these are not hooks
+as we have defined them here, but rather callback functions associated with
+a package install.

-"""""""""""""""""""""""""""""""""""""
-``post_install(spec, explicit=None)``
-"""""""""""""""""""""""""""""""""""""
+""""""""""""""""""""""
+``post_install(spec)``
+""""""""""""""""""""""

-A ``post_install`` hook is run within the install subprocess, directly after the install finishes,
-but before the build stage is removed and the spec is registered in the database. It expects two
-arguments: spec and an optional boolean indicating whether this spec is being installed explicitly.
+A ``post_install`` hook is run within an install subprocess, directly after
+the install finishes, but before the build stage is removed. If you
+write one of these hooks, you should expect it to accept a spec as the only
+argument. This is run in a multiprocessing subprocess. This ``post_install`` is
+also seen in packages, but in this context not related to the hooks described
+here.

 """"""""""""""""""""""""""""""""""""""""""""""""""""
 ``pre_uninstall(spec)`` and ``post_uninstall(spec)``
 """"""""""""""""""""""""""""""""""""""""""""""""""""

 These hooks are currently used for cleaning up module files after uninstall.

+""""""""""""""""""""""""""
+``on_install_start(spec)``
+""""""""""""""""""""""""""
+
+This hook is run at the beginning of ``lib/spack/spack/installer.py``,
+in the install function of a ``PackageInstaller``,
+and importantly is not part of a build process, but before it. This is when
+we have just newly grabbed the task, and are preparing to install. If you
+write a hook of this type, you should provide the spec to it.
+
+.. code-block:: python
+
+   def on_install_start(spec):
+       """On start of an install, we want to...
+       """
+       print('on_install_start')
+
+
+""""""""""""""""""""""""""""
+``on_install_success(spec)``
+""""""""""""""""""""""""""""
+
+This hook is run on a successful install, and is also run inside the build
+process, akin to ``post_install``. The main difference is that this hook
+is run outside of the context of the stage directory, meaning after the
+build stage has been removed and the user is alerted that the install was
+successful. If you need to write a hook that is run on success of a particular
+phase, you should use ``on_phase_success``.
+
+""""""""""""""""""""""""""""
+``on_install_failure(spec)``
+""""""""""""""""""""""""""""
+
+This hook is run given an install failure that happens outside of the build
+subprocess, but somewhere in ``installer.py`` when something else goes wrong.
+If you need to write a hook that is relevant to a failure within a build
+process, you would want to instead use ``on_phase_failure``.
+
+"""""""""""""""""""""""""""
+``on_install_cancel(spec)``
+"""""""""""""""""""""""""""
+
+The same, but triggered if a spec install is cancelled for any reason.
+
+"""""""""""""""""""""""""""""""""""""""""""""""
+``on_phase_success(pkg, phase_name, log_file)``
+"""""""""""""""""""""""""""""""""""""""""""""""
+
+This hook is run within the install subprocess, and specifically when a phase
+successfully finishes. Since we are interested in the package, the name of
+the phase, and any output from it, we require:
+
+- **pkg**: the package variable, which also has the attached spec at ``pkg.spec``
+- **phase_name**: the name of the phase that was successful (e.g., configure)
+- **log_file**: the path to the file with output, in case you need to inspect or otherwise interact with it.
+
+"""""""""""""""""""""""""""""""""""""""""""""
+``on_phase_error(pkg, phase_name, log_file)``
+"""""""""""""""""""""""""""""""""""""""""""""
+
+In the case of an error during a phase, we might want to trigger some event
+with a hook, and this is the purpose of this particular hook. Akin to
+``on_phase_success`` we require the same variables - the package that failed,
+the name of the phase, and the log file where we might find errors.

 ^^^^^^^^^^^^^^^^^^^^^^
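Taken together, the hook signatures restored by this hunk can be exercised with a minimal module. The following is a hedged sketch of what a hook implementation matching those documented signatures could look like (e.g., a module under ``lib/spack/spack/hooks/``); it is written for illustration and is not shipped Spack code.

```python
# Minimal sketch of hooks matching the signatures documented above;
# illustrative only, not actual Spack source.

def pre_install(spec):
    # Runs in the install subprocess, right before the install starts.
    print(f"about to install {spec}")

def post_install(spec):
    # Runs after the install finishes, before the build stage is removed.
    print(f"finished installing {spec}")

def on_phase_success(pkg, phase_name, log_file):
    # pkg carries the spec as pkg.spec; log_file is the path to phase output.
    print(f"phase {phase_name} succeeded, log at {log_file}")

def on_phase_error(pkg, phase_name, log_file):
    print(f"phase {phase_name} failed; inspect {log_file}")
```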
@@ -142,21 +142,6 @@ user's prompt to begin with the environment name in brackets.
    $ spack env activate -p myenv
    [myenv] $ ...

-The ``activate`` command can also be used to create a new environment, if it is
-not already defined, by adding the ``--create`` flag. Managed and anonymous
-environments, anonymous environments are explained in the next section,
-can both be created using the same flags that `spack env create` accepts.
-If an environment already exists then spack will simply activate it and ignore the
-create specific flags.
-
-.. code-block:: console
-
-   $ spack env activate --create -p myenv
-   # ...
-   # [creates if myenv does not exist yet]
-   # ...
-   [myenv] $ ...
-
 To deactivate an environment, use the command:

 .. code-block:: console
@@ -416,23 +401,6 @@ that git clone if ``foo`` is in the environment.
 Further development on ``foo`` can be tested by reinstalling the environment,
 and eventually committed and pushed to the upstream git repo.

-If the package being developed supports out-of-source builds then users can use the
-``--build_directory`` flag to control the location and name of the build directory.
-This is a shortcut to set the ``package_attributes:build_directory`` in the
-``packages`` configuration (see :ref:`assigning-package-attributes`).
-The supplied location will become the build-directory for that package in all future builds.
-
-.. warning:: Potential pitfalls of setting the build directory
-   Spack does not check for out-of-source build compatibility with the packages and
-   so the onerous of making sure the package supports out-of-source builds is on
-   the user.
-   For example, most ``autotool`` and ``makefile`` packages do not support out-of-source builds
-   while all ``CMake`` packages do.
-   Understanding these nuances are on the software developers and we strongly encourage
-   developers to only redirect the build directory if they understand their package's
-   build-system.
-
 ^^^^^^^
 Loading
 ^^^^^^^
@@ -489,11 +457,11 @@ a ``packages.yaml`` file) could contain:
 .. code-block:: yaml

    spack:
-     # ...
+     ...
      packages:
        all:
          compiler: [intel]
-     # ...
+     ...

 This configuration sets the default compiler for all packages to
 ``intel``.
@@ -839,7 +807,7 @@ directories.
 .. code-block:: yaml

    spack:
-     # ...
+     ...
      view:
        mpis:
          root: /path/to/view
@@ -883,7 +851,7 @@ automatically named ``default``, so that
 .. code-block:: yaml

    spack:
-     # ...
+     ...
      view: True

 is equivalent to
@@ -891,7 +859,7 @@ is equivalent to
 .. code-block:: yaml

    spack:
-     # ...
+     ...
      view:
        default:
          root: .spack-env/view
@@ -901,7 +869,7 @@ and
 .. code-block:: yaml

    spack:
-     # ...
+     ...
      view: /path/to/view

 is equivalent to
@@ -909,7 +877,7 @@ is equivalent to
 .. code-block:: yaml

    spack:
-     # ...
+     ...
      view:
        default:
          root: /path/to/view
@@ -952,17 +920,6 @@ function, as shown in the example below:
       ^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
       all: "{name}-{version}/{compiler.name}-{compiler.version}"

-Projections also permit environment and spack configuration variable
-expansions as shown below:
-
-.. code-block:: yaml
-
-   projections:
-     all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIBLE"
-
-where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
-format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.
-
 The entries in the projections configuration file must all be either
 specs or the keyword ``all``. For each spec, the projection used will
 be the first non-``all`` entry that the spec satisfies, or ``all`` if
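The removed paragraph about ``$date`` and environment-variable expansion in projections is straightforward to emulate. Here is a sketch of that expansion as described; it is an assumption about the behavior, not Spack's implementation, and it leaves the ``{name}``-style spec placeholders alone since those are formatted elsewhere.

```python
# Sketch of projection expansion with $date and shell environment variables,
# as described in the removed text above; illustrative only.
import datetime
import os

def expand_projection(template):
    # $date expands to today's date in YYYY-MM-DD format...
    expanded = template.replace("$date", datetime.date.today().strftime("%Y-%m-%d"))
    # ...and any remaining $VARS are taken from the shell environment.
    return os.path.expandvars(expanded)

print(expand_projection("{name}-{version}/$date/$HOME"))
```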
@@ -111,39 +111,3 @@ The corresponding unit tests can be run giving the appropriate options to ``spack unit-test``:

   (5 durations < 0.005s hidden. Use -vv to show these durations.)
   =========================================== 5 passed in 5.06s ============================================

----------------------------------------
-Registering Extensions via Entry Points
----------------------------------------
-
-.. note::
-   Python version >= 3.8 is required to register extensions via entry points.
-
-Spack can be made aware of extensions that are installed as part of a python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:
-
-.. code-block:: console
-
-   my-package/
-   ├── src
-   │   ├── my_package
-   │   │   └── __init__.py
-   │   └── spack-scripting/  # the spack extensions
-   └── pyproject.toml
-
-adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:
-
-.. code-block:: toml
-
-   [project.entry_points."spack.extenions"]
-   my_package = "my_package:get_extension_path"
-
-The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
-
-.. code-block:: python
-
-   import importlib.resources
-
-   def get_extension_path():
-       dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
-       if dirname.exists():
-           return str(dirname)
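On the consuming side, discovering such registrations is ordinary ``importlib.metadata`` usage. The sketch below shows how any program could look up ``spack.extensions`` entry points; it is an assumption about the mechanism for illustration (not a copy of Spack's code), and it uses the Python 3.10+ ``group=`` selection API.

```python
# Sketch: discover paths registered under the "spack.extensions" entry point.
# Consumer-side illustration (Python 3.10+), not Spack's implementation.
from importlib.metadata import entry_points

def extension_paths():
    paths = []
    for ep in entry_points(group="spack.extensions"):
        get_path = ep.load()          # e.g. my_package.get_extension_path
        result = get_path()
        if result:
            paths.append(result)
    return paths

print(extension_paths())  # [] unless such a package is installed
```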
@@ -250,10 +250,9 @@ Compiler configuration

 Spack has the ability to build packages with multiple compilers and
 compiler versions. Compilers can be made available to Spack by
-specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
-or automatically by running ``spack compiler find``, but for
-convenience Spack will automatically detect compilers the first time
-it needs them.
+specifying them manually in ``compilers.yaml``, or automatically by
+running ``spack compiler find``, but for convenience Spack will
+automatically detect compilers the first time it needs them.

 .. _cmd-spack-compilers:
@@ -458,48 +457,6 @@ specification. The operations available to modify the environment are ``set``,
       prepend_path: # Similar for append|remove_path
         LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh

-.. note::
-
-   Spack is in the process of moving compilers from a separate
-   attribute to be handled like all other packages. As part of this
-   process, the ``compilers.yaml`` section will eventually be replaced
-   by configuration in the ``packages.yaml`` section. This new
-   configuration is now available, although it is not yet the default
-   behavior.
-
-Compilers can also be configured as external packages in the
-``packages.yaml`` config file. Any external package for a compiler
-(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
-assuming the paths to the compiler executables are determinable from
-the prefix.
-
-If the paths to the compiler executable are not determinable from the
-prefix, you can add them to the ``extra_attributes`` field. Similarly,
-all other fields from the compilers config can be added to the
-``extra_attributes`` field for an external representing a compiler.
-
-.. code-block:: yaml
-
-   packages:
-     gcc:
-       external:
-       - spec: gcc@12.2.0 arch=linux-rhel8-skylake
-         prefix: /usr
-         extra_attributes:
-           environment:
-             set:
-               GCC_ROOT: /usr
-     llvm:
-       external:
-       - spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
-         prefix: /usr
-         extra_attributes:
-           paths:
-             cc: /usr/bin/clang-with-suffix
-             cxx: /usr/bin/clang++-with-extra-info
-             fc: /usr/bin/gfortran
-             f77: /usr/bin/gfortran
-           extra_rpaths:
-           - /usr/lib/llvm/

 ^^^^^^^^^^^^^^^^^^^^^^^
 Build Your Own Compiler
@@ -666,7 +623,7 @@ Fortran.

    compilers:
    - compiler:
-       # ...
+       ...
       paths:
         cc: /usr/bin/clang
         cxx: /usr/bin/clang++
@@ -10,7 +10,7 @@ Modules (modules.yaml)
 ======================

 The use of module systems to manage user environment in a controlled way
-is a common practice at HPC centers that is sometimes embraced also by
+is a common practice at HPC centers that is often embraced also by
 individual programmers on their development machines. To support this
 common practice Spack integrates with `Environment Modules
 <http://modules.sourceforge.net/>`_ and `Lmod
@@ -21,38 +21,14 @@ Modules are one of several ways you can use Spack packages. For other
 options that may fit your use case better, you should also look at
 :ref:`spack load <spack-load>` and :ref:`environments <environments>`.

------------
-Quick start
------------
+----------------------------
+Using module files via Spack
+----------------------------

-In the current version of Spack, module files are not generated by default. To get started, you
-can generate module files for all currently installed packages by running either
-
-.. code-block:: console
-
-   $ spack module tcl refresh
-
-or
-
-.. code-block:: console
-
-   $ spack module lmod refresh
-
-Spack can also generate module files for all future installations automatically through the
-following configuration:
-
-.. code-block:: console
-
-   $ spack config add modules:default:enable:[tcl]
-
-or
-
-.. code-block:: console
-
-   $ spack config add modules:default:enable:[lmod]
-
-Assuming you have a module system installed, you should now be able to use the ``module`` command
-to interact with them:
+If you have installed a supported module system you should be able to
+run ``module avail`` to see what module
+files have been installed. Here is sample output of those programs,
+showing lots of installed packages:

 .. code-block:: console
@@ -89,17 +65,33 @@ scheme used at your site.
 Module file customization
 -------------------------

 Module files are generated by post-install hooks after the successful
 installation of a package.

+.. note::
+
+   Spack only generates modulefiles when a package is installed. If
+   you attempt to install a package and it is already installed, Spack
+   will not regenerate modulefiles for the package. This may lead to
+   inconsistent modulefiles if the Spack module configuration has
+   changed since the package was installed, either by editing a file
+   or changing scopes or environments.
+
+   Later in this section there is a subsection on :ref:`regenerating
+   modules <cmd-spack-module-refresh>` that will allow you to bring
+   your modules to a consistent state.
+
 The table below summarizes the essential information associated with
 the different file formats that can be generated by Spack:

-+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
-|           | Hierarchical | **Default root directory**   | **Default template file**                    | **Compatible tools** |
-+===========+==============+==============================+==============================================+======================+
-| ``tcl``   | No           | share/spack/modules          | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod    |
-+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
-| ``lmod``  | Yes          | share/spack/lmod             | share/spack/templates/modules/modulefile.lua | Lmod                 |
-+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
++-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
+|                             | **Hook name**      | **Default root directory**    | **Default template file**                    | **Compatible tools** |
++=============================+====================+===============================+==============================================+======================+
+| **Tcl - Non-Hierarchical**  | ``tcl``            | share/spack/modules           | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod    |
++-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
+| **Lua - Hierarchical**      | ``lmod``           | share/spack/lmod              | share/spack/templates/modules/modulefile.lua | Lmod                 |
++-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+


 Spack ships with sensible defaults for the generation of module files, but
@@ -110,7 +102,7 @@ In general you can override or extend the default behavior by:
 2. writing specific rules in the ``modules.yaml`` configuration file
 3. writing your own templates to override or extend the defaults

-The former method lets you express changes in the run-time environment
+The former method let you express changes in the run-time environment
 that are needed to use the installed software properly, e.g. injecting variables
 from language interpreters into their extensions. The latter two instead permit to
 fine tune the filesystem layout, content and creation of module files to meet
@@ -118,62 +110,79 @@ site specific conventions.

 .. _overide-api-calls-in-package-py:

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Setting environment variables dynamically in ``package.py``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Override API calls in ``package.py``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-There are two methods that you can implement in any ``package.py`` to dynamically affect the
-content of the module files generated by Spack. The most important one is
-``setup_run_environment``, which can be used to set environment variables in the module file that
-depend on the spec:
+There are two methods that you can override in any ``package.py`` to affect the
+content of the module files generated by Spack. The first one:

 .. code-block:: python

    def setup_run_environment(self, env):
-       if self.spec.satisfies("+foo"):
-           env.set("FOO", "bar")
+       pass

-The second, less commonly used, is ``setup_dependent_run_environment(self, env, dependent_spec)``,
-which allows a dependency to set variables in the module file of its dependents. This is typically
-used in packages like ``python``, ``r``, or ``perl`` to prepend the dependent's prefix to the
-search path of the interpreter (``PYTHONPATH``, ``R_LIBS``, ``PERL5LIB`` resp.), so it can locate
-the packages at runtime.
-
-For example, a simplified version of the ``python`` package could look like this:
+can alter the content of the module file associated with the same package where it is overridden.
+The second method:

 .. code-block:: python

    def setup_dependent_run_environment(self, env, dependent_spec):
-       if dependent_spec.package.extends(self.spec):
-           env.prepend_path("PYTHONPATH", dependent_spec.prefix.lib.python)
+       pass

-and would make any package that ``extends("python")`` have its library directory added to the
-``PYTHONPATH`` environment variable in the module file. It's much more convenient to set this
-variable here, than to repeat it in every Python extension's ``setup_run_environment`` method.
+can instead inject run-time environment modifications in the module files of packages
+that depend on it. In both cases you need to fill ``env`` with the desired
+list of environment modifications.
+
+.. admonition:: The ``r`` package and callback APIs
+
+   An example in which it is crucial to override both methods
+   is given by the ``r`` package. This package installs libraries and headers
+   in non-standard locations and it is possible to prepend the appropriate directory
+   to the corresponding environment variables:
+
+   ================== =================================
+   LD_LIBRARY_PATH    ``self.prefix/rlib/R/lib``
+   PKG_CONFIG_PATH    ``self.prefix/rlib/pkgconfig``
+   ================== =================================
+
+   with the following snippet:
+
+   .. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
+      :pyobject: R.setup_run_environment
+
+   The ``r`` package also knows which environment variable should be modified
+   to make language extensions provided by other packages available, and modifies
+   it appropriately in the override of the second method:
+
+   .. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
+      :pyobject: R.setup_dependent_run_environment

 .. _modules-yaml:

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The ``modules.yaml`` config file and module sets
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+Write a configuration file
+^^^^^^^^^^^^^^^^^^^^^^^^^^

-The configuration files that control module generation behavior are named ``modules.yaml``. The
-default configuration looks like this:
+The configuration files that control module generation behavior
+are named ``modules.yaml``. The default configuration:

 .. literalinclude:: _spack_root/etc/spack/defaults/modules.yaml
    :language: yaml

-You can define one or more **module sets**, each of which can be configured separately with regard
-to install location, naming scheme, inclusion and exclusion, autoloading, et cetera.
+activates the hooks to generate ``tcl`` module files and inspects
+the installation folder of each package for the presence of a set of subdirectories
+(``bin``, ``man``, ``share/man``, etc.). If any is found its full path is prepended
+to the environment variables listed below the folder name.

-The default module set is aptly named ``default``. All
-:ref:`Spack commands that operate on modules <maintaining-module-files>` apply to the ``default``
-module set, unless another module set is specified explicitly (with the ``--name`` flag).
+Spack modules can be configured for multiple module sets. The default
+module set is named ``default``. All Spack commands which operate on
+modules default to apply the ``default`` module set, but can be
+applied to any module set in the configuration.

-^^^^^^^^^^^^^^^^^^^^^^^^^
+"""""""""""""""""""""""""
 Changing the modules root
-^^^^^^^^^^^^^^^^^^^^^^^^^
+"""""""""""""""""""""""""

 As shown in the table above, the default module root for ``lmod`` is
 ``$spack/share/spack/lmod`` and the default root for ``tcl`` is
@@ -189,7 +198,7 @@ set by changing the ``roots`` key of the configuration.
      my_custom_lmod_modules:
        roots:
          lmod: /path/to/install/custom/lmod/modules
-     # ...
+     ...

 This configuration will create two module sets. The default module set
 will install its ``tcl`` modules to ``/path/to/install/tcl/modules``
@@ -215,32 +224,25 @@ location could be confusing to users of your modules. In the next
|
||||
section, we will discuss enabling and disabling module types (module
|
||||
file generators) for each module set.
|
||||
|
||||
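Putting both sets together, a complete ``roots`` configuration along these lines might look
as follows (paths are illustrative):

.. code-block:: yaml

   modules:
     default:
       roots:
         tcl: /path/to/install/tcl/modules
     my_custom_lmod_modules:
       roots:
         lmod: /path/to/install/custom/lmod/modules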
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically generating module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Spack can be configured to automatically generate module files as part of package installation.
This is done by adding the desired module systems to the ``enable`` list. Currently, the only
generator that is not active by default is ``lmod``, which produces hierarchical Lua module files.

.. code-block:: yaml

   modules:
     default:
       enable:
       - tcl
       - lmod
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuring ``tcl`` and ``lmod`` modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Each module system can then be configured separately. Configuration options that affect a
particular type of module file should be listed under a top-level key corresponding to the
generator being customized:

.. code-block:: yaml

   modules:
     default:
       tcl:
         # contains environment modules specific customizations
       lmod:

@@ -251,82 +253,16 @@ either change the layout of the module files on the filesystem, or they will affect
their content. For the latter, it is possible to use anonymous specs
to fine-tune the set of packages on which the modifications should be applied.
.. _autoloading-dependencies:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Autoloading and hiding dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A module file should set the variables that are needed for an application to work. But since an
application often has many dependencies, where should all the environment variables for those be
set? In Spack the rule is that each package sets the runtime variables that are needed by the
package itself, and no more. This way, dependencies can be loaded standalone too, and duplication
of environment variables is avoided.

That means, however, that if you want to use an application, you need to load the modules for all of its
dependencies. Of course this is not something you would want users to do manually.

Since Spack knows the dependency graph of every package, it can easily generate module files that
automatically load the modules for its dependencies recursively. This is enabled by default for both
Lmod and Environment Modules under the ``autoload: direct`` config option. The former system has
builtin support through the ``depends_on`` function; the latter simply uses a ``module load``
statement. Both module systems (at least in newer versions) do reference counting, so that if a
module is loaded by two different modules, it will only be unloaded after the others are.

The ``autoload`` key accepts the following values:

* ``none``: no autoloading
* ``run``: autoload direct *run*-type dependencies
* ``direct``: autoload direct *link and run*-type dependencies
* ``all``: autoload all dependencies

In the case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.

The ``direct`` option is the most correct: there are cases where pure link dependencies need to set
variables for themselves, or need to have variables of their own dependencies set.

In practice, however, ``run`` is often sufficient, and may make ``module load`` snappier.

The ``all`` option is discouraged and seldom used.

A common complaint about autoloading is the large number of modules that are visible to the user.
Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
packages you've explicitly installed are exposed by ``module avail``, but still allows for
autoloading of hidden dependencies. Lmod should support hiding implicits in general, while
Environment Modules requires version 4.7 or higher.
.. note::
   If supported by your module system, we highly encourage the following configuration, which
   enables autoloading and hiding of implicits. It ensures all runtime variables are set correctly,
   including those for dependencies, without overwhelming the user with a large number of available
   modules. Further, it makes it easier to get readable module names without collisions; see the
   section below on :ref:`modules-projections`.

   .. code-block:: yaml

      modules:
        default:
          tcl:
            hide_implicits: true
            all:
              autoload: direct  # or `run`
          lmod:
            hide_implicits: true
            all:
              autoload: direct  # or `run`
.. _anonymous_specs:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting environment variables for selected packages in config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In the configuration file you can filter particular specs, and make further changes to the
environment variables that go into their module files. Filtering is done with *anonymous specs*,
i.e. specs that **are not required to have a root package** and are thus used just to express
constraints. This is very powerful when you want to avoid
:ref:`modifying the package itself <overide-api-calls-in-package-py>`, or when you want to set
certain variables on multiple selected packages at once. For instance, in the snippet below:

.. code-block:: yaml
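   # The snippet body is elided in this diff. The following is an illustrative
   # reconstruction; spec names and values are examples only.
   modules:
     default:
       tcl:
         all:                    # applied to every generated module file
           environment:
             set:
               BAR: "bar"
         ^openmpi:               # only packages depending on openmpi
           environment:
             unset:
             - FOOBAR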
@@ -369,28 +305,12 @@ the variable ``FOOBAR`` will be unset.

.. note::
   Order does matter

   The modifications associated with the ``all`` keyword are always evaluated
   first, no matter where they appear in the configuration file. All the other changes to
   environment variables for matching specs are evaluated from top to bottom.

.. warning::

   As general advice, it's often better to set as few unnecessary variables as possible. For
   example, the following seemingly innocent and potentially useful configuration

   .. code-block:: yaml

      all:
        environment:
          set:
            "{name}_ROOT": "{prefix}"

   sets ``BINUTILS_ROOT`` to its prefix in modules for ``binutils``, which happens to break
   the ``gcc`` compiler: it uses this variable as its default search path for certain object
   files and libraries, and by merely setting it, everything fails to link.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Exclude or include specific module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can also use anonymous specs to prevent module files from being written, or
to force them to be written. Consider the case where you want to hide from users

@@ -410,19 +330,14 @@ you will prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
or any ``llvm`` installation.

It is safe to combine ``exclude`` with the ``autoload`` feature
:ref:`mentioned above <autoloading-dependencies>`. When ``exclude`` prevents a module file from
being generated for a dependency, the ``autoload`` feature will simply not generate a statement
to load it.
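A configuration along those lines might look like this (a sketch; the spec names are
illustrative):

.. code-block:: yaml

   modules:
     default:
       tcl:
         exclude:
         - '%gcc@4.4.7'
         include:
         - gcc
         - llvm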
.. _modules-projections:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Customize the naming of modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The names of environment modules generated by Spack are not always easy to
fully comprehend, due to the long hash in the name. There are three module
configuration options to help with that. The first is a global setting to
adjust the hash length. It can be set anywhere from 0 to 32, and has a default

@@ -438,13 +353,6 @@ shows how to set hash length in the module file names:

      tcl:
        hash_length: 7

.. tip::

   Using ``hide_implicits: true`` (see :ref:`autoloading-dependencies`) vastly reduces the number
   of modules exposed to the user. The hidden modules always contain the hash in their name, and
   are not influenced by the ``hash_length`` setting. Hidden implicits thus make it easier to use
   a short hash length, or no hash at all, without risking name conflicts.
To help make module names more readable, and to help alleviate name conflicts
with a short hash, one can use the ``suffixes`` option in the modules
configuration file. This option will add strings to modules that match a spec.

@@ -457,12 +365,12 @@ For instance, the following config options,

      tcl:
        all:
          suffixes:
            ^python@3.12: 'python-3.12'
            ^openblas: 'openblas'

will add a ``python-3.12`` version string to any packages compiled with
Python matching the spec ``python@3.12``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.
@@ -560,11 +468,41 @@ that are already in the Lmod hierarchy.

For hierarchies that are deeper than three layers, ``lmod spider`` may have some issues.
See `this discussion on the Lmod project <https://github.com/TACC/Lmod/issues/114>`_.

""""""""""""""""""""""
Select default modules
""""""""""""""""""""""

By default, when multiple modules of the same name share a directory,
the highest version number will be the default module. This behavior
of the ``module`` command can be overridden with a symlink named
``default`` pointing to the desired default module. If you wish to configure
default modules with Spack, add a ``defaults`` key to your modules
configuration:

.. code-block:: yaml

   modules:
     my-module-set:
       tcl:
         defaults:
         - gcc@10.2.1
         - hdf5@1.2.10+mpi+hl%gcc

These defaults may be arbitrarily specific. For any package that
satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a ``default`` symlink to the module
file as well.

.. warning::
   If Spack is configured to generate multiple default packages in the
   same directory, the last module file to be generated will be the
   default module.
.. _customize-env-modifications:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Customize environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can control which prefixes in a Spack package are added to
environment variables with the ``prefix_inspections`` section; this

@@ -662,9 +600,9 @@ stack to users who are likely to inspect the modules to find full
paths to software, when it is desirable to present the users with a
simpler set of paths than those generated by the Spack install tree.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Filter out environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Modifications to certain environment variables in module files are present by
default, for instance because they are generated by prefix inspections.

@@ -684,37 +622,49 @@ do so by using the ``exclude_env_vars``:

The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.
"""""""""""""""""""""
Autoload dependencies
"""""""""""""""""""""

Often it is required for a module to have its (transitive) dependencies loaded as well.
One example where this is useful is when one package needs to use executables provided
by its dependency; when the dependency is autoloaded, the executable will be in the
``PATH``. Similarly, for scripting languages such as Python, packages and their dependencies
have to be loaded together.

Autoloading can also be enabled conditionally:

.. code-block:: yaml

   modules:
     default:
       tcl:
         all:
           autoload: none
         ^python:
           autoload: direct

The configuration above will produce module files that
load their direct dependencies if the installed package depends on ``python``.
The allowed values for the ``autoload`` statement are ``none``, ``run``,
``direct``, and ``all``.
.. note::
   Tcl prerequisites

   In the ``tcl`` section of the configuration file it is possible to use
   the ``prerequisites`` directive, which accepts the same values as
   ``autoload``. It will produce module files that have a ``prereq``
   statement, which autoloads dependencies on Environment Modules when its
   ``auto_handling`` configuration option is enabled. If Environment Modules
   is installed with Spack, ``auto_handling`` is enabled by default starting
   with version 4.2. Otherwise, it is enabled by default since version 5.0.
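   A sketch of the corresponding configuration (values are illustrative):

   .. code-block:: yaml

      modules:
        default:
          tcl:
            all:
              prerequisites: direct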
.. _maintaining-module-files:

------------------------
Maintaining Module Files
------------------------

@@ -487,56 +487,6 @@ present. For instance with a configuration like:

you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Conflicts and strong preferences
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If the semantics of requirements are too strong, you can also express "strong preferences" and
"conflicts" in configuration files:

.. code-block:: yaml

   packages:
     all:
       prefer:
       - '%clang'
       conflict:
       - '+shared'

The ``prefer`` and ``conflict`` sections can be used whenever a ``require`` section is allowed.
The argument is always a list of constraints, and each constraint can be either a simple string
or a more complex object:

.. code-block:: yaml

   packages:
     all:
       conflict:
       - spec: '%clang'
         when: 'target=x86_64_v3'
         message: 'reason why clang cannot be used'

The ``spec`` attribute is mandatory, while both ``when`` and ``message`` are optional.
.. note::

   Requirements allow for expressing both "strong preferences" and "conflicts",
   though the syntax for doing so may not be immediately clear. For
   instance, if we want to prevent any package from using ``%clang``, we can set:

   .. code-block:: yaml

      packages:
        all:
          require:
          - one_of: ['%clang', '@:']

   Since only one of the requirements must hold, and ``@:`` is always true, the rule above is
   equivalent to a conflict. For "strong preferences" we need to substitute the ``one_of`` policy
   with ``any_of``.
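   For example, a strong preference for ``%clang`` expressed through requirements would
   follow the same pattern (a sketch based on the substitution just described):

   .. code-block:: yaml

      packages:
        all:
          require:
          - any_of: ['%clang', '@:']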
.. _package-preferences:

-------------------
Package Preferences
-------------------

@@ -647,8 +597,6 @@ manually placed files within the install prefix are owned by the
assigned group. If no group is assigned, Spack will allow the OS
default behavior to go as expected.

.. _assigning-package-attributes:

----------------------------
Assigning Package Attributes
----------------------------

@@ -659,11 +607,10 @@ You can assign class-level attributes in the configuration:

.. code-block:: yaml

   packages:
     mpileaks:
       package_attributes:
         # Override existing attributes
         url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
         # ... or add new ones
         x: 1

Attributes set this way will be accessible to any method executed
in the package.py file (e.g. the ``install()`` method). Values for these
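As a sketch of how such attributes might be consumed inside ``package.py`` (the method body
below is illustrative only):

.. code-block:: python

   from spack.package import *


   class Mpileaks(Package):
       def install(self, spec, prefix):
           # ``self.url`` reflects the overridden attribute, while ``self.x``
           # is the new attribute added through ``package_attributes``.
           print(self.url, self.x)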
@@ -893,50 +893,26 @@ as an option to the ``version()`` directive. Example situations would be a
"snapshot"-like Version Control System (VCS) tag, a VCS branch such as
``v6-16-00-patches``, or a URL specifying a regularly updated snapshot tarball.

.. _version-comparison:

^^^^^^^^^^^^^^^^^^
Version comparison
^^^^^^^^^^^^^^^^^^

Spack imposes a generic total ordering on the set of versions,
independently of the package they are associated with.
Most Spack versions are numeric, a tuple of integers; for example,
``0.1``, ``6.96`` or ``1.2.3.1``. In this very basic case, version
comparison is lexicographical on the numeric components:
``1.2 < 1.2.1 < 1.2.2 < 1.10``.

Spack also supports string components such as ``1.1.1a`` and
``1.y.0``. String components are considered less than numeric
components, so ``1.y.0 < 1.0``. This is for consistency with
`RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_. String
components do not have to be separated by dots or any other delimiter,
so the contrived version ``1y0`` is identical to ``1.y.0``.

Pre-release suffixes also contain string parts, but they are handled
in a special way. For example, ``1.2.3alpha1`` is parsed as a pre-release
of the version ``1.2.3``. This allows Spack to order it before the
actual release: ``1.2.3alpha1 < 1.2.3``. Spack supports alpha, beta and
release candidate suffixes: ``1.2alpha1 < 1.2beta1 < 1.2rc1 < 1.2``. Any
suffix not recognized as a pre-release is treated as an ordinary
string component, so ``1.2 < 1.2-mysuffix``.

Finally, there are a few special string components that are considered
"infinity versions". They include ``develop``, ``main``, ``master``,
``head``, ``trunk``, and ``stable``. For example: ``1.2 < develop``.
These are useful for specifying the most recent development version of
a package (often a moving target like a git branch), without assigning
a specific version number. Infinity versions are not automatically used when determining
the latest version of a package, unless explicitly required by another package or the user.
More formally, the order on versions is defined as follows. A version
string is split into a list of components based on delimiters such as
``.`` and ``-``, and on string boundaries. The components are split into
the **release** and a possible **pre-release** (if the last component
is numeric and the second to last is a string ``alpha``, ``beta`` or ``rc``).
The release components are ordered lexicographically, with comparison
between different types of components as follows:

#. The following special strings are considered larger than any other
   numeric or non-numeric version component, and satisfy the following

@@ -949,9 +925,6 @@ between different types of components as follows:

#. All other non-numeric components are less than numeric components,
   and are ordered alphabetically.

Finally, if the release components are equal, the pre-release components
are used to break the tie, in the obvious way.
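These rules can be checked interactively with Spack's own version type; a small sketch
(assuming Spack's ``lib/spack`` directory is importable):

.. code-block:: python

   from spack.version import Version

   assert Version("1.2") < Version("1.2.1") < Version("1.10")  # numeric, lexicographic
   assert Version("1.y.0") < Version("1.0")                    # strings before numbers
   assert Version("1.2.3alpha1") < Version("1.2.3")            # pre-release before release
   assert Version("1.2") < Version("develop")                  # infinity version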
The logic behind this sort order is two-fold:

#. Non-numeric versions are usually used for special cases while

@@ -7006,18 +6979,3 @@ you probably care most about are:
You may also care about `license exceptions
<https://spdx.org/licenses/exceptions-index.html>`_ that use the ``WITH`` operator,
e.g. ``Apache-2.0 WITH LLVM-exception``.

Many of the licenses that are currently in the Spack repositories have been
automatically determined. While this is great for bulk-adding license
information and is most likely correct, there are sometimes edge cases that
require manual intervention. To determine which licenses are validated and
which are not, there is the ``checked_by`` parameter in the license directive:

.. code-block:: python

   license("<license>", when="<when>", checked_by="<github username>")

When you have validated a license, either by doing so explicitly or
as part of packaging a new package, please set the ``checked_by`` parameter
to your GitHub username to signal that the license has been manually
verified.
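For instance, a manually verified license directive might read (the username is a
placeholder):

.. code-block:: python

   license("Apache-2.0 WITH LLVM-exception", checked_by="your-github-username")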
@@ -810,7 +810,7 @@ generated by ``spack ci generate``. You also want your generated rebuild jobs

.. code-block:: yaml

   spack:
     # ...
     ci:
       pipeline-gen:
       - build-job:
@@ -17,7 +17,7 @@ experimental software separately from the built-in repository. Spack
allows you to configure local repositories using either the
``repos.yaml`` file or the ``spack repo`` command.

A package repository is a directory structured like this::

   repo/
     repo.yaml
@@ -2,12 +2,12 @@ sphinx==7.2.6
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.0
docutils==0.20.1
pygments==2.17.2
urllib3==2.2.1
pytest==8.1.1
isort==5.13.2
black==24.3.0
flake8==7.0.0
mypy==1.9.0
5 lib/spack/env/cc vendored
@@ -248,7 +248,7 @@ case "$command" in
        lang_flags=C
        debug_flags="-g"
        ;;
    c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
        command="$SPACK_CXX"
        language="C++"
        comp="CXX"
@@ -526,7 +526,7 @@ categorize_arguments() {
                continue
            fi

            replaced="$after$stripped"

            # it matched, remove it
            shift
@@ -913,3 +913,4 @@ fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list
2 lib/spack/external/__init__.py vendored
@@ -18,7 +18,7 @@

* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf)

astunparse
----------------
3 lib/spack/external/archspec/__init__.py vendored
@@ -1,3 +1,2 @@
"""Init file to avoid namespace packages"""

__version__ = "0.2.3"
1 lib/spack/external/archspec/__main__.py vendored
@@ -3,7 +3,6 @@
"""

import sys

from .cli import main

sys.exit(main())
6 lib/spack/external/archspec/cli.py vendored
@@ -46,11 +46,7 @@ def _make_parser() -> argparse.ArgumentParser:

def cpu() -> int:
    """Run the `archspec cpu` subcommand."""
    try:
        print(archspec.cpu.host())
    except FileNotFoundError as exc:
        print(exc)
        return 1
    return 0
10 lib/spack/external/archspec/cpu/__init__.py vendored
@@ -5,14 +5,10 @@
"""The "cpu" package permits to query and compare different
CPU microarchitectures.
"""
from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
from .microarchitecture import TARGETS, generic_microarchitecture
from .microarchitecture import version_components
from .detect import host

__all__ = [
    "Microarchitecture",
372 lib/spack/external/archspec/cpu/detect.py vendored
@@ -4,17 +4,15 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures"""
import collections
import os
import platform
import re
import struct
import subprocess
import warnings
from typing import Dict, List, Optional, Set, Tuple, Union

from ..vendor.cpuid.cpuid import CPUID
from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture
from .schema import CPUID_JSON, TARGETS_JSON

#: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu
@@ -24,46 +22,43 @@
#: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {}

# Constants for commonly used architectures
X86_64 = "x86_64"
AARCH64 = "aarch64"
PPC64LE = "ppc64le"
PPC64 = "ppc64"
RISCV64 = "riscv64"


def detection(operating_system: str):
    """Decorator to mark functions that are meant to return partial information on the current cpu.

    Args:
        operating_system: operating system where this function can be used.
    """

    def decorator(factory):
        INFO_FACTORY[operating_system].append(factory)
        return factory

    return decorator
def partial_uarch(
    name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
    """Construct a partial microarchitecture, from information gathered during system scan."""
    return Microarchitecture(
        name=name,
        parents=[],
        vendor=vendor,
        features=features or set(),
        compilers={},
        generation=generation,
    )


@detection(operating_system="Linux")
def proc_cpuinfo() -> Microarchitecture:
    """Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``"""
    data = {}
    with open("/proc/cpuinfo") as file:  # pylint: disable=unspecified-encoding
        for line in file:
            key, separator, value = line.partition(":")
@@ -75,96 +70,11 @@ def proc_cpuinfo() -> Microarchitecture:
            #
            # we are on a blank line separating two cpus. Exit early as
            # we want to read just the first entry in /proc/cpuinfo
            if separator != ":" and data:
                break

            data[key.strip()] = value.strip()

    architecture = _machine()
    if architecture == X86_64:
        return partial_uarch(
            vendor=data.get("vendor_id", "generic"), features=_feature_set(data, key="flags")
        )

    if architecture == AARCH64:
        return partial_uarch(
            vendor=_canonicalize_aarch64_vendor(data),
            features=_feature_set(data, key="Features"),
        )

    if architecture in (PPC64LE, PPC64):
        generation_match = re.search(r"POWER(\d+)", data.get("cpu", ""))
        try:
            generation = int(generation_match.group(1))
        except AttributeError:
            # There might be no match under emulated environments. For instance
            # emulating a ppc64le with QEMU and Docker still reports the host
            # /proc/cpuinfo and not a Power
            generation = 0
        return partial_uarch(generation=generation)

    if architecture == RISCV64:
        if data.get("uarch") == "sifive,u74-mc":
            data["uarch"] = "u74mc"
        return partial_uarch(name=data.get("uarch", RISCV64))

    return generic_microarchitecture(architecture)
class CpuidInfoCollector:
    """Collects the information we need on the host CPU from cpuid"""

    # pylint: disable=too-few-public-methods
    def __init__(self):
        self.cpuid = CPUID()

        registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
        self.highest_basic_support = registers.eax
        self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
            "utf-8"
        )

        registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
        self.highest_extension_support = registers.eax

        self.features = self._features()

    def _features(self):
        result = set()

        def check_features(data):
            registers = self.cpuid.registers_for(**data["input"])
            for feature_check in data["bits"]:
                current = getattr(registers, feature_check["register"])
                if self._is_bit_set(current, feature_check["bit"]):
                    result.add(feature_check["name"])

        for call_data in CPUID_JSON["flags"]:
            if call_data["input"]["eax"] > self.highest_basic_support:
                continue
            check_features(call_data)

        for call_data in CPUID_JSON["extension-flags"]:
            if call_data["input"]["eax"] > self.highest_extension_support:
                continue
            check_features(call_data)

        return result

    def _is_bit_set(self, register: int, bit: int) -> bool:
        mask = 1 << bit
        return register & mask > 0


@detection(operating_system="Windows")
def cpuid_info():
    """Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
    architecture = _machine()
    if architecture == X86_64:
        data = CpuidInfoCollector()
        return partial_uarch(vendor=data.vendor, features=data.features)

    return generic_microarchitecture(architecture)


def _check_output(args, env):
@@ -173,25 +83,14 @@ def _check_output(args, env):
    return str(output.decode("utf-8"))
WINDOWS_MAPPING = {
    "AMD64": "x86_64",
    "ARM64": "aarch64",
}


def _machine():
    """Return the machine architecture we are on"""
    operating_system = platform.system()

    # If we are not on Darwin or Windows, trust what Python tells us
    if operating_system not in ("Darwin", "Windows"):
        return platform.machine()

    # Normalize windows specific names
    if operating_system == "Windows":
        platform_machine = platform.machine()
        return WINDOWS_MAPPING.get(platform_machine, platform_machine)

    # On Darwin it might happen that we are on M1, but using an interpreter
    # built for x86_64. In that case "platform.machine() == 'x86_64'", so we
    # need to fix that.
@@ -204,47 +103,54 @@ def _machine():
    if "Apple" in output:
        # Note that a native Python interpreter on Apple M1 would return
        # "arm64" instead of "aarch64". Here we normalize to the latter.
        return AARCH64

    return X86_64
@detection(operating_system="Darwin")
def sysctl_info() -> Microarchitecture:
    """Returns a raw info dictionary parsing the output of sysctl."""
    child_environment = _ensure_bin_usrbin_in_path()

    def sysctl(*args: str) -> str:
        return _check_output(["sysctl"] + list(args), env=child_environment).strip()

    if _machine() == X86_64:
        features = (
            f'{sysctl("-n", "machdep.cpu.features").lower()} '
            f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}'
        )
        features = set(features.split())

        # Flags detected on Darwin turned to their linux counterpart
        for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items():
            if darwin_flag in features:
                features.update(linux_flag.split())

        return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features)

    model = "unknown"
    model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
    if "m2" in model_str:
        model = "m2"
    elif "m1" in model_str:
        model = "m1"
    elif "apple" in model_str:
        model = "m1"

    return partial_uarch(name=model, vendor="Apple")
def _ensure_bin_usrbin_in_path():
    # Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there
    child_environment = dict(os.environ.items())
    search_paths = child_environment.get("PATH", "").split(os.pathsep)
    for additional_path in ("/sbin", "/usr/sbin"):
@@ -254,10 +160,22 @@ def _ensure_bin_usrbin_in_path():
    return child_environment
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
    """Adjust the vendor field to make it human-readable"""
    if "CPU implementer" not in data:
        return "generic"

    # Mapping numeric codes to vendor (ARM). This list is a merge from
    # different sources:
@@ -267,37 +185,43 @@ def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
    # https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
    # https://patchwork.kernel.org/patch/10524949/
    arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
    arm_code = data["CPU implementer"]
    return arm_vendors.get(arm_code, arm_code)


def _feature_set(data: Dict[str, str], key: str) -> Set[str]:
    return set(data.get(key, "").split())


def detected_info() -> Microarchitecture:
    """Returns a partial Microarchitecture with information on the CPU of the current host.

    This function calls all the viable factories one after the other until there's one that is
    able to produce the requested information. Falls back to a generic microarchitecture, if none
    of the calls succeed.
    """
    # pylint: disable=broad-except
    for factory in INFO_FACTORY[platform.system()]:
        try:
            return factory()
        except Exception as exc:
            warnings.warn(str(exc))

    return generic_microarchitecture(_machine())
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]:
    """Returns an unordered list of known micro-architectures that are compatible with the
    partial Microarchitecture passed as input.
    """
    architecture_family = _machine()
    # If a tester is not registered, assume no known target is compatible with the host
    tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
    return [x for x in TARGETS.values() if tester(info, x)] or [
        generic_microarchitecture(architecture_family)
@@ -306,8 +230,8 @@ def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitec

def host():
    """Detects the host micro-architecture and returns it."""
    # Retrieve information on the host's cpu
    info = detected_info()

    # Get a list of possible candidates for this micro-architecture
    candidates = compatible_microarchitectures(info)
@@ -334,15 +258,16 @@ def sorting_fn(item):
    return max(candidates, key=sorting_fn)
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]):
    """Decorator to register a function as a proper compatibility check.

    A compatibility check function takes a partial Microarchitecture object as a first argument,
    and an arbitrary target Microarchitecture as the second argument. It returns True if the
    target is compatible with the first argument, False otherwise.

    Args:
        architecture_family: architecture family for which this test can be used
    """
    # Turn the argument into something iterable
    if isinstance(architecture_family, str):
@@ -355,57 +280,86 @@ def decorator(func):
    return decorator


@compatibility_check(architecture_family=(PPC64LE, PPC64))
def compatibility_check_for_power(info, target):
    """Compatibility check for PPC64 and PPC64LE architectures."""
    # We can use a target if it descends from our machine type and our
    # generation (9 for POWER9, etc) is at least its generation.
    arch_root = TARGETS[_machine()]
    return (
        target == arch_root or arch_root in target.ancestors
    ) and target.generation <= info.generation


@compatibility_check(architecture_family=X86_64)
def compatibility_check_for_x86_64(info, target):
    """Compatibility check for x86_64 architectures."""
    # We can use a target if it descends from our machine type, is from our
    # vendor, and we have all of its features
    arch_root = TARGETS[X86_64]
    return (
        (target == arch_root or arch_root in target.ancestors)
        and target.vendor in (info.vendor, "generic")
        and target.features.issubset(info.features)
    )


@compatibility_check(architecture_family=AARCH64)
def compatibility_check_for_aarch64(info, target):
    """Compatibility check for AARCH64 architectures."""
    # At the moment, it's not clear how to detect compatibility with
    # a specific version of the architecture
    if target.vendor == "generic" and target.name != AARCH64:
        return False

    arch_root = TARGETS[AARCH64]
    arch_root_and_vendor = arch_root == target.family and target.vendor in (
        info.vendor,
        "generic",
    )

    # On macOS it seems impossible to get all the CPU features
    # with sysctl info, but for ARM we can get the exact model
    if platform.system() == "Darwin":
        model = TARGETS[info.name]
        return arch_root_and_vendor and (target == model or target in model.ancestors)

    return arch_root_and_vendor and target.features.issubset(info.features)


@compatibility_check(architecture_family=RISCV64)
def compatibility_check_for_riscv64(info, target):
    """Compatibility check for riscv64 architectures."""
    arch_root = TARGETS[RISCV64]
    return (target == arch_root or arch_root in target.ancestors) and (
        target.name == info.name or target.vendor == "generic"
    )
lib/spack/external/archspec/cpu/microarchitecture.py vendored

@@ -13,7 +13,6 @@
import archspec
import archspec.cpu.alias
import archspec.cpu.schema

from .alias import FEATURE_ALIASES
from .schema import LazyDictionary

@@ -48,7 +47,7 @@ class Microarchitecture:
            which has "broadwell" as a parent, supports running binaries
            optimized for "broadwell".
        vendor (str): vendor of the micro-architecture
        features (set of str): supported CPU flags. Note that the semantic
            of the flags in this field might vary among architectures, if
            at all present. For instance x86_64 processors will list all
            the flags supported by a given CPU while Arm processors will
@@ -181,28 +180,24 @@ def generic(self):
        generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
        return max(generics, key=lambda x: len(x.ancestors))

    def to_dict(self):
        """Returns a dictionary representation of this object."""
        return {
            "name": str(self.name),
            "vendor": str(self.vendor),
            "features": sorted(str(x) for x in self.features),
            "generation": self.generation,
            "parents": [str(x) for x in self.parents],
            "compilers": self.compilers,
        }

    @staticmethod
    def from_dict(data) -> "Microarchitecture":
        """Construct a microarchitecture from a dictionary representation."""
        return Microarchitecture(
            name=data["name"],
            parents=[TARGETS[x] for x in data["parents"]],
            vendor=data["vendor"],
            features=set(data["features"]),
            compilers=data.get("compilers", {}),
            generation=data.get("generation", 0),
        )

    def optimization_flags(self, compiler, version):
        """Returns a string containing the optimization flags that needs
@@ -276,7 +271,9 @@ def tuplify(ver):
            flags = flags_fmt.format(**compiler_entry)
            return flags

        msg = "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
        if compiler_info:
            versions = [x["versions"] for x in compiler_info]
            msg += f' [supported compiler versions are {", ".join(versions)}]'
@@ -292,7 +289,9 @@ def generic_microarchitecture(name):
    Args:
        name (str): name of the micro-architecture
    """
    return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={})


def version_components(version):
@@ -346,7 +345,9 @@ def fill_target_from_dict(name, data, targets):
        compilers = values.get("compilers", {})
        generation = values.get("generation", 0)

        targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)

    known_targets = {}
    data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
68 lib/spack/external/archspec/cpu/schema.py vendored
@@ -7,9 +7,7 @@
"""
import collections.abc
import json
import os
import pathlib
from typing import Tuple
import os.path


class LazyDictionary(collections.abc.MutableMapping):

@@ -48,65 +46,21 @@ def __len__(self):
        return len(self.data)


#: Environment variable that might point to a directory with a user defined JSON file
DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR"
def _load_json_file(json_file):
    json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
    json_dir = os.path.abspath(json_dir)

#: Environment variable that might point to a directory with extensions to JSON files
EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR"
    def _factory():
        filename = os.path.join(json_dir, json_file)
        with open(filename, "r", encoding="utf-8") as file:
            return json.load(file)


def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]:
    """Given a filename, returns the absolute path for the main JSON file, and an
    optional absolute path for an extension JSON file.

    Args:
        filename: filename for the JSON file
        allow_custom: if True, allows overriding the location where the file resides
    """
    json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
    if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
        json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
    json_dir = json_dir.absolute()
    json_file = json_dir / filename

    extension_file = None
    if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
        extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
        extension_dir.absolute()
        extension_file = extension_dir / filename

    return json_file, extension_file


def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
    with open(json_file, "r", encoding="utf-8") as file:
        data = json.load(file)

    if not extension_file or not extension_file.exists():
        return data

    with open(extension_file, "r", encoding="utf-8") as file:
        extension_data = json.load(file)

    top_level_sections = list(data.keys())
    for key in top_level_sections:
        if key not in extension_data:
            continue

        data[key].update(extension_data[key])

    return data
    return _factory


#: In memory representation of the data in microarchitectures.json,
#: loaded on first access
TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True))
TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))

#: JSON schema for microarchitectures.json, loaded on first access
TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json"))

#: Information on how to call 'cpuid' to get information on the HOST CPU
CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))

#: JSON schema for cpuid.json, loaded on first access
CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
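Both sides of the hunk above share one pattern: parse the JSON only on first access, with an environment variable optionally redirecting where the file is looked up. A minimal, self-contained sketch of that pattern — `LazyMapping` and `json_loader` are illustrative names, not archspec's API:

```python
import collections.abc
import json
import os
import pathlib


class LazyMapping(collections.abc.Mapping):
    """Defers calling ``factory`` until the mapping is first accessed."""

    def __init__(self, factory):
        self._factory = factory
        self._data = None

    @property
    def data(self):
        if self._data is None:
            self._data = self._factory()
        return self._data

    def __getitem__(self, key):
        return self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)


def json_loader(filename, env_var="ARCHSPEC_CPU_DIR"):
    """Build a factory reading ``filename`` from the default directory,
    unless ``env_var`` redirects the lookup (mirroring allow_custom=True)."""

    def _factory():
        json_dir = pathlib.Path(__file__).parent / "json" / "cpu"
        if env_var in os.environ:
            json_dir = pathlib.Path(os.environ[env_var])
        with open(json_dir / filename, "r", encoding="utf-8") as f:
            return json.load(f)

    return _factory


TARGETS = LazyMapping(json_loader("microarchitectures.json"))
# No file I/O has happened yet; the first lookup triggers the load:
# TARGETS["microarchitectures"]
```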
10 lib/spack/external/archspec/json/README.md vendored
@@ -9,11 +9,11 @@ language specific APIs.

Currently the repository contains the following JSON files:
```console
cpu/
├── cpuid.json                       # Contains information on CPUID calls to retrieve vendor and features on x86_64
├── cpuid_schema.json                # Schema for the file above
├── microarchitectures.json          # Contains information on CPU microarchitectures
└── microarchitectures_schema.json   # Schema for the file above
.
├── COPYRIGHT
└── cpu
    ├── microarchitectures.json          # Contains information on CPU microarchitectures
    └── microarchitectures_schema.json   # Schema for the file above
```
1050 lib/spack/external/archspec/json/cpu/cpuid.json vendored
File diff suppressed because it is too large
@@ -1,134 +0,0 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Schema for microarchitecture definitions and feature aliases",
  "type": "object",
  "additionalProperties": false,
  "properties": {
    "vendor": {
      "type": "object",
      "additionalProperties": false,
      "properties": {
        "description": {"type": "string"},
        "input": {
          "type": "object",
          "additionalProperties": false,
          "properties": {
            "eax": {"type": "integer"},
            "ecx": {"type": "integer"}
          }
        }
      }
    },
    "highest_extension_support": {
      "type": "object",
      "additionalProperties": false,
      "properties": {
        "description": {"type": "string"},
        "input": {
          "type": "object",
          "additionalProperties": false,
          "properties": {
            "eax": {"type": "integer"},
            "ecx": {"type": "integer"}
          }
        }
      }
    },
    "flags": {
      "type": "array",
      "items": {
        "type": "object",
        "additionalProperties": false,
        "properties": {
          "description": {"type": "string"},
          "input": {
            "type": "object",
            "additionalProperties": false,
            "properties": {
              "eax": {"type": "integer"},
              "ecx": {"type": "integer"}
            }
          },
          "bits": {
            "type": "array",
            "items": {
              "type": "object",
              "additionalProperties": false,
              "properties": {
                "name": {"type": "string"},
                "register": {"type": "string"},
                "bit": {"type": "integer"}
              }
            }
          }
        }
      }
    },
    "extension-flags": {
      "type": "array",
      "items": {
        "type": "object",
        "additionalProperties": false,
        "properties": {
          "description": {"type": "string"},
          "input": {
            "type": "object",
            "additionalProperties": false,
            "properties": {
              "eax": {"type": "integer"},
              "ecx": {"type": "integer"}
            }
          },
          "bits": {
            "type": "array",
            "items": {
              "type": "object",
              "additionalProperties": false,
              "properties": {
                "name": {"type": "string"},
                "register": {"type": "string"},
                "bit": {"type": "integer"}
              }
            }
          }
        }
      }
    }
  }
}
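For reference, a minimal instance (written here as a Python literal) that the deleted schema above would accept; the leaf and bit values are illustrative assumptions, not values copied from the removed cpuid.json:

```python
# A minimal document the deleted cpuid_schema.json accepts
# (leaf/bit values are illustrative, not copied from cpuid.json):
cpuid_info = {
    "vendor": {
        "description": "CPUID leaf 0; vendor string is returned in ebx, edx, ecx",
        "input": {"eax": 0, "ecx": 0},
    },
    "highest_extension_support": {
        "description": "Highest supported extended-function leaf",
        "input": {"eax": 0x80000000, "ecx": 0},
    },
    "flags": [
        {
            "description": "Feature bits from leaf 1",
            "input": {"eax": 1, "ecx": 0},
            "bits": [{"name": "sse2", "register": "edx", "bit": 26}],
        }
    ],
    "extension-flags": [],
}
```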
20 lib/spack/external/archspec/vendor/cpuid/LICENSE vendored
@@ -1,20 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 Anders Høst

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,76 +0,0 @@
cpuid.py
========

Now, this is silly!

Pure Python library for accessing information about x86 processors
by querying the [CPUID](http://en.wikipedia.org/wiki/CPUID)
instruction. Well, not exactly pure Python...

It works by allocating a small piece of virtual memory, copying
a raw x86 function to that memory, giving the memory execute
permissions and then calling the memory as a function. The injected
function executes the CPUID instruction and copies the result back
to a ctypes.Structure where it can be read by Python.

It should work fine on both 32- and 64-bit versions of Windows and Linux
running x86 processors. Apple OS X and other BSD systems should also work,
though they have not been tested.


Why?
----
For poops and giggles. Plus, having access to a low-level feature
without having to compile a C wrapper is pretty neat.


Examples
--------
Getting info with eax=0:

    import cpuid

    q = cpuid.CPUID()
    eax, ebx, ecx, edx = q(0)

Running the files:

    $ python example.py
    Vendor ID     : GenuineIntel
    CPU name      : Intel(R) Xeon(R) CPU W3550 @ 3.07GHz

    Vector instructions supported:
    SSE     : Yes
    SSE2    : Yes
    SSE3    : Yes
    SSSE3   : Yes
    SSE4.1  : Yes
    SSE4.2  : Yes
    SSE4a   : --
    AVX     : --
    AVX2    : --

    $ python cpuid.py
    CPUID       A        B        C        D
    00000000 0000000b 756e6547 6c65746e 49656e69
    00000001 000106a5 00100800 009ce3bd bfebfbff
    00000002 55035a01 00f0b2e4 00000000 09ca212c
    00000003 00000000 00000000 00000000 00000000
    00000004 00000000 00000000 00000000 00000000
    00000005 00000040 00000040 00000003 00001120
    00000006 00000003 00000002 00000001 00000000
    00000007 00000000 00000000 00000000 00000000
    00000008 00000000 00000000 00000000 00000000
    00000009 00000000 00000000 00000000 00000000
    0000000a 07300403 00000044 00000000 00000603
    0000000b 00000000 00000000 00000095 00000000
    80000000 80000008 00000000 00000000 00000000
    80000001 00000000 00000000 00000001 28100800
    80000002 65746e49 2952286c 6f655820 2952286e
    80000003 55504320 20202020 20202020 57202020
    80000004 30353533 20402020 37302e33 007a4847
    80000005 00000000 00000000 00000000 00000000
    80000006 00000000 00000000 01006040 00000000
    80000007 00000000 00000000 00000000 00000100
    80000008 00003024 00000000 00000000 00000000
172 lib/spack/external/archspec/vendor/cpuid/cpuid.py vendored
@@ -1,172 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#

from __future__ import print_function

import platform
import os
import ctypes
from ctypes import c_uint32, c_long, c_ulong, c_size_t, c_void_p, POINTER, CFUNCTYPE

# Posix x86_64:
# Three first call registers : RDI, RSI, RDX
# Volatile registers : RAX, RCX, RDX, RSI, RDI, R8-11

# Windows x86_64:
# Three first call registers : RCX, RDX, R8
# Volatile registers : RAX, RCX, RDX, R8-11

# cdecl 32 bit:
# Three first call registers : Stack (%esp)
# Volatile registers : EAX, ECX, EDX

_POSIX_64_OPC = [
    0x53,              # push %rbx
    0x89, 0xf0,        # mov %esi,%eax
    0x89, 0xd1,        # mov %edx,%ecx
    0x0f, 0xa2,        # cpuid
    0x89, 0x07,        # mov %eax,(%rdi)
    0x89, 0x5f, 0x04,  # mov %ebx,0x4(%rdi)
    0x89, 0x4f, 0x08,  # mov %ecx,0x8(%rdi)
    0x89, 0x57, 0x0c,  # mov %edx,0xc(%rdi)
    0x5b,              # pop %rbx
    0xc3               # retq
]

_WINDOWS_64_OPC = [
    0x53,                    # push %rbx
    0x89, 0xd0,              # mov %edx,%eax
    0x49, 0x89, 0xc9,        # mov %rcx,%r9
    0x44, 0x89, 0xc1,        # mov %r8d,%ecx
    0x0f, 0xa2,              # cpuid
    0x41, 0x89, 0x01,        # mov %eax,(%r9)
    0x41, 0x89, 0x59, 0x04,  # mov %ebx,0x4(%r9)
    0x41, 0x89, 0x49, 0x08,  # mov %ecx,0x8(%r9)
    0x41, 0x89, 0x51, 0x0c,  # mov %edx,0xc(%r9)
    0x5b,                    # pop %rbx
    0xc3                     # retq
]

_CDECL_32_OPC = [
    0x53,                    # push %ebx
    0x57,                    # push %edi
    0x8b, 0x7c, 0x24, 0x0c,  # mov 0xc(%esp),%edi
    0x8b, 0x44, 0x24, 0x10,  # mov 0x10(%esp),%eax
    0x8b, 0x4c, 0x24, 0x14,  # mov 0x14(%esp),%ecx
    0x0f, 0xa2,              # cpuid
    0x89, 0x07,              # mov %eax,(%edi)
    0x89, 0x5f, 0x04,        # mov %ebx,0x4(%edi)
    0x89, 0x4f, 0x08,        # mov %ecx,0x8(%edi)
    0x89, 0x57, 0x0c,        # mov %edx,0xc(%edi)
    0x5f,                    # pop %edi
    0x5b,                    # pop %ebx
    0xc3                     # ret
]

is_windows = os.name == "nt"
is_64bit = ctypes.sizeof(ctypes.c_voidp) == 8


class CPUID_struct(ctypes.Structure):
    _register_names = ("eax", "ebx", "ecx", "edx")
    _fields_ = [(r, c_uint32) for r in _register_names]

    def __getitem__(self, item):
        if item not in self._register_names:
            raise KeyError(item)
        return getattr(self, item)

    def __repr__(self):
        return "eax=0x{:x}, ebx=0x{:x}, ecx=0x{:x}, edx=0x{:x}".format(self.eax, self.ebx, self.ecx, self.edx)


class CPUID(object):
    def __init__(self):
        if platform.machine() not in ("AMD64", "x86_64", "x86", "i686"):
            raise SystemError("Only available for x86")

        if is_windows:
            if is_64bit:
                # VirtualAlloc seems to fail under some weird
                # circumstances when ctypes.windll.kernel32 is
                # used under 64 bit Python. CDLL fixes this.
                self.win = ctypes.CDLL("kernel32.dll")
                opc = _WINDOWS_64_OPC
            else:
                # Here ctypes.windll.kernel32 is needed to get the
                # right DLL. Otherwise it will fail when running
                # 32 bit Python on 64 bit Windows.
                self.win = ctypes.windll.kernel32
                opc = _CDECL_32_OPC
        else:
            opc = _POSIX_64_OPC if is_64bit else _CDECL_32_OPC

        size = len(opc)
        code = (ctypes.c_ubyte * size)(*opc)

        if is_windows:
            self.win.VirtualAlloc.restype = c_void_p
            self.win.VirtualAlloc.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong]
            self.addr = self.win.VirtualAlloc(None, size, 0x1000, 0x40)
            if not self.addr:
                raise MemoryError("Could not allocate RWX memory")
            ctypes.memmove(self.addr, code, size)
        else:
            from mmap import (
                mmap,
                MAP_PRIVATE,
                MAP_ANONYMOUS,
                PROT_WRITE,
                PROT_READ,
                PROT_EXEC,
            )
            self.mm = mmap(
                -1,
                size,
                flags=MAP_PRIVATE | MAP_ANONYMOUS,
                prot=PROT_WRITE | PROT_READ | PROT_EXEC,
            )
            self.mm.write(code)
            self.addr = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))

        func_type = CFUNCTYPE(None, POINTER(CPUID_struct), c_uint32, c_uint32)
        self.func_ptr = func_type(self.addr)

    def __call__(self, eax, ecx=0):
        struct = self.registers_for(eax=eax, ecx=ecx)
        return struct.eax, struct.ebx, struct.ecx, struct.edx

    def registers_for(self, eax, ecx=0):
        """Calls cpuid with eax and ecx set as the input arguments, and returns a structure
        containing eax, ebx, ecx, and edx.
        """
        struct = CPUID_struct()
        self.func_ptr(struct, eax, ecx)
        return struct

    def __del__(self):
        if is_windows:
            self.win.VirtualFree.restype = c_long
            self.win.VirtualFree.argtypes = [c_void_p, c_size_t, c_ulong]
            self.win.VirtualFree(self.addr, 0, 0x8000)
        else:
            self.mm.close()


if __name__ == "__main__":
    def valid_inputs():
        cpuid = CPUID()
        for eax in (0x0, 0x80000000):
            highest, _, _, _ = cpuid(eax)
            while eax <= highest:
                regs = cpuid(eax)
                yield (eax, regs)
                eax += 1

    print(" ".join(x.ljust(8) for x in ("CPUID", "A", "B", "C", "D")).strip())
    for eax, regs in valid_inputs():
        print("%08x" % eax, " ".join("%08x" % reg for reg in regs))
@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#

from __future__ import print_function

import struct
import cpuid


def cpu_vendor(cpu):
    _, b, c, d = cpu(0)
    return struct.pack("III", b, d, c).decode("utf-8")


def cpu_name(cpu):
    name = "".join((struct.pack("IIII", *cpu(0x80000000 + i)).decode("utf-8")
                    for i in range(2, 5)))

    return name.split('\x00', 1)[0]


def is_set(cpu, leaf, subleaf, reg_idx, bit):
    """
    @param {leaf} %eax
    @param {subleaf} %ecx, 0 in most cases
    @param {reg_idx} idx of [%eax, %ebx, %ecx, %edx], 0-based
    @param {bit} bit of reg selected by {reg_idx}, 0-based
    """

    regs = cpu(leaf, subleaf)

    if (1 << bit) & regs[reg_idx]:
        return "Yes"
    else:
        return "--"


if __name__ == "__main__":
    cpu = cpuid.CPUID()

    print("Vendor ID : %s" % cpu_vendor(cpu))
    print("CPU name : %s" % cpu_name(cpu))
    print()
    print("Vector instructions supported:")
    print("SSE : %s" % is_set(cpu, 1, 0, 3, 25))
    print("SSE2 : %s" % is_set(cpu, 1, 0, 3, 26))
    print("SSE3 : %s" % is_set(cpu, 1, 0, 2, 0))
    print("SSSE3 : %s" % is_set(cpu, 1, 0, 2, 9))
    print("SSE4.1 : %s" % is_set(cpu, 1, 0, 2, 19))
    print("SSE4.2 : %s" % is_set(cpu, 1, 0, 2, 20))
    print("SSE4a : %s" % is_set(cpu, 0x80000001, 0, 2, 6))
    print("AVX : %s" % is_set(cpu, 1, 0, 2, 28))
    print("AVX2 : %s" % is_set(cpu, 7, 0, 1, 5))
    print("BMI1 : %s" % is_set(cpu, 7, 0, 1, 3))
    print("BMI2 : %s" % is_set(cpu, 7, 0, 1, 8))
    # Intel RDT CMT/MBM
    print("L3 Monitoring : %s" % is_set(cpu, 0xf, 0, 3, 1))
    print("L3 Occupancy : %s" % is_set(cpu, 0xf, 1, 3, 0))
    print("L3 Total BW : %s" % is_set(cpu, 0xf, 1, 3, 1))
    print("L3 Local BW : %s" % is_set(cpu, 0xf, 1, 3, 2))
@@ -42,6 +42,11 @@ def convert_to_posix_path(path: str) -> str:
    return format_os_path(path, mode=Path.unix)


def convert_to_windows_path(path: str) -> str:
    """Converts the input path to Windows style."""
    return format_os_path(path, mode=Path.windows)


def convert_to_platform_path(path: str) -> str:
    """Converts the input path to the current platform's native style."""
    return format_os_path(path, mode=Path.platform_path)
@@ -12,7 +12,7 @@
# Archive extensions allowed in Spack
PREFIX_EXTENSIONS = ("tar", "TAR")
EXTENSIONS = ("gz", "bz2", "xz", "Z")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz", "whl")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz")

# Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = (

@@ -357,8 +357,10 @@ def strip_version_suffixes(path_or_url: str) -> str:
        r"i[36]86",
        r"ppc64(le)?",
        r"armv?(7l|6l|64)?",
        # PyPI wheels
        r"-(?:py|cp)[23].*",
        # PyPI
        r"[._-]py[23].*\.whl",
        r"[._-]cp[23].*\.whl",
        r"[._-]win.*\.exe",
    ]

    for regex in suffix_regexes:

@@ -401,7 +403,7 @@ def expand_contracted_extension_in_path(
def compression_ext_from_compressed_archive(extension: str) -> Optional[str]:
    """Returns compression extension for a compressed archive"""
    extension = expand_contracted_extension(extension)
    for ext in EXTENSIONS:
    for ext in [*EXTENSIONS]:
        if ext in extension:
            return ext
    return None
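The `compression_ext_from_compressed_archive` change above only rewrites the loop header; the matching logic is unchanged. A standalone sketch of that logic, with `expand_contracted_extension` reduced to a small assumed lookup table:

```python
# Sketch of the extension-matching logic above, assuming the same
# EXTENSIONS tuple; the real expand_contracted_extension ("tgz" -> "tar.gz")
# is simplified to a dictionary here.
EXTENSIONS = ("gz", "bz2", "xz", "Z")
CONTRACTIONS = {"tgz": "tar.gz", "tbz": "tar.bz2", "tbz2": "tar.bz2", "txz": "tar.xz"}


def compression_ext(extension):
    extension = CONTRACTIONS.get(extension, extension)
    for ext in EXTENSIONS:
        if ext in extension:
            return ext
    return None


assert compression_ext("tgz") == "gz"
assert compression_ext("tar.bz2") == "bz2"
assert compression_ext("zip") is None
```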
@@ -171,7 +171,7 @@ def polite_path(components: Iterable[str]):
@memoized
def _polite_antipattern():
    # A regex of all the characters we don't want in a filename
    return re.compile(r"[^A-Za-z0-9_+.-]")
    return re.compile(r"[^A-Za-z0-9_.-]")


def polite_filename(filename: str) -> str:
@@ -237,6 +237,16 @@ def _get_mime_type():
    return file_command("-b", "-h", "--mime-type")


@memoized
def _get_mime_type_compressed():
    """Same as _get_mime_type but attempts to check for
    compression first
    """
    mime_uncompressed = _get_mime_type()
    mime_uncompressed.add_default_arg("-Z")
    return mime_uncompressed


def mime_type(filename):
    """Returns the mime type and subtype of a file.

@@ -252,6 +262,21 @@ def mime_type(filename):
    return type, subtype


def compressed_mime_type(filename):
    """Same as mime_type but checks for type that has been compressed

    Args:
        filename (str): file to be analyzed

    Returns:
        Tuple containing the MIME type and subtype
    """
    output = _get_mime_type_compressed()(filename, output=str, error=str).strip()
    tty.debug("==> " + output)
    type, _, subtype = output.partition("/")
    return type, subtype


#: This generates the library filenames that may appear on any OS.
library_extensions = ["a", "la", "so", "tbd", "dylib"]
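Both helpers above reduce to a single `file` invocation; `-Z` asks newer GNU `file` releases to report the type of the uncompressed contents. A sketch of the same behavior with plain `subprocess` (Spack itself goes through its `Executable` wrapper, not `subprocess`):

```python
import subprocess


def mime_type(filename, uncompress=False):
    cmd = ["file", "-b", "-h", "--mime-type"]
    if uncompress:
        cmd.append("-Z")  # look inside compressed files (newer GNU file)
    output = subprocess.run(cmd + [filename], capture_output=True, text=True).stdout
    type_, _, subtype = output.strip().partition("/")
    return type_, subtype
```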
@@ -283,6 +308,13 @@ def paths_containing_libs(paths, library_names):
    return rpaths_to_include


@system_path_filter
def same_path(path1, path2):
    norm1 = os.path.abspath(path1).rstrip(os.path.sep)
    norm2 = os.path.abspath(path2).rstrip(os.path.sep)
    return norm1 == norm2


def filter_file(
    regex: str,
    repl: Union[str, Callable[[Match], str]],
@@ -877,34 +909,39 @@ def is_exe(path):
    return os.path.isfile(path) and os.access(path, os.X_OK)


def has_shebang(path):
    """Returns whether a path has a shebang line. Returns False if the file cannot be opened."""
    try:
        with open(path, "rb") as f:
            return f.read(2) == b"#!"
    except OSError:
        return False
@system_path_filter
def get_filetype(path_name):
    """
    Return the output of file path_name as a string to identify file type.
    """
    file = Executable("file")
    file.add_default_env("LC_ALL", "C")
    output = file("-b", "-h", "%s" % path_name, output=str, error=str)
    return output.strip()


@system_path_filter
def is_nonsymlink_exe_with_shebang(path):
    """Returns whether the path is an executable regular file with a shebang. Returns False too
    when the path is a symlink to a script, and also when the file cannot be opened."""
    """
    Returns whether the path is an executable script with a shebang.
    Return False when the path is a *symlink* to an executable script.
    """
    try:
        st = os.lstat(path)
    except OSError:
        return False
        # Should not be a symlink
        if stat.S_ISLNK(st.st_mode):
            return False

    # Should not be a symlink
    if stat.S_ISLNK(st.st_mode):
        return False
        # Should be executable
        if not st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
            return False

    # Should be executable
    if not st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
        # Should start with a shebang
        with open(path, "rb") as f:
            return f.read(2) == b"#!"
    except (IOError, OSError):
        return False

    return has_shebang(path)
@system_path_filter(arg_slice=slice(1))
def chgrp_if_not_world_writable(path, group):

@@ -1126,6 +1163,20 @@ def write_tmp_and_move(filename):
    shutil.move(tmp, filename)


@contextmanager
@system_path_filter
def open_if_filename(str_or_file, mode="r"):
    """Takes either a path or a file object, and opens it if it is a path.

    If it's a file object, just yields the file object.
    """
    if isinstance(str_or_file, str):
        with open(str_or_file, mode) as f:
            yield f
    else:
        yield str_or_file


@system_path_filter
def touch(path):
    """Creates an empty file at the specified path."""
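A short usage sketch of the new `open_if_filename` context manager; the body is copied from the hunk above so the example is self-contained:

```python
from contextlib import contextmanager
import io


@contextmanager
def open_if_filename(str_or_file, mode="r"):
    # Same body as in the diff above.
    if isinstance(str_or_file, str):
        with open(str_or_file, mode) as f:
            yield f
    else:
        yield str_or_file


def count_lines(str_or_file):
    # Callers can pass a path or an already-open file; the body is identical.
    with open_if_filename(str_or_file) as f:
        return sum(1 for _ in f)


print(count_lines(io.StringIO("a\nb\n")))  # 2, and no file is opened
```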
@@ -1183,47 +1234,6 @@ def get_single_file(directory):
    return fnames[0]


@system_path_filter
def windows_sfn(path: os.PathLike):
    """Returns the 8.3 filename (SFN) representation of path.

    8.3 filenames (SFN, or short filenames) are a file naming convention used
    prior to Win95 that Windows still (and will continue to) support. This
    convention caps filenames at 8 characters and, most importantly, does not
    allow for spaces, in addition to other restrictions. The scheme is
    generally the same as a normal Windows file scheme, but all spaces are
    removed and the filename is capped at 6 characters. The remaining
    characters are replaced with ~N, where N is the ordinal of the file among
    those in a directory sharing the same prefix, i.e. Program Files and
    Program Files (x86) would be PROGRA~1 and PROGRA~2 respectively.
    Further, all file/directory names are all caps (although modern Windows
    is case insensitive in practice).
    Conversion is accomplished by fileapi.h GetShortPathNameW

    Returns paths in 8.3 filename form

    Note: this method is a no-op on Linux

    Args:
        path: Path to be transformed into SFN (8.3 filename) format
    """
    # This should not be run-able on linux/macos
    if sys.platform != "win32":
        return path
    path = str(path)
    import ctypes

    k32 = ctypes.WinDLL("kernel32", use_last_error=True)
    # stub Windows types TCHAR[LENGTH]
    TCHAR_arr = ctypes.c_wchar * len(path)
    ret_str = TCHAR_arr()
    k32.GetShortPathNameW(path, ret_str, len(path))
    return ret_str.value
@contextmanager
def temp_cwd():
    tmp_dir = tempfile.mkdtemp()

@@ -1238,6 +1248,19 @@ def temp_cwd():
        shutil.rmtree(tmp_dir, **kwargs)


@contextmanager
@system_path_filter
def temp_rename(orig_path, temp_path):
    same_path = os.path.realpath(orig_path) == os.path.realpath(temp_path)
    if not same_path:
        shutil.move(orig_path, temp_path)
    try:
        yield
    finally:
        if not same_path:
            shutil.move(temp_path, orig_path)
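A typical use of `temp_rename`: move a path aside, run something that must see it absent, and restore it even on error. The paths and the helper called inside the block are hypothetical:

```python
# Hypothetical usage: regenerate_default_config is an assumed helper,
# not part of the diff above.
with temp_rename("/opt/app/config.yaml", "/opt/app/config.yaml.bak"):
    regenerate_default_config("/opt/app/config.yaml")
# The finally clause has moved config.yaml.bak back to config.yaml here,
# even if regenerate_default_config raised.
```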
@system_path_filter
def can_access(file_name):
    """True if we have read/write access to the file."""

@@ -1354,89 +1377,120 @@ def traverse_tree(
            yield (source_path, dest_path)


def lexists_islink_isdir(path):
    """Computes the tuple (lexists(path), islink(path), isdir(path)) in a minimal
    number of stat calls on unix. Use os.path and symlink.islink methods for windows."""
    if sys.platform == "win32":
        if not os.path.lexists(path):
            return False, False, False
        return os.path.lexists(path), islink(path), os.path.isdir(path)
    # First try to lstat, so we know if it's a link or not.
    try:
        lst = os.lstat(path)
    except (IOError, OSError):
        return False, False, False

    is_link = stat.S_ISLNK(lst.st_mode)

    # Check whether file is a dir.
    if not is_link:
        is_dir = stat.S_ISDIR(lst.st_mode)
        return True, is_link, is_dir

    # Check whether symlink points to a dir.
    try:
        st = os.stat(path)
        is_dir = stat.S_ISDIR(st.st_mode)
    except (IOError, OSError):
        # Dangling symlink (i.e. it lexists but not exists)
        is_dir = False

    return True, is_link, is_dir
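The return values of `lexists_islink_isdir` for the common cases, as a sketch against a scratch layout; the commented results follow directly from the code above:

```python
import os
import tempfile

d = tempfile.mkdtemp()
os.mkdir(os.path.join(d, "dir"))
os.symlink(os.path.join(d, "dir"), os.path.join(d, "link"))
os.symlink(os.path.join(d, "gone"), os.path.join(d, "dangling"))

# Each call costs at most two stat syscalls on unix:
# lexists_islink_isdir(os.path.join(d, "dir"))      -> (True, False, True)
# lexists_islink_isdir(os.path.join(d, "link"))     -> (True, True, True)
# lexists_islink_isdir(os.path.join(d, "dangling")) -> (True, True, False)
# lexists_islink_isdir(os.path.join(d, "missing"))  -> (False, False, False)
```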
class BaseDirectoryVisitor:
    """Base class and interface for :py:func:`visit_directory_tree`."""

    def visit_file(self, root: str, rel_path: str, depth: int) -> None:
    def visit_file(self, root, rel_path, depth):
        """Handle the non-symlink file at ``os.path.join(root, rel_path)``

        Parameters:
            root: root directory
            rel_path: relative path to current file from ``root``
            root (str): root directory
            rel_path (str): relative path to current file from ``root``
            depth (int): depth of current file from the ``root`` directory"""
        pass

    def visit_symlinked_file(self, root: str, rel_path: str, depth) -> None:
        """Handle the symlink to a file at ``os.path.join(root, rel_path)``. Note: ``rel_path`` is
        the location of the symlink, not to what it is pointing to. The symlink may be dangling.
    def visit_symlinked_file(self, root, rel_path, depth):
        """Handle the symlink to a file at ``os.path.join(root, rel_path)``.
        Note: ``rel_path`` is the location of the symlink, not to what it is
        pointing to. The symlink may be dangling.

        Parameters:
            root: root directory
            rel_path: relative path to current symlink from ``root``
            depth: depth of current symlink from the ``root`` directory"""
            root (str): root directory
            rel_path (str): relative path to current symlink from ``root``
            depth (int): depth of current symlink from the ``root`` directory"""
        pass

    def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
    def before_visit_dir(self, root, rel_path, depth):
        """Return True from this function to recurse into the directory at
        os.path.join(root, rel_path). Return False in order not to recurse further.

        Parameters:
            root: root directory
            rel_path: relative path to current directory from ``root``
            depth: depth of current directory from the ``root`` directory
            root (str): root directory
            rel_path (str): relative path to current directory from ``root``
            depth (int): depth of current directory from the ``root`` directory

        Returns:
            bool: ``True`` when the directory should be recursed into. ``False`` when
            not"""
        return False

    def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
        """Return ``True`` to recurse into the symlinked directory and ``False`` in order not to.
        Note: ``rel_path`` is the path to the symlink itself. Following symlinked directories
        blindly can cause infinite recursion due to cycles.
    def before_visit_symlinked_dir(self, root, rel_path, depth):
        """Return ``True`` to recurse into the symlinked directory and ``False`` in
        order not to. Note: ``rel_path`` is the path to the symlink itself.
        Following symlinked directories blindly can cause infinite recursion due to
        cycles.

        Parameters:
            root: root directory
            rel_path: relative path to current symlink from ``root``
            depth: depth of current symlink from the ``root`` directory
            root (str): root directory
            rel_path (str): relative path to current symlink from ``root``
            depth (int): depth of current symlink from the ``root`` directory

        Returns:
            bool: ``True`` when the directory should be recursed into. ``False`` when
            not"""
        return False

    def after_visit_dir(self, root: str, rel_path: str, depth: int) -> None:
        """Called after recursion into ``rel_path`` finished. This function is not called when
        ``rel_path`` was not recursed into.
    def after_visit_dir(self, root, rel_path, depth):
        """Called after recursion into ``rel_path`` finished. This function is not
        called when ``rel_path`` was not recursed into.

        Parameters:
            root: root directory
            rel_path: relative path to current directory from ``root``
            depth: depth of current directory from the ``root`` directory"""
            root (str): root directory
            rel_path (str): relative path to current directory from ``root``
            depth (int): depth of current directory from the ``root`` directory"""
        pass

    def after_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> None:
        """Called after recursion into ``rel_path`` finished. This function is not called when
        ``rel_path`` was not recursed into.
    def after_visit_symlinked_dir(self, root, rel_path, depth):
        """Called after recursion into ``rel_path`` finished. This function is not
        called when ``rel_path`` was not recursed into.

        Parameters:
            root: root directory
            rel_path: relative path to current symlink from ``root``
            depth: depth of current symlink from the ``root`` directory"""
            root (str): root directory
            rel_path (str): relative path to current symlink from ``root``
            depth (int): depth of current symlink from the ``root`` directory"""
        pass
def visit_directory_tree(
    root: str, visitor: BaseDirectoryVisitor, rel_path: str = "", depth: int = 0
):
    """Recurses the directory root depth-first through a visitor pattern using the interface from
    :py:class:`BaseDirectoryVisitor`
def visit_directory_tree(root, visitor, rel_path="", depth=0):
    """Recurses the directory root depth-first through a visitor pattern using the
    interface from :py:class:`BaseDirectoryVisitor`

    Parameters:
        root: path of directory to recurse into
        visitor: what visitor to use
        rel_path: current relative path from the root
        depth: current depth from the root
        root (str): path of directory to recurse into
        visitor (BaseDirectoryVisitor): what visitor to use
        rel_path (str): current relative path from the root
        depth (str): current depth from the root
    """
    dir = os.path.join(root, rel_path)
    dir_entries = sorted(os.scandir(dir), key=lambda d: d.name)

@@ -1444,19 +1498,26 @@ def visit_directory_tree(
    for f in dir_entries:
        rel_child = os.path.join(rel_path, f.name)
        islink = f.is_symlink()
        # On Windows, symlinks to directories are distinct from symlinks to files, and it is
        # possible to create a broken symlink to a directory (e.g. using os.symlink without
        # `target_is_directory=True`), invoking `isdir` on a symlink on Windows that is broken in
        # this manner will result in an error. In this case we can work around the issue by reading
        # the target and resolving the directory ourselves
        # On Windows, symlinks to directories are distinct from
        # symlinks to files, and it is possible to create a
        # broken symlink to a directory (e.g. using os.symlink
        # without `target_is_directory=True`), invoking `isdir`
        # on a symlink on Windows that is broken in this manner
        # will result in an error. In this case we can work around
        # the issue by reading the target and resolving the
        # directory ourselves
        try:
            isdir = f.is_dir()
        except OSError as e:
            if sys.platform == "win32" and hasattr(e, "winerror") and e.winerror == 5 and islink:
                # if path is a symlink, determine destination and evaluate file vs directory
                # if path is a symlink, determine destination and
                # evaluate file vs directory
                link_target = resolve_link_target_relative_to_the_link(f)
                # link_target might be relative but resolve_link_target_relative_to_the_link
                # will ensure that if so, that it is relative to the CWD and therefore makes sense
                # link_target might be relative but
                # resolve_link_target_relative_to_the_link
                # will ensure that if so, that it is relative
                # to the CWD and therefore
                # makes sense
                isdir = os.path.isdir(link_target)
            else:
                raise e
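A minimal concrete visitor for the interface above — collect regular files, recurse into real directories, and refuse to follow directory symlinks so cycles cannot recurse forever. Names other than `BaseDirectoryVisitor` and `visit_directory_tree` are illustrative:

```python
class FileCollector(BaseDirectoryVisitor):
    def __init__(self):
        self.files = []

    def visit_file(self, root, rel_path, depth):
        # Record every regular file, relative to the traversal root.
        self.files.append(rel_path)

    def before_visit_dir(self, root, rel_path, depth):
        return True  # recurse into real directories

    def before_visit_symlinked_dir(self, root, rel_path, depth):
        return False  # treat symlinked dirs as leaves (avoids cycles)


# visitor = FileCollector()
# visit_directory_tree("/some/prefix", visitor)
# visitor.files now lists paths relative to /some/prefix
```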
@@ -98,6 +98,36 @@ def caller_locals():
    del stack


def get_calling_module_name():
    """Make sure that the caller is a class definition, and return the
    enclosing module's name.
    """
    # Passing zero here skips line context for speed.
    stack = inspect.stack(0)
    try:
        # Make sure locals contain __module__
        caller_locals = stack[2][0].f_locals
    finally:
        del stack

    if "__module__" not in caller_locals:
        raise RuntimeError(
            "Must invoke get_calling_module_name() " "from inside a class definition!"
        )

    module_name = caller_locals["__module__"]
    base_name = module_name.split(".")[-1]
    return base_name


def attr_required(obj, attr_name):
    """Ensure that a class has a required attribute."""
    if not hasattr(obj, attr_name):
        raise RequiredAttributeError(
            "No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
        )


def attr_setdefault(obj, name, value):
    """Like dict.setdefault, but for objects."""
    if not hasattr(obj, name):
@@ -483,6 +513,42 @@ def copy(self):
    return clone


def in_function(function_name):
    """True if the caller was called from some function with
    the supplied name, False otherwise."""
    stack = inspect.stack()
    try:
        for elt in stack[2:]:
            if elt[3] == function_name:
                return True
        return False
    finally:
        del stack


def check_kwargs(kwargs, fun):
    """Helper for making functions with kwargs. Checks whether the kwargs
    are empty after all of them have been popped off. If they're
    not, raises an error describing which kwargs are invalid.

    Example::

        def foo(self, **kwargs):
            x = kwargs.pop('x', None)
            y = kwargs.pop('y', None)
            z = kwargs.pop('z', None)
            check_kwargs(kwargs, self.foo)

        # This raises a TypeError:
        foo(w='bad kwarg')
    """
    if kwargs:
        raise TypeError(
            "'%s' is an invalid keyword argument for function %s()."
            % (next(iter(kwargs)), fun.__name__)
        )


def match_predicate(*args):
    """Utility function for making string matching predicates.
@@ -698,6 +764,11 @@ def pretty_seconds(seconds):
    return pretty_seconds_formatter(seconds)(seconds)


class RequiredAttributeError(ValueError):
    def __init__(self, message):
        super().__init__(message)


class ObjectWrapper:
    """Base class that wraps an object. Derived classes can add new behavior
    while staying undercover.

@@ -772,30 +843,6 @@ def __repr__(self):
        return repr(self.instance)


def get_entry_points(*, group: str):
    """Wrapper for ``importlib.metadata.entry_points``

    Args:
        group: entry points to select

    Returns:
        EntryPoints for ``group`` or empty list if unsupported
    """

    try:
        import importlib.metadata  # type: ignore  # novermin
    except ImportError:
        return []

    try:
        return importlib.metadata.entry_points(group=group)
    except TypeError:
        # Prior to Python 3.10, entry_points accepted no parameters and always
        # returned a dictionary of entry points, keyed by group. See
        # https://docs.python.org/3/library/importlib.metadata.html#entry-points
        return importlib.metadata.entry_points().get(group, [])
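A usage sketch for `get_entry_points`, using the `console_scripts` group since it exists in essentially any environment with installed scripts:

```python
for ep in get_entry_points(group="console_scripts"):
    # Each entry point names an importable object; ep.load() resolves it.
    print(ep.name, "->", ep.value)
```

The `try/except TypeError` above is the whole point of the wrapper: the same call works on Python 3.8's dictionary-returning API and on 3.10+'s keyword-selecting API.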
def load_module_from_file(module_name, module_path):
    """Loads a python module from the path of the corresponding file.

@@ -864,6 +911,25 @@ def uniq(sequence):
    return uniq_list


def star(func):
    """Unpacks arguments for use with Multiprocessing mapping functions"""

    def _wrapper(args):
        return func(*args)

    return _wrapper


class Devnull:
    """Null stream with less overhead than ``os.devnull``.

    See https://stackoverflow.com/a/2929954.
    """

    def write(self, *_):
        pass


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
    replacing intervening elements with '...'. For example::
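`star` exists because pool mapping functions pass one argument per task; wrapping lets a multi-argument function consume a tuple. A sketch using a thread pool, which (unlike a process pool) does not need to pickle the returned closure:

```python
from multiprocessing.pool import ThreadPool


def star(func):
    """Same helper as above: unpack a tuple of args for pool mapping."""

    def _wrapper(args):
        return func(*args)

    return _wrapper


def scale(value, factor):
    return value * factor


with ThreadPool(2) as pool:
    print(pool.map(star(scale), [(1, 10), (2, 10), (3, 10)]))  # [10, 20, 30]
```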
@@ -8,7 +8,7 @@
import filecmp
import os
import shutil
from typing import Callable, Dict, List, Optional, Tuple
from collections import OrderedDict

import llnl.util.tty as tty
from llnl.util.filesystem import BaseDirectoryVisitor, mkdirp, touch, traverse_tree

@@ -51,32 +51,32 @@ class SourceMergeVisitor(BaseDirectoryVisitor):
    - A list of merge conflicts in dst/
    """

    def __init__(self, ignore: Optional[Callable[[str], bool]] = None):
    def __init__(self, ignore=None):
        self.ignore = ignore if ignore is not None else lambda f: False

        # When mapping <src root> to <dst root>/<projection>, we need to prepend the <projection>
        # bit to the relative path in the destination dir.
        self.projection: str = ""
        # When mapping <src root> to <dst root>/<projection>, we need
        # to prepend the <projection> bit to the relative path in the
        # destination dir.
        self.projection = ""

        # Two files f and g conflict if they are not os.path.samefile(f, g) and they are both
        # projected to the same destination file. These conflicts are not necessarily fatal, and
        # can be resolved or ignored. For example <prefix>/LICENSE or
        # <site-packages>/<namespace>/__init__.py conflicts can be ignored).
        self.file_conflicts: List[MergeConflict] = []
        # When a file blocks another file, the conflict can sometimes
        # be resolved / ignored (e.g. <prefix>/LICENSE or
        # or <site-packages>/<namespace>/__init__.py conflicts can be
        # ignored).
        self.file_conflicts = []

        # When we have to create a dir where a file is, or a file where a dir is, we have fatal
        # errors, listed here.
        self.fatal_conflicts: List[MergeConflict] = []
        # When we have to create a dir where a file is, or a file
        # where a dir is, we have fatal errors, listed here.
        self.fatal_conflicts = []

        # What directories we have to make; this is an ordered dict, so that we have a fast lookup
        # and can run mkdir in order.
        self.directories: Dict[str, Tuple[str, str]] = {}
        # What directories we have to make; this is an ordered set,
        # so that we have a fast lookup and can run mkdir in order.
        self.directories = OrderedDict()

        # Files to link. Maps dst_rel to (src_root, src_rel). This is an ordered dict, where files
        # are guaranteed to be grouped by src_root in the order they were visited.
        self.files: Dict[str, Tuple[str, str]] = {}
        # Files to link. Maps dst_rel to (src_root, src_rel)
        self.files = OrderedDict()
    def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
    def before_visit_dir(self, root, rel_path, depth):
        """
        Register a directory if dst / rel_path is not blocked by a file or ignored.
        """

@@ -104,7 +104,7 @@ def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
            self.directories[proj_rel_path] = (root, rel_path)
            return True

    def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
    def before_visit_symlinked_dir(self, root, rel_path, depth):
        """
        Replace symlinked dirs with actual directories when possible in low depths,
        otherwise handle it as a file (i.e. we link to the symlink).

@@ -136,56 +136,40 @@ def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bo
            self.visit_file(root, rel_path, depth)
            return False

    def visit_file(self, root: str, rel_path: str, depth: int, *, symlink: bool = False) -> None:
    def visit_file(self, root, rel_path, depth):
        proj_rel_path = os.path.join(self.projection, rel_path)

        if self.ignore(rel_path):
            pass
        elif proj_rel_path in self.directories:
            # Can't create a file where a dir is; fatal error
            src_a_root, src_a_relpath = self.directories[proj_rel_path]
            self.fatal_conflicts.append(
                MergeConflict(
                    dst=proj_rel_path,
                    src_a=os.path.join(*self.directories[proj_rel_path]),
                    src_a=os.path.join(src_a_root, src_a_relpath),
                    src_b=os.path.join(root, rel_path),
                )
            )
        elif proj_rel_path in self.files:
            # When two files project to the same path, they conflict iff they are distinct.
            # If they are the same (i.e. one links to the other), register regular files rather
            # than symlinks. The reason is that in copy-type views, we need a copy of the actual
            # file, not the symlink.

            src_a = os.path.join(*self.files[proj_rel_path])
            src_b = os.path.join(root, rel_path)

            try:
                samefile = os.path.samefile(src_a, src_b)
            except OSError:
                samefile = False

            if not samefile:
                # Distinct files produce a conflict.
                self.file_conflicts.append(
                    MergeConflict(dst=proj_rel_path, src_a=src_a, src_b=src_b)
            # In some cases we can resolve file-file conflicts
            src_a_root, src_a_relpath = self.files[proj_rel_path]
            self.file_conflicts.append(
                MergeConflict(
                    dst=proj_rel_path,
                    src_a=os.path.join(src_a_root, src_a_relpath),
                    src_b=os.path.join(root, rel_path),
                )
                return

            if not symlink:
                # Remove the link in favor of the actual file. The del is necessary to maintain the
                # order of the files dict, which is grouped by root.
                del self.files[proj_rel_path]
                self.files[proj_rel_path] = (root, rel_path)

            )
        else:
            # Otherwise register this file to be linked.
            self.files[proj_rel_path] = (root, rel_path)

    def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
    def visit_symlinked_file(self, root, rel_path, depth):
        # Treat symlinked files as ordinary files (without "dereferencing")
        self.visit_file(root, rel_path, depth, symlink=True)
        self.visit_file(root, rel_path, depth)

    def set_projection(self, projection: str) -> None:
    def set_projection(self, projection):
        self.projection = os.path.normpath(projection)

        # Todo, is this how to check in general for empty projection?
@@ -213,19 +197,24 @@ def set_projection(self, projection: str) -> None:


class DestinationMergeVisitor(BaseDirectoryVisitor):
    """DestinationMergeVisitor takes a SourceMergeVisitor and:
    """DestinationMergeVisitor takes a SourceMergeVisitor
    and:

    a. registers additional conflicts when merging to the destination prefix
    b. removes redundant mkdir operations when directories already exist in the destination prefix.
    a. registers additional conflicts when merging
       to the destination prefix
    b. removes redundant mkdir operations when
       directories already exist in the destination
       prefix.

    This also makes sure that symlinked directories in the target prefix will never be merged with
    This also makes sure that symlinked directories
    in the target prefix will never be merged with
    directories in the sources directories.
    """

    def __init__(self, source_merge_visitor: SourceMergeVisitor):
    def __init__(self, source_merge_visitor):
        self.src = source_merge_visitor

    def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
    def before_visit_dir(self, root, rel_path, depth):
        # If destination dir is a file in a src dir, add a conflict,
        # and don't traverse deeper
        if rel_path in self.src.files:

@@ -247,7 +236,7 @@ def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
        # don't descend into it.
        return False

    def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
    def before_visit_symlinked_dir(self, root, rel_path, depth):
        """
        Symlinked directories in the destination prefix should
        be seen as files; we should not accidentally merge

@@ -273,7 +262,7 @@ def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bo
        # Never descend into symlinked target dirs.
        return False

    def visit_file(self, root: str, rel_path: str, depth: int) -> None:
    def visit_file(self, root, rel_path, depth):
        # Can't merge a file if target already exists
        if rel_path in self.src.directories:
            src_a_root, src_a_relpath = self.src.directories[rel_path]

@@ -291,7 +280,7 @@ def visit_file(self, root: str, rel_path: str, depth: int) -> None:
                )
            )

    def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
    def visit_symlinked_file(self, root, rel_path, depth):
        # Treat symlinked files as ordinary files (without "dereferencing")
        self.visit_file(root, rel_path, depth)
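How the two visitors combine into a merge plan, sketched with hypothetical paths: scan the sources first, then walk the destination to prune existing directories and surface extra conflicts, and only then act on the plan:

```python
# Phase 1: record what the source prefix wants to project into the view.
src_visitor = SourceMergeVisitor(ignore=lambda f: f.endswith(".pyc"))
visit_directory_tree("/spack/opt/pkg-1.0", src_visitor)

# Phase 2: re-check the plan against what already exists in the destination.
visit_directory_tree("/views/default", DestinationMergeVisitor(src_visitor))

# Phase 3: act on the plan only if nothing fatal was recorded.
if src_visitor.fatal_conflicts:
    raise RuntimeError("cannot merge: file/dir conflicts detected")
for rel_dst in src_visitor.directories:
    pass  # mkdir each registered directory, in order
for rel_dst, (src_root, src_rel) in src_visitor.files.items():
    pass  # link os.path.join(src_root, src_rel) to rel_dst in the view
```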
@@ -815,6 +815,10 @@ def __init__(self, path):
        super().__init__(msg)


class LockLimitError(LockError):
    """Raised when the maximum number of attempts to acquire a lock is exceeded."""


class LockTimeoutError(LockError):
    """Raised when an attempt to acquire a lock times out."""
@@ -189,7 +189,6 @@ def _windows_can_symlink() -> bool:
    import llnl.util.filesystem as fs

    fs.touchp(fpath)
    fs.mkdirp(dpath)

    try:
        os.symlink(dpath, dlink)
@@ -44,6 +44,10 @@ def is_debug(level=1):
    return _debug >= level


def is_stacktrace():
    return _stacktrace


def set_debug(level=0):
    global _debug
    assert level >= 0, "Debug level must be a positive value"

@@ -248,6 +252,37 @@ def die(message, *args, **kwargs) -> NoReturn:
    sys.exit(1)


def get_number(prompt, **kwargs):
    default = kwargs.get("default", None)
    abort = kwargs.get("abort", None)

    if default is not None and abort is not None:
        prompt += " (default is %s, %s to abort) " % (default, abort)
    elif default is not None:
        prompt += " (default is %s) " % default
    elif abort is not None:
        prompt += " (%s to abort) " % abort

    number = None
    while number is None:
        msg(prompt, newline=False)
        ans = input()
        if ans == str(abort):
            return None

        if ans:
            try:
                number = int(ans)
                if number < 1:
                    msg("Please enter a valid number.")
                    number = None
            except ValueError:
                msg("Please enter a valid number.")
        elif default is not None:
            number = default
    return number


def get_yes_or_no(prompt, **kwargs):
    default_value = kwargs.get("default", None)
@@ -244,7 +244,7 @@ def _search_duplicate_specs_in_externals(error_cls):
                + lines
                + ["as they might result in non-deterministic hashes"]
            )
        except (TypeError, AttributeError):
        except TypeError:
            details = []

        errors.append(error_cls(summary=error_msg, details=details))
@@ -292,6 +292,12 @@ def _avoid_mismatched_variants(error_cls):
    errors = []
    packages_yaml = spack.config.CONFIG.get_config("packages")

    def make_error(config_data, summary):
        s = io.StringIO()
        s.write("Occurring in the following file:\n")
        syaml.dump_config(config_data, stream=s, blame=True)
        return error_cls(summary=summary, details=[s.getvalue()])

    for pkg_name in packages_yaml:
        # 'all:' must be more forgiving, since it is setting defaults for everything
        if pkg_name == "all" or "variants" not in packages_yaml[pkg_name]:

@@ -311,7 +317,7 @@ def _avoid_mismatched_variants(error_cls):
                    f"Setting a preference for the '{pkg_name}' package to the "
                    f"non-existing variant '{variant.name}'"
                )
                errors.append(_make_config_error(preferences, summary, error_cls=error_cls))
                errors.append(make_error(preferences, summary))
                continue

            # Variant cannot accept this value

@@ -323,41 +329,11 @@ def _avoid_mismatched_variants(error_cls):
                    f"Setting the variant '{variant.name}' of the '{pkg_name}' package "
                    f"to the invalid value '{str(variant)}'"
                )
                errors.append(_make_config_error(preferences, summary, error_cls=error_cls))
                errors.append(make_error(preferences, summary))

    return errors


@config_packages
def _wrongly_named_spec(error_cls):
    """Warns if the wrong name is used for an external spec"""
    errors = []
    packages_yaml = spack.config.CONFIG.get_config("packages")
    for pkg_name in packages_yaml:
        if pkg_name == "all":
            continue

        externals = packages_yaml[pkg_name].get("externals", [])
        is_virtual = spack.repo.PATH.is_virtual(pkg_name)
        for entry in externals:
            spec = spack.spec.Spec(entry["spec"])
            regular_pkg_is_wrong = not is_virtual and pkg_name != spec.name
            virtual_pkg_is_wrong = is_virtual and not any(
                p.name == spec.name for p in spack.repo.PATH.providers_for(pkg_name)
            )
            if regular_pkg_is_wrong or virtual_pkg_is_wrong:
                summary = f"Wrong external spec detected for '{pkg_name}': {spec}"
                errors.append(_make_config_error(entry, summary, error_cls=error_cls))
    return errors


def _make_config_error(config_data, summary, error_cls):
    s = io.StringIO()
    s.write("Occurring in the following file:\n")
    syaml.dump_config(config_data, stream=s, blame=True)
    return error_cls(summary=summary, details=[s.getvalue()])


#: Sanity checks on package directives
package_directives = AuditClass(
    group="packages",
@@ -796,30 +772,10 @@ def check_virtual_with_variants(spec, msg):
            except spack.repo.UnknownPackageError:
                # This dependency is completely missing, so report
                # and continue the analysis
                summary = f"{pkg_name}: unknown package '{dep_name}' in 'depends_on' directive"
                details = [f" in {filename}"]
                errors.append(error_cls(summary=summary, details=details))
                continue

            # Check for self-referential specs similar to:
            #
            # depends_on("foo@X.Y", when="^foo+bar")
            #
            # That would allow clingo to choose whether to have foo@X.Y+bar in the graph.
            problematic_edges = [
                x for x in when.edges_to_dependencies(dep_name) if not x.virtuals
            ]
            if problematic_edges and not dep.patches:
                summary = (
                    f"{pkg_name}: dependency on '{dep.spec}' when '{when}' is self-referential"
                    f"{pkg_name}: unknown package '{dep_name}' in " "'depends_on' directive"
                )
                details = [
                    (
                        f" please specify better using '^[virtuals=...] {dep_name}', or "
                        f"substitute with an equivalent condition on '{pkg_name}'"
                    ),
                    f" in {filename}",
                ]
                details = [f" in {filename}"]
                errors.append(error_cls(summary=summary, details=details))
                continue
@@ -5,6 +5,7 @@

import codecs
import collections
import errno
import hashlib
import io
import itertools

@@ -17,11 +18,13 @@
import tarfile
import tempfile
import time
import traceback
import urllib.error
import urllib.parse
import urllib.request
import warnings
from contextlib import closing
from contextlib import closing, contextmanager
from gzip import GzipFile
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
from urllib.error import HTTPError, URLError

@@ -47,7 +50,6 @@
import spack.stage
import spack.store
import spack.traverse as traverse
import spack.util.archive
import spack.util.crypto
import spack.util.file_cache as file_cache
import spack.util.gpg

@@ -110,6 +112,10 @@ def __init__(self, errors):
        super().__init__(self.message)


class ListMirrorSpecsError(spack.error.SpackError):
    """Raised when unable to retrieve list of specs from the mirror"""


class BinaryCacheIndex:
    """
    The BinaryCacheIndex tracks what specs are available on (usually remote)

@@ -536,6 +542,83 @@ def binary_index_location():
BINARY_INDEX: BinaryCacheIndex = llnl.util.lang.Singleton(BinaryCacheIndex)  # type: ignore
class NoOverwriteException(spack.error.SpackError):
    """Raised when a file would be overwritten"""

    def __init__(self, file_path):
        super().__init__(f"Refusing to overwrite the following file: {file_path}")


class NoGpgException(spack.error.SpackError):
    """
    Raised when gpg2 is not in PATH
    """

    def __init__(self, msg):
        super().__init__(msg)


class NoKeyException(spack.error.SpackError):
    """
    Raised when gpg has no default key added.
    """

    def __init__(self, msg):
        super().__init__(msg)


class PickKeyException(spack.error.SpackError):
    """
    Raised when multiple keys can be used to sign.
    """

    def __init__(self, keys):
        err_msg = "Multiple keys available for signing\n%s\n" % keys
        err_msg += "Use spack buildcache create -k <key hash> to pick a key."
        super().__init__(err_msg)


class NoVerifyException(spack.error.SpackError):
    """
    Raised if file fails signature verification.
    """

    pass


class NoChecksumException(spack.error.SpackError):
    """
    Raised if file fails checksum verification.
    """

    def __init__(self, path, size, contents, algorithm, expected, computed):
        super().__init__(
            f"{algorithm} checksum failed for {path}",
            f"Expected {expected} but got {computed}. "
            f"File size = {size} bytes. Contents = {contents!r}",
        )


class NewLayoutException(spack.error.SpackError):
    """
    Raised if directory layout is different from buildcache.
    """

    def __init__(self, msg):
        super().__init__(msg)


class InvalidMetadataFile(spack.error.SpackError):
    pass


class UnsignedPackageException(spack.error.SpackError):
    """
    Raised if installation of unsigned package is attempted without
    the use of ``--no-check-signature``.
    """


def compute_hash(data):
    if isinstance(data, str):
        data = data.encode("utf-8")
@@ -910,10 +993,15 @@ def url_read_method(url):
            if entry.endswith("spec.json") or entry.endswith("spec.json.sig")
        ]
        read_fn = url_read_method
    except KeyError as inst:
        msg = "No packages at {0}: {1}".format(cache_prefix, inst)
        tty.warn(msg)
    except Exception as err:
        # If we got some kind of S3 (access denied or other connection error), the first non
        # boto-specific class in the exception is Exception. Just print a warning and return
        tty.warn(f"Encountered problem listing packages at {cache_prefix}: {err}")
        # If we got some kind of S3 (access denied or other connection
        # error), the first non boto-specific class in the exception
        # hierarchy is Exception. Just print a warning and return
        msg = "Encountered problem listing packages at {0}: {1}".format(cache_prefix, err)
        tty.warn(msg)

    return file_list, read_fn
@@ -960,10 +1048,11 @@ def generate_package_index(cache_prefix, concurrency=32):
    """
    try:
        file_list, read_fn = _spec_files_from_cache(cache_prefix)
    except ListMirrorSpecsError as e:
        raise GenerateIndexError(f"Unable to generate package index: {e}") from e
    except ListMirrorSpecsError as err:
        tty.error("Unable to generate package index, {0}".format(err))
        return

    tty.debug(f"Retrieving spec descriptor files from {cache_prefix} to build index")
    tty.debug("Retrieving spec descriptor files from {0} to build index".format(cache_prefix))

    tmpdir = tempfile.mkdtemp()
@@ -973,22 +1062,27 @@ def generate_package_index(cache_prefix, concurrency=32):

    try:
        _read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency)
    except Exception as e:
        raise GenerateIndexError(
            f"Encountered problem pushing package index to {cache_prefix}: {e}"
        ) from e
    except Exception as err:
        msg = "Encountered problem pushing package index to {0}: {1}".format(cache_prefix, err)
        tty.warn(msg)
        tty.debug("\n" + traceback.format_exc())
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
        shutil.rmtree(tmpdir)
def generate_key_index(key_prefix, tmpdir=None):
    """Create the key index page.

    Creates (or replaces) the "index.json" page at the location given in key_prefix. This page
    contains an entry for each key (.pub) under key_prefix.
    Creates (or replaces) the "index.json" page at the location given in
    key_prefix. This page contains an entry for each key (.pub) under
    key_prefix.
    """

    tty.debug(f"Retrieving key.pub files from {url_util.format(key_prefix)} to build key index")
    tty.debug(
        " ".join(
            ("Retrieving key.pub files from", url_util.format(key_prefix), "to build key index")
        )
    )

    try:
        fingerprints = (
@@ -996,8 +1090,17 @@ def generate_key_index(key_prefix, tmpdir=None):
            for entry in web_util.list_url(key_prefix, recursive=False)
            if entry.endswith(".pub")
        )
    except Exception as e:
        raise CannotListKeys(f"Encountered problem listing keys at {key_prefix}: {e}") from e
    except KeyError as inst:
        msg = "No keys at {0}: {1}".format(key_prefix, inst)
        tty.warn(msg)
        return
    except Exception as err:
        # If we got some kind of S3 (access denied or other connection
        # error), the first non boto-specific class in the exception
        # hierarchy is Exception. Just print a warning and return
        msg = "Encountered problem listing keys at {0}: {1}".format(key_prefix, err)
        tty.warn(msg)
        return

    remove_tmpdir = False
@@ -1022,55 +1125,213 @@ def generate_key_index(key_prefix, tmpdir=None):
                keep_original=False,
                extra_args={"ContentType": "application/json"},
            )
    except Exception as e:
        raise GenerateIndexError(
            f"Encountered problem pushing key index to {key_prefix}: {e}"
        ) from e
    except Exception as err:
        msg = "Encountered problem pushing key index to {0}: {1}".format(key_prefix, err)
        tty.warn(msg)
    finally:
        if remove_tmpdir:
            shutil.rmtree(tmpdir, ignore_errors=True)
            shutil.rmtree(tmpdir)
@contextmanager
def gzip_compressed_tarfile(path):
    """Create a reproducible, compressed tarfile"""
    # Create gzip compressed tarball of the install prefix
    # 1) Use explicit empty filename and mtime 0 for gzip header reproducibility.
    #    If the filename="" is dropped, Python will use fileobj.name instead.
    #    This should effectively mimic `gzip --no-name`.
    # 2) On AMD Ryzen 3700X and an SSD disk, we have the following on compression speed:
    #    compresslevel=6 gzip default: llvm takes 4mins, roughly 2.1GB
    #    compresslevel=9 python default: llvm takes 12mins, roughly 2.1GB
    # So we follow gzip.
    with open(path, "wb") as f, ChecksumWriter(f) as inner_checksum, closing(
        GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=inner_checksum)
    ) as gzip_file, ChecksumWriter(gzip_file) as outer_checksum, tarfile.TarFile(
        name="", mode="w", fileobj=outer_checksum
    ) as tar:
        yield tar, inner_checksum, outer_checksum
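A minimal standalone check of the mtime=0 / filename="" trick used above (standard library only; not part of this diff):

import gzip
import io

def deterministic_gzip_bytes(payload: bytes) -> bytes:
    # Same header settings as gzip_compressed_tarfile: no filename, zero mtime.
    buf = io.BytesIO()
    with gzip.GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=buf) as gz:
        gz.write(payload)
    return buf.getvalue()

# Two independent runs produce byte-identical archives.
assert deterministic_gzip_bytes(b"spack") == deterministic_gzip_bytes(b"spack")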
def _tarinfo_name(absolute_path: str, *, _path=pathlib.PurePath) -> str:
    """Compute tarfile entry name as the relative path from the (system) root."""
    return _path(*_path(absolute_path).parts[1:]).as_posix()
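A behavior sketch for the helper above (not part of the diff): PurePath.parts puts the root "/" first, so dropping parts[0] archives every entry relative to the filesystem root.

import pathlib

def _tarinfo_name(absolute_path: str, *, _path=pathlib.PurePath) -> str:
    return _path(*_path(absolute_path).parts[1:]).as_posix()

assert _tarinfo_name("/opt/spack/zlib-1.3") == "opt/spack/zlib-1.3"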
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
    """Create a tarfile of an install prefix of a spec. Skips existing buildinfo file.
    Only adds regular files, symlinks and dirs. Skips devices, fifos. Preserves hardlinks.
    Normalizes permissions like git. Tar entries are added in depth-first pre-order, with
    dir entries partitioned by file | dir, and sorted alphabetically, for reproducibility.
    Partitioning ensures only one dir is in memory at a time, and sorting improves compression.

    Args:
        tar: tarfile object to add files to
        prefix: absolute install prefix of spec"""
    if not os.path.isabs(prefix) or not os.path.isdir(prefix):
        raise ValueError(f"prefix '{prefix}' must be an absolute path to a directory")
    hardlink_to_tarinfo_name: Dict[Tuple[int, int], str] = dict()
    stat_key = lambda stat: (stat.st_dev, stat.st_ino)

    try:  # skip buildinfo file if it exists
        files_to_skip = [stat_key(os.lstat(buildinfo_file_name(prefix)))]
        skip = lambda entry: stat_key(entry.stat(follow_symlinks=False)) in files_to_skip
    except OSError:
        skip = lambda entry: False
        files_to_skip = []

    spack.util.archive.reproducible_tarfile_from_prefix(
        tar,
        prefix,
        # Spack <= 0.21 did not include parent directories, leading to issues when tarballs are
        # used in runtimes like AWS lambda.
        include_parent_directories=True,
        skip=skip,
    )
    # First add all directories leading up to `prefix` (Spack <= 0.21 did not do this, leading to
    # issues when tarballs are used in runtimes like AWS lambda). Skip the file system root.
    parent_dirs = reversed(pathlib.Path(prefix).parents)
    next(parent_dirs)  # skip the root: slices are supported from python 3.10
    for parent_dir in parent_dirs:
        dir_info = tarfile.TarInfo(_tarinfo_name(str(parent_dir)))
        dir_info.type = tarfile.DIRTYPE
        dir_info.mode = 0o755
        tar.addfile(dir_info)

    dir_stack = [prefix]
    while dir_stack:
        dir = dir_stack.pop()

        # Add the dir before its contents
        dir_info = tarfile.TarInfo(_tarinfo_name(dir))
        dir_info.type = tarfile.DIRTYPE
        dir_info.mode = 0o755
        tar.addfile(dir_info)

        # Sort by name: reproducible & improves compression
        with os.scandir(dir) as it:
            entries = sorted(it, key=lambda entry: entry.name)

        new_dirs = []
        for entry in entries:
            if entry.is_dir(follow_symlinks=False):
                new_dirs.append(entry.path)
                continue

            file_info = tarfile.TarInfo(_tarinfo_name(entry.path))

            s = entry.stat(follow_symlinks=False)

            # Skip existing binary distribution files.
            id = stat_key(s)
            if id in files_to_skip:
                continue

            # Normalize the mode
            file_info.mode = 0o644 if s.st_mode & 0o100 == 0 else 0o755

            if entry.is_symlink():
                file_info.type = tarfile.SYMTYPE
                file_info.linkname = os.readlink(entry.path)
                tar.addfile(file_info)

            elif entry.is_file(follow_symlinks=False):
                # Deduplicate hardlinks
                if s.st_nlink > 1:
                    if id in hardlink_to_tarinfo_name:
                        file_info.type = tarfile.LNKTYPE
                        file_info.linkname = hardlink_to_tarinfo_name[id]
                        tar.addfile(file_info)
                        continue
                    hardlink_to_tarinfo_name[id] = file_info.name

                # If file not yet seen, copy it.
                file_info.type = tarfile.REGTYPE
                file_info.size = s.st_size

                with open(entry.path, "rb") as f:
                    tar.addfile(file_info, f)

        dir_stack.extend(reversed(new_dirs))  # we pop, so reverse to stay alphabetical
class ChecksumWriter(io.BufferedIOBase):
    """Checksum writer computes a checksum while writing to a file."""

    myfileobj = None

    def __init__(self, fileobj, algorithm=hashlib.sha256):
        self.fileobj = fileobj
        self.hasher = algorithm()
        self.length = 0

    def hexdigest(self):
        return self.hasher.hexdigest()

    def write(self, data):
        if isinstance(data, (bytes, bytearray)):
            length = len(data)
        else:
            data = memoryview(data)
            length = data.nbytes

        if length > 0:
            self.fileobj.write(data)
            self.hasher.update(data)

        self.length += length

        return length

    def read(self, size=-1):
        raise OSError(errno.EBADF, "read() on write-only object")

    def read1(self, size=-1):
        raise OSError(errno.EBADF, "read1() on write-only object")

    def peek(self, n):
        raise OSError(errno.EBADF, "peek() on write-only object")

    @property
    def closed(self):
        return self.fileobj is None

    def close(self):
        fileobj = self.fileobj
        if fileobj is None:
            return
        self.fileobj.close()
        self.fileobj = None

    def flush(self):
        self.fileobj.flush()

    def fileno(self):
        return self.fileobj.fileno()

    def rewind(self):
        raise OSError("Can't rewind while computing checksum")

    def readable(self):
        return False

    def writable(self):
        return True

    def seekable(self):
        return True

    def tell(self):
        return self.fileobj.tell()

    def seek(self, offset, whence=io.SEEK_SET):
        # In principle forward seek is possible with b"0" padding,
        # but this is not implemented.
        if offset == 0 and whence == io.SEEK_CUR:
            return
        raise OSError("Can't seek while computing checksum")

    def readline(self, size=-1):
        raise OSError(errno.EBADF, "readline() on write-only object")
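A short usage sketch of ChecksumWriter (illustrative, not part of the diff): wrapping a writable file object yields the digest of everything written, with no second pass over the data.

import hashlib
import io

buf = io.BytesIO()
writer = ChecksumWriter(buf)  # algorithm defaults to hashlib.sha256
writer.write(b"hello buildcache")
assert writer.hexdigest() == hashlib.sha256(b"hello buildcache").hexdigest()
assert writer.length == len(b"hello buildcache")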
def _do_create_tarball(tarfile_path: str, binaries_dir: str, buildinfo: dict):
    with spack.util.archive.gzip_compressed_tarfile(tarfile_path) as (
        tar,
        inner_checksum,
        outer_checksum,
    ):
    with gzip_compressed_tarfile(tarfile_path) as (tar, inner_checksum, outer_checksum):
        # Tarball the install prefix
        tarfile_of_spec_prefix(tar, binaries_dir)

        # Serialize buildinfo for the tarball
        bstring = syaml.dump(buildinfo, default_flow_style=True).encode("utf-8")
        tarinfo = tarfile.TarInfo(
            name=spack.util.archive.default_path_to_name(buildinfo_file_name(binaries_dir))
        )
        tarinfo = tarfile.TarInfo(name=_tarinfo_name(buildinfo_file_name(binaries_dir)))
        tarinfo.type = tarfile.REGTYPE
        tarinfo.size = len(bstring)
        tarinfo.mode = 0o644
@@ -1099,8 +1360,7 @@ def push_or_raise(spec: Spec, out_url: str, options: PushOptions):
    used at the mirror (following <tarball_directory_name>).

    This method raises :py:class:`NoOverwriteException` when ``force=False`` and the tarball or
    spec.json file already exist in the buildcache. It raises :py:class:`PushToBuildCacheError`
    when the tarball or spec.json file cannot be pushed to the buildcache.
    spec.json file already exist in the buildcache.
    """
    if not spec.concrete:
        raise ValueError("spec must be concrete to build tarball")
@@ -1178,18 +1438,13 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
        key = select_signing_key(options.key)
        sign_specfile(key, options.force, specfile_path)

    try:
        # push tarball and signed spec json to remote mirror
        web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False)
        web_util.push_to_url(
            signed_specfile_path if not options.unsigned else specfile_path,
            remote_signed_specfile_path if not options.unsigned else remote_specfile_path,
            keep_original=False,
        )
    except Exception as e:
        raise PushToBuildCacheError(
            f"Encountered problem pushing binary {remote_spackfile_path}: {e}"
        ) from e
    # push tarball and signed spec json to remote mirror
    web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False)
    web_util.push_to_url(
        signed_specfile_path if not options.unsigned else specfile_path,
        remote_signed_specfile_path if not options.unsigned else remote_specfile_path,
        keep_original=False,
    )

    # push the key to the build cache's _pgp directory so it can be
    # imported
@@ -1201,6 +1456,8 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
    if options.regenerate_index:
        generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir)))

    return None
class NotInstalledError(spack.error.SpackError):
    """Raised when a spec is not installed but picked to be packaged."""
@@ -1255,6 +1512,28 @@ def specs_to_be_packaged(
    return [s for s in itertools.chain(roots, deps) if not s.external]
def push(spec: Spec, mirror_url: str, options: PushOptions):
    """Create and push binary package for a single spec to the specified
    mirror url.

    Args:
        spec: Spec to package and push
        mirror_url: Desired destination url for binary package
        options:

    Returns:
        True if package was pushed, False otherwise.

    """
    try:
        push_or_raise(spec, mirror_url, options)
    except NoOverwriteException as e:
        warnings.warn(str(e))
        return False

    return True
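A usage sketch for push() (hypothetical values; the PushOptions fields shown are inferred from the options.force / options.unsigned / options.key / options.regenerate_index accesses visible elsewhere in this diff, and may not be the complete set):

opts = PushOptions(force=False, unsigned=True, key=None, regenerate_index=True)
if push(spec, "s3://my-mirror/build_cache", opts):  # hypothetical mirror URL
    print(f"pushed {spec.name} to the mirror")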
def try_verify(specfile_path):
    """Utility function to attempt to verify a local file. Assumes the
    file is a clearsigned signature file.
@@ -1422,7 +1701,7 @@ def fetch_url_to_mirror(url):
            response = spack.oci.opener.urlopen(
                urllib.request.Request(
                    url=ref.manifest_url(),
                    headers={"Accept": ", ".join(spack.oci.oci.manifest_content_type)},
                    headers={"Accept": "application/vnd.oci.image.manifest.v1+json"},
                )
            )
        except Exception:
@@ -2587,96 +2866,3 @@ def conditional_fetch(self) -> FetchIndexResult:
            raise FetchIndexError(f"Remote index {url_manifest} is invalid")

        return FetchIndexResult(etag=None, hash=index_digest.digest, data=result, fresh=False)


class NoOverwriteException(spack.error.SpackError):
    """Raised when a file would be overwritten"""

    def __init__(self, file_path):
        super().__init__(f"Refusing to overwrite the following file: {file_path}")


class NoGpgException(spack.error.SpackError):
    """
    Raised when gpg2 is not in PATH
    """

    def __init__(self, msg):
        super().__init__(msg)


class NoKeyException(spack.error.SpackError):
    """
    Raised when gpg has no default key added.
    """

    def __init__(self, msg):
        super().__init__(msg)


class PickKeyException(spack.error.SpackError):
    """
    Raised when multiple keys can be used to sign.
    """

    def __init__(self, keys):
        err_msg = "Multiple keys available for signing\n%s\n" % keys
        err_msg += "Use spack buildcache create -k <key hash> to pick a key."
        super().__init__(err_msg)


class NoVerifyException(spack.error.SpackError):
    """
    Raised if file fails signature verification.
    """

    pass


class NoChecksumException(spack.error.SpackError):
    """
    Raised if file fails checksum verification.
    """

    def __init__(self, path, size, contents, algorithm, expected, computed):
        super().__init__(
            f"{algorithm} checksum failed for {path}",
            f"Expected {expected} but got {computed}. "
            f"File size = {size} bytes. Contents = {contents!r}",
        )


class NewLayoutException(spack.error.SpackError):
    """
    Raised if directory layout is different from buildcache.
    """

    def __init__(self, msg):
        super().__init__(msg)


class InvalidMetadataFile(spack.error.SpackError):
    pass


class UnsignedPackageException(spack.error.SpackError):
    """
    Raised if installation of unsigned package is attempted without
    the use of ``--no-check-signature``.
    """


class ListMirrorSpecsError(spack.error.SpackError):
    """Raised when unable to retrieve list of specs from the mirror"""


class GenerateIndexError(spack.error.SpackError):
    """Raised when unable to generate key or package index for mirror"""


class CannotListKeys(GenerateIndexError):
    """Raised when unable to list keys when generating key index"""


class PushToBuildCacheError(spack.error.SpackError):
    """Raised when unable to push objects to binary mirror"""
@@ -213,6 +213,9 @@ def _root_spec(spec_str: str) -> str:
    platform = str(spack.platforms.host())
    if platform == "darwin":
        spec_str += " %apple-clang"
    elif platform == "windows":
        # TODO (johnwparent): Remove version constraint when clingo patch is up
        spec_str += " %msvc@:19.37"
    elif platform == "linux":
        spec_str += " %gcc"
    elif platform == "freebsd":
@@ -147,7 +147,7 @@ def _add_compilers_if_missing() -> None:
        mixed_toolchain=sys.platform == "darwin"
    )
    if new_compilers:
        spack.compilers.add_compilers_to_config(new_compilers)
        spack.compilers.add_compilers_to_config(new_compilers, init_config=False)


@contextlib.contextmanager
@@ -542,7 +542,7 @@ def verify_patchelf(patchelf: "spack.util.executable.Executable") -> bool:
    return version >= spack.version.Version("0.13.1")


def ensure_patchelf_in_path_or_raise() -> spack.util.executable.Executable:
def ensure_patchelf_in_path_or_raise() -> None:
    """Ensure patchelf is in the PATH or raise."""
    # The old concretizer is not smart and we're doing its job: if the latest patchelf
    # does not concretize because the compiler doesn't support C++17, we try to
@@ -146,7 +146,7 @@ def mypy_root_spec() -> str:

def black_root_spec() -> str:
    """Return the root spec used to bootstrap black"""
    return _root_spec("py-black@:24.1.0")
    return _root_spec("py-black@:23.1.0")


def flake8_root_spec() -> str:
@@ -217,9 +217,6 @@ def clean_environment():
    env.unset("R_HOME")
    env.unset("R_ENVIRON")

    env.unset("LUA_PATH")
    env.unset("LUA_CPATH")

    # Affects GNU make, can e.g. indirectly inhibit enabling parallel build
    # env.unset('MAKEFLAGS')
@@ -555,55 +552,58 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
    """
    module = ModuleChangePropagator(pkg)

    m = module

    if context == Context.BUILD:
        module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
        module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
        module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)
        jobs = determine_number_of_jobs(parallel=pkg.parallel)
        m.make_jobs = jobs

    jobs = determine_number_of_jobs(parallel=pkg.parallel)
    module.make_jobs = jobs
        # TODO: make these build deps that can be installed if not found.
        m.make = MakeExecutable("make", jobs)
        m.gmake = MakeExecutable("gmake", jobs)
        m.ninja = MakeExecutable("ninja", jobs, supports_jobserver=False)
        # TODO: johnwparent: add package or builder support to define these build tools
        # for now there is no entrypoint for builders to define these on their
        # own
        if sys.platform == "win32":
            m.nmake = Executable("nmake")
            m.msbuild = Executable("msbuild")
            # analog to configure for win32
            m.cscript = Executable("cscript")

    # TODO: make these build deps that can be installed if not found.
    module.make = MakeExecutable("make", jobs)
    module.gmake = MakeExecutable("gmake", jobs)
    module.ninja = MakeExecutable("ninja", jobs, supports_jobserver=False)
    # TODO: johnwparent: add package or builder support to define these build tools
    # for now there is no entrypoint for builders to define these on their
    # own
    if sys.platform == "win32":
        module.nmake = Executable("nmake")
        module.msbuild = Executable("msbuild")
        # analog to configure for win32
        module.cscript = Executable("cscript")
        # Find the configure script in the archive path
        # Don't use which for this; we want to find it in the current dir.
        m.configure = Executable("./configure")

    # Find the configure script in the archive path
    # Don't use which for this; we want to find it in the current dir.
    module.configure = Executable("./configure")
        # Standard CMake arguments
        m.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
        m.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
        m.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)

    # Put spack compiler paths in module scope. (Some packages use it
    # in setup_run_environment etc, so don't put it context == build)
    link_dir = spack.paths.build_env_path
    module.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
    module.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
    module.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
    module.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])
    m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
    m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
    m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
    m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])

    # Useful directories within the prefix are encapsulated in
    # a Prefix object.
    module.prefix = pkg.prefix
    m.prefix = pkg.prefix

    # Platform-specific library suffix.
    module.dso_suffix = dso_suffix
    m.dso_suffix = dso_suffix

    def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
        compiler_path = kwargs.get("compiler", module.spack_cc)
        compiler_path = kwargs.get("compiler", m.spack_cc)
        compiler = Executable(compiler_path)

        return _static_to_shared_library(
            pkg.spec.architecture, compiler, static_lib, shared_lib, **kwargs
        )

    module.static_to_shared_library = static_to_shared_library
    m.static_to_shared_library = static_to_shared_library

    module.propagate_changes_to_mro()
@@ -789,7 +789,7 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
        for mod in ["cray-mpich", "cray-libsci"]:
            module("unload", mod)

    if target and target.module_name:
    if target.module_name:
        load_module(target.module_name)

    load_external_modules(pkg)
@@ -972,8 +972,8 @@ def __init__(self, *specs: spack.spec.Spec, context: Context) -> None:
        self.should_set_package_py_globals = (
            self.should_setup_dependent_build_env | self.should_setup_run_env | UseMode.ROOT
        )
        # In a build context, the root needs build-specific globals set.
        self.needs_build_context = UseMode.ROOT
        # In a build context, the root and direct build deps need build-specific globals set.
        self.needs_build_context = UseMode.ROOT | UseMode.BUILDTIME_DIRECT

    def set_all_package_py_globals(self):
        """Set the globals in modules of package.py files."""
@@ -434,6 +434,11 @@ def _do_patch_libtool(self):
                    r"crtendS\.o",
                ]:
                    x.filter(regex=(rehead + o), repl="")
            elif self.pkg.compiler.name == "dpcpp":
                # Hack to filter out spurious predep_objects when building with Intel dpcpp
                # (see https://github.com/spack/spack/issues/32863):
                x.filter(regex=r"^(predep_objects=.*)/tmp/conftest-[0-9A-Fa-f]+\.o", repl=r"\1")
                x.filter(regex=r"^(predep_objects=.*)/tmp/a-[0-9A-Fa-f]+\.o", repl=r"\1")
            elif self.pkg.compiler.name == "nag":
                for tag in ["fc", "f77"]:
                    marker = markers[tag]
@@ -536,7 +541,7 @@ def autoreconf(self, pkg, spec, prefix):
        if os.path.exists(self.configure_abs_path):
            return

        # Else try to regenerate it, which requires a few build dependencies
        # Else try to regenerate it, which reuquires a few build dependencies
        ensure_build_dependencies_or_raise(
            spec=spec,
            dependencies=["autoconf", "automake", "libtool"],
@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections.abc
import os
import re
from typing import Tuple

import llnl.util.filesystem as fs
@@ -16,12 +15,6 @@
from .cmake import CMakeBuilder, CMakePackage


def spec_uses_toolchain(spec):
    gcc_toolchain_regex = re.compile(".*gcc-toolchain.*")
    using_toolchain = list(filter(gcc_toolchain_regex.match, spec.compiler_flags["cxxflags"]))
    return using_toolchain


def cmake_cache_path(name, value, comment="", force=False):
    """Generate a string for a cmake cache variable"""
    force_str = " FORCE" if force else ""
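The hunk above truncates the body of cmake_cache_path; a sketch of the full helper and the string it emits (the return line is completed here for illustration and is an assumption, not part of the diff):

def cmake_cache_path(name, value, comment="", force=False):
    """Generate a string for a cmake cache variable"""
    force_str = " FORCE" if force else ""
    return 'set({0} "{1}" CACHE PATH "{2}"{3})\n'.format(name, value, comment, force_str)

# e.g. cmake_cache_path("HIP_ROOT_DIR", "/opt/rocm")
# -> 'set(HIP_ROOT_DIR "/opt/rocm" CACHE PATH "")\n'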
@@ -139,11 +132,6 @@ def initconfig_compiler_entries(self):
            "endif()\n",
        ]

        # We defined hipcc as top-level compiler for packages when +rocm.
        # This avoids problems coming from rocm flags being applied to another compiler.
        if "+rocm" in spec:
            entries.insert(0, cmake_cache_path("CMAKE_CXX_COMPILER", self.spec["hip"].hipcc))

        flags = spec.compiler_flags

        # use global spack compiler flags
@@ -211,8 +199,6 @@ def initconfig_mpi_entries(self):
                    mpiexec = "/usr/bin/srun"
                else:
                    mpiexec = os.path.join(spec["slurm"].prefix.bin, "srun")
            elif hasattr(spec["mpi"].package, "mpiexec"):
                mpiexec = spec["mpi"].package.mpiexec
            else:
                mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpirun")
                if not os.path.exists(mpiexec):
@@ -225,7 +211,7 @@ def initconfig_mpi_entries(self):
            else:
                # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
                # vs the older versions which expect MPIEXEC
                if spec["cmake"].satisfies("@3.10:"):
                if self.pkg.spec["cmake"].satisfies("@3.10:"):
                    entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec))
                else:
                    entries.append(cmake_cache_path("MPIEXEC", mpiexec))
@@ -260,17 +246,12 @@ def initconfig_hardware_entries(self):
            # Include the deprecated CUDA_TOOLKIT_ROOT_DIR for supporting BLT packages
            entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir))

            # CUDA_FLAGS
            cuda_flags = []

            if not spec.satisfies("cuda_arch=none"):
                cuda_archs = ";".join(spec.variants["cuda_arch"].value)
                entries.append(cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", cuda_archs))

            if spec_uses_toolchain(spec):
                cuda_flags.append("-Xcompiler {}".format(spec_uses_toolchain(spec)[0]))

            entries.append(cmake_cache_string("CMAKE_CUDA_FLAGS", " ".join(cuda_flags)))
            archs = spec.variants["cuda_arch"].value
            if archs[0] != "none":
                arch_str = ";".join(archs)
                entries.append(
                    cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", "{0}".format(arch_str))
                )

        if "+rocm" in spec:
            entries.append("#------------------{0}".format("-" * 30))
@@ -279,6 +260,9 @@ def initconfig_hardware_entries(self):

            # Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary
            entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix)))
            entries.append(
                cmake_cache_path("HIP_CXX_COMPILER", "{0}".format(self.spec["hip"].hipcc))
            )
            llvm_bin = spec["llvm-amdgpu"].prefix.bin
            llvm_prefix = spec["llvm-amdgpu"].prefix
            # Some ROCm systems seem to point to /<path>/rocm-<ver>/ and
@@ -291,9 +275,11 @@ def initconfig_hardware_entries(self):
            archs = self.spec.variants["amdgpu_target"].value
            if archs[0] != "none":
                arch_str = ";".join(archs)
                entries.append(cmake_cache_string("CMAKE_HIP_ARCHITECTURES", arch_str))
                entries.append(cmake_cache_string("AMDGPU_TARGETS", arch_str))
                entries.append(cmake_cache_string("GPU_TARGETS", arch_str))
                entries.append(
                    cmake_cache_string("CMAKE_HIP_ARCHITECTURES", "{0}".format(arch_str))
                )
                entries.append(cmake_cache_string("AMDGPU_TARGETS", "{0}".format(arch_str)))
                entries.append(cmake_cache_string("GPU_TARGETS", "{0}".format(arch_str)))

        return entries
@@ -15,7 +15,6 @@

import spack.build_environment
import spack.builder
import spack.deptypes as dt
import spack.package_base
from spack.directives import build_system, conflicts, depends_on, variant
from spack.multimethod import when
@@ -32,86 +31,8 @@ def _extract_primary_generator(generator):
    primary generator from the generator string which may contain an
    optional secondary generator.
    """
    return _primary_generator_extractor.match(generator).group(1)


def _maybe_set_python_hints(pkg: spack.package_base.PackageBase, args: List[str]) -> None:
    """Set the PYTHON_EXECUTABLE, Python_EXECUTABLE, and Python3_EXECUTABLE CMake variables
    if the package has Python as build or link dep and ``find_python_hints`` is set to True. See
    ``find_python_hints`` for context."""
    if not getattr(pkg, "find_python_hints", False):
        return
    pythons = pkg.spec.dependencies("python", dt.BUILD | dt.LINK)
    if len(pythons) != 1:
        return
    try:
        python_executable = pythons[0].package.command.path
    except RuntimeError:
        return

    args.extend(
        [
            CMakeBuilder.define("PYTHON_EXECUTABLE", python_executable),
            CMakeBuilder.define("Python_EXECUTABLE", python_executable),
            CMakeBuilder.define("Python3_EXECUTABLE", python_executable),
        ]
    )


def _supports_compilation_databases(pkg: spack.package_base.PackageBase) -> bool:
    """Check if this package (and CMake) can support compilation databases."""

    # CMAKE_EXPORT_COMPILE_COMMANDS only exists for CMake >= 3.5
    if not pkg.spec.satisfies("^cmake@3.5:"):
        return False

    # CMAKE_EXPORT_COMPILE_COMMANDS is only implemented for Makefile and Ninja generators
    if not (pkg.spec.satisfies("generator=make") or pkg.spec.satisfies("generator=ninja")):
        return False

    return True


def _conditional_cmake_defaults(pkg: spack.package_base.PackageBase, args: List[str]) -> None:
    """Set a few default defines for CMake, depending on its version."""
    cmakes = pkg.spec.dependencies("cmake", dt.BUILD)

    if len(cmakes) != 1:
        return

    cmake = cmakes[0]

    # CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
    try:
        ipo = pkg.spec.variants["ipo"].value
    except KeyError:
        ipo = False

    if cmake.satisfies("@3.9:"):
        args.append(CMakeBuilder.define("CMAKE_INTERPROCEDURAL_OPTIMIZATION", ipo))

    # Disable Package Registry: export(PACKAGE) may put files in the user's home directory, and
    # find_package may search there. This is not what we want.

    # Do not populate CMake User Package Registry
    if cmake.satisfies("@3.15:"):
        # see https://cmake.org/cmake/help/latest/policy/CMP0090.html
        args.append(CMakeBuilder.define("CMAKE_POLICY_DEFAULT_CMP0090", "NEW"))
    elif cmake.satisfies("@3.1:"):
        # see https://cmake.org/cmake/help/latest/variable/CMAKE_EXPORT_NO_PACKAGE_REGISTRY.html
        args.append(CMakeBuilder.define("CMAKE_EXPORT_NO_PACKAGE_REGISTRY", True))

    # Do not use CMake User/System Package Registry
    # https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#disabling-the-package-registry
    if cmake.satisfies("@3.16:"):
        args.append(CMakeBuilder.define("CMAKE_FIND_USE_PACKAGE_REGISTRY", False))
    elif cmake.satisfies("@3.1:3.15"):
        args.append(CMakeBuilder.define("CMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY", False))
        args.append(CMakeBuilder.define("CMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY", False))

    # Export a compilation database if supported.
    if _supports_compilation_databases(pkg):
        args.append(CMakeBuilder.define("CMAKE_EXPORT_COMPILE_COMMANDS", True))
    primary_generator = _primary_generator_extractor.match(generator).group(1)
    return primary_generator


def generator(*names: str, default: Optional[str] = None):
@@ -165,13 +86,6 @@ class CMakePackage(spack.package_base.PackageBase):
    #: Legacy buildsystem attribute used to deserialize and install old specs
    legacy_buildsystem = "cmake"

    #: When this package depends on Python and ``find_python_hints`` is set to True, pass the
    #: defines {Python3,Python,PYTHON}_EXECUTABLE explicitly, so that CMake locates the right
    #: Python in its builtin FindPython3, FindPython, and FindPythonInterp modules. Spack does
    #: CMake's job because CMake's modules by default only search for Python versions known at the
    #: time of release.
    find_python_hints = True

    build_system("cmake")

    with when("build_system=cmake"):
@@ -302,10 +216,7 @@ class CMakeBuilder(BaseBuilder):
    @property
    def archive_files(self):
        """Files to archive for packages based on CMake"""
        files = [os.path.join(self.build_directory, "CMakeCache.txt")]
        if _supports_compilation_databases(self):
            files.append(os.path.join(self.build_directory, "compile_commands.json"))
        return files
        return [os.path.join(self.build_directory, "CMakeCache.txt")]

    @property
    def root_cmakelists_dir(self):
@@ -330,9 +241,9 @@ def std_cmake_args(self):
        """Standard cmake arguments provided as a property for
        convenience of package writers
        """
        args = CMakeBuilder.std_args(self.pkg, generator=self.generator)
        args += getattr(self.pkg, "cmake_flag_args", [])
        return args
        std_cmake_args = CMakeBuilder.std_args(self.pkg, generator=self.generator)
        std_cmake_args += getattr(self.pkg, "cmake_flag_args", [])
        return std_cmake_args

    @staticmethod
    def std_args(pkg, generator=None):
@@ -352,6 +263,11 @@ def std_args(pkg, generator=None):
        except KeyError:
            build_type = "RelWithDebInfo"

        try:
            ipo = pkg.spec.variants["ipo"].value
        except KeyError:
            ipo = False

        define = CMakeBuilder.define
        args = [
            "-G",
@@ -360,6 +276,10 @@ def std_args(pkg, generator=None):
            define("CMAKE_BUILD_TYPE", build_type),
        ]

        # CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
        if pkg.spec.satisfies("^cmake@3.9:"):
            args.append(define("CMAKE_INTERPROCEDURAL_OPTIMIZATION", ipo))

        if primary_generator == "Unix Makefiles":
            args.append(define("CMAKE_VERBOSE_MAKEFILE", True))

@@ -368,9 +288,6 @@ def std_args(pkg, generator=None):
            [define("CMAKE_FIND_FRAMEWORK", "LAST"), define("CMAKE_FIND_APPBUNDLE", "LAST")]
        )

        _conditional_cmake_defaults(pkg, args)
        _maybe_set_python_hints(pkg, args)

        # Set up CMake rpath
        args.extend(
            [
@@ -218,7 +218,7 @@ def pset_components(self):
            "+inspector": " intel-inspector",
            "+itac": " intel-itac intel-ta intel-tc" " intel-trace-analyzer intel-trace-collector",
            # Trace Analyzer and Collector
            "+vtune": " intel-vtune",
            "+vtune": " intel-vtune"
            # VTune, ..-profiler since 2020, ..-amplifier before
        }.items():
            if variant in self.spec:
@@ -29,12 +29,15 @@ class LuaPackage(spack.package_base.PackageBase):

    with when("build_system=lua"):
        depends_on("lua-lang")
        with when("^[virtuals=lua-lang] lua"):
            extends("lua")
        with when("^[virtuals=lua-lang] lua-luajit"):
            extends("lua-luajit+lualinks")
        with when("^[virtuals=lua-lang] lua-luajit-openresty"):
            extends("lua-luajit-openresty+lualinks")
        extends("lua", when="^lua")
        with when("^lua-luajit"):
            extends("lua-luajit")
            depends_on("luajit")
            depends_on("lua-luajit+lualinks")
        with when("^lua-luajit-openresty"):
            extends("lua-luajit-openresty")
            depends_on("luajit")
            depends_on("lua-luajit-openresty+lualinks")
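For context on the ^[virtuals=...] notation being removed above: it constrains a dependency edge to the node that actually provides the named virtual, whereas a plain ^lua matches a lua node anywhere in the graph. A hypothetical directive pair illustrating the difference (illustrative package and dependency names, not from this diff):

class MyLuaTool(LuaPackage):
    # Applies only when the lua-lang virtual is provided by plain "lua"
    with when("^[virtuals=lua-lang] lua"):
        depends_on("some-lua-helper")  # hypothetical dependency
    # Applies whenever any "lua" node appears in the dependency graph
    with when("^lua"):
        depends_on("another-helper")  # hypothetical dependency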
    @property
    def lua(self):
@@ -149,7 +149,7 @@ def std_args(pkg):
        else:
            default_library = "shared"

        return [
        args = [
            "-Dprefix={0}".format(pkg.prefix),
            # If we do not specify libdir explicitly, Meson chooses something
            # like lib/x86_64-linux-gnu, which causes problems when trying to
@@ -163,6 +163,8 @@ def std_args(pkg):
            "-Dwrap_mode=nodownload",
        ]

        return args

    @property
    def build_dirname(self):
        """Returns the directory name to use when building the package."""
@@ -69,7 +69,7 @@ class MSBuildBuilder(BaseBuilder):
    @property
    def build_directory(self):
        """Return the directory containing the MSBuild solution or vcxproj."""
        return fs.windows_sfn(self.pkg.stage.source_path)
        return self.pkg.stage.source_path

    @property
    def toolchain_version(self):
@@ -77,11 +77,7 @@ def ignore_quotes(self):
    @property
    def build_directory(self):
        """Return the directory containing the makefile."""
        return (
            fs.windows_sfn(self.pkg.stage.source_path)
            if not self.makefile_root
            else fs.windows_sfn(self.makefile_root)
        )
        return self.pkg.stage.source_path if not self.makefile_root else self.makefile_root

    @property
    def std_nmake_args(self):
@@ -9,13 +9,10 @@
import shutil
from os.path import basename, isdir

from llnl.util import tty
from llnl.util.filesystem import HeaderList, LibraryList, find_libraries, join_path, mkdirp
from llnl.util.filesystem import HeaderList, find_libraries, join_path, mkdirp
from llnl.util.link_tree import LinkTree

from spack.build_environment import dso_suffix
from spack.directives import conflicts, license, variant
from spack.package_base import InstallError
from spack.directives import conflicts, variant
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable

@@ -26,7 +23,6 @@ class IntelOneApiPackage(Package):
    """Base class for Intel oneAPI packages."""

    homepage = "https://software.intel.com/oneapi"
    license("https://intel.ly/393CijO")

    # oneAPI license does not allow mirroring outside of the
    # organization (e.g. University/Company).
@@ -183,72 +179,16 @@ class IntelOneApiLibraryPackage(IntelOneApiPackage):

    """

    def openmp_libs(self):
        """Supply LibraryList for linking OpenMP"""

        # NB: Hunting down explicit library files may be the Spack way of
        # doing things, but it is better to add the compiler defined option
        # e.g. -fopenmp

        # If other packages use openmp, then all the packages need to
        # support the same ABI. Spack usually uses the same compiler
        # for all the packages, but you can force it if necessary:
        #
        # e.g. spack install blaspp%oneapi@2024 ^intel-oneapi-mkl%oneapi@2024
        #
        if self.spec.satisfies("%intel") or self.spec.satisfies("%oneapi"):
            libname = "libiomp5"
        elif self.spec.satisfies("%gcc"):
            libname = "libgomp"
        elif self.spec.satisfies("%clang"):
            libname = "libomp"
        else:
            raise InstallError(
                "OneAPI package with OpenMP threading requires one of %clang, %gcc, %oneapi, "
                "or %intel"
            )

        # query the compiler for the library path
        with self.compiler.compiler_environment():
            omp_lib_path = Executable(self.compiler.cc)(
                "--print-file-name", f"{libname}.{dso_suffix}", output=str
            ).strip()

        # Newer versions of clang do not give the full path to libomp. If that's
        # the case, look in a path relative to the compiler where libomp is
        # typically found. If it's not found there, error out.
        if not os.path.exists(omp_lib_path) and self.spec.satisfies("%clang"):
            compiler_root = os.path.dirname(os.path.dirname(os.path.realpath(self.compiler.cc)))
            omp_lib_path_compiler = os.path.join(compiler_root, "lib", f"{libname}.{dso_suffix}")
            if os.path.exists(omp_lib_path_compiler):
                omp_lib_path = omp_lib_path_compiler

        # if the compiler cannot find the file, it returns the input path
        if not os.path.exists(omp_lib_path):
            raise InstallError(f"OneAPI package cannot locate OpenMP library: {omp_lib_path}")

        omp_libs = LibraryList(omp_lib_path)
        tty.info(f"OneAPI package requires OpenMP library: {omp_libs}")
        return omp_libs

    # find_headers uses heuristics to determine the include directory
    # that does not work for oneapi packages. Use explicit directories
    # instead.
    def header_directories(self, dirs):
        h = HeaderList([])
        h.directories = dirs
        # trilinos passes the directories to cmake, and cmake requires
        # that the directory exists
        for dir in dirs:
            if not isdir(dir):
                raise RuntimeError(f"{dir} does not exist")
        return h

    @property
    def headers(self):
        # This should match the directories added to CPATH by
        # env/vars.sh for the component
        return self.header_directories([self.component_prefix.include])
        return self.header_directories(
            [self.component_prefix.include, self.component_prefix.include.join(self.component_dir)]
        )

    @property
    def libs(self):
@@ -4,15 +4,12 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
from typing import Iterable

from llnl.util.filesystem import filter_file, find
from llnl.util.lang import memoized
from llnl.util.filesystem import filter_file

import spack.builder
import spack.package_base
from spack.directives import build_system, extends
from spack.install_test import SkipTest, test_part
from spack.util.executable import Executable

from ._checks import BaseBuilder, execute_build_time_tests
@@ -31,58 +28,6 @@ class PerlPackage(spack.package_base.PackageBase):

    extends("perl", when="build_system=perl")

    @property
    @memoized
    def _platform_dir(self):
        """Name of platform-specific module subdirectory."""
        perl = self.spec["perl"].command
        options = "-E", "use Config; say $Config{archname}"
        out = perl(*options, output=str.split, error=str.split)
        return out.strip()

    @property
    def use_modules(self) -> Iterable[str]:
        """Names of the package's perl modules."""
        module_files = find(self.prefix.lib, ["*.pm"], recursive=True)

        # Drop the platform directory, if present
        if self._platform_dir:
            platform_dir = self._platform_dir + os.sep
            module_files = [m.replace(platform_dir, "") for m in module_files]

        # Drop the extension and library path
        prefix = self.prefix.lib + os.sep
        modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files]

        # Drop the perl subdirectory as well
        return ["::".join(m.split(os.sep)[1:]) for m in modules]
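A worked example of the mapping above (paths are illustrative, POSIX separators assumed): a module file <prefix>/lib/perl5/Foo/Bar.pm first loses the library prefix and the ".pm" extension ("perl5/Foo/Bar"), then the leading perl subdirectory is dropped and the rest is joined with "::", yielding the module name "Foo::Bar".

import os

parts = "perl5/Foo/Bar".split(os.sep)  # POSIX illustration
assert "::".join(parts[1:]) == "Foo::Bar"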
    @property
    def skip_modules(self) -> Iterable[str]:
        """Names of modules that should be skipped when running tests.

        These are a subset of use_modules.

        Returns:
            List of strings of module names.
        """
        return []

    def test_use(self):
        """Test 'use module'"""
        if not self.use_modules:
            raise SkipTest("Test requires use_modules package property.")

        perl = self.spec["perl"].command
        for module in self.use_modules:
            if module in self.skip_modules:
                continue

            with test_part(self, f"test_use-{module}", purpose=f"checking use of {module}"):
                options = ["-we", f'use strict; use {module}; print("OK\n")']
                out = perl(*options, output=str.split, error=str.split)
                assert "OK" in out


@spack.builder.builder("perl")
class PerlBuilder(BaseBuilder):
@@ -107,7 +52,7 @@ class PerlBuilder(BaseBuilder):
    phases = ("configure", "build", "install")

    #: Names associated with package methods in the old build-system format
    legacy_methods = ("configure_args", "check", "test_use")
    legacy_methods = ("configure_args", "check")

    #: Names associated with package attributes in the old build-system format
    legacy_attributes = ()
@@ -2,15 +2,11 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import functools
import inspect
import operator
import os
import re
import shutil
import stat
from typing import Dict, Iterable, List, Mapping, Optional, Tuple
from typing import Iterable, List, Mapping, Optional

import archspec

@@ -27,7 +23,7 @@
import spack.package_base
import spack.spec
import spack.store
from spack.directives import build_system, depends_on, extends
from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import test_part
from spack.spec import Spec
@@ -56,6 +52,8 @@ def _flatten_dict(dictionary: Mapping[str, object]) -> Iterable[str]:


class PythonExtension(spack.package_base.PackageBase):
    maintainers("adamjstewart")

    @property
    def import_modules(self) -> Iterable[str]:
        """Names of modules that the Python package provides.
@@ -138,52 +136,31 @@ def view_file_conflicts(self, view, merge_map):
        return conflicts

    def add_files_to_view(self, view, merge_map, skip_if_exists=True):
        # Patch up shebangs to the python linked in the view only if python is built by Spack.
        if not self.extendee_spec or self.extendee_spec.external:
        if not self.extendee_spec:
            return super().add_files_to_view(view, merge_map, skip_if_exists)

        # We only patch shebangs in the bin directory.
        copied_files: Dict[Tuple[int, int], str] = {}  # File identifier -> source
        delayed_links: List[Tuple[str, str]] = []  # List of symlinks from merge map

        bin_dir = self.spec.prefix.bin
        python_prefix = self.extendee_spec.prefix
        python_is_external = self.extendee_spec.external
        global_view = fs.same_path(python_prefix, view.get_projection_for_spec(self.spec))
        for src, dst in merge_map.items():
            if skip_if_exists and os.path.lexists(dst):
            if os.path.exists(dst):
                continue

            if not fs.path_contains_subdirectory(src, bin_dir):
            elif global_view or not fs.path_contains_subdirectory(src, bin_dir):
                view.link(src, dst)
                continue

            s = os.lstat(src)

            # Symlink is delayed because we may need to re-target if its target is copied in view
            if stat.S_ISLNK(s.st_mode):
                delayed_links.append((src, dst))
                continue

            # If it's executable and has a shebang, copy and patch it.
            if (s.st_mode & 0b111) and fs.has_shebang(src):
                copied_files[(s.st_dev, s.st_ino)] = dst
            elif not os.path.islink(src):
                shutil.copy2(src, dst)
                fs.filter_file(
                    python_prefix, os.path.abspath(view.get_projection_for_spec(self.spec)), dst
                )
                is_script = fs.is_nonsymlink_exe_with_shebang(src)
                if is_script and not python_is_external:
                    fs.filter_file(
                        python_prefix,
                        os.path.abspath(view.get_projection_for_spec(self.spec)),
                        dst,
                    )
            else:
                view.link(src, dst)

        # Finally re-target the symlinks that point to copied files.
        for src, dst in delayed_links:
            try:
                s = os.stat(src)
                target = copied_files[(s.st_dev, s.st_ino)]
            except (OSError, KeyError):
                target = None
            if target:
                os.symlink(os.path.relpath(target, os.path.dirname(dst)), dst)
            else:
                view.link(src, dst, spec=self.spec)
                orig_link_target = os.path.realpath(src)
                new_link_target = os.path.abspath(merge_map[orig_link_target])
                view.link(new_link_target, dst)

    def remove_files_from_view(self, view, merge_map):
        ignore_namespace = False
@@ -369,19 +346,16 @@ def headers(self) -> HeaderList:
        # Remove py- prefix in package name
        name = self.spec.name[3:]

        # Headers should only be in include or platlib, but no harm in checking purelib too
        # Headers may be in either location
        include = self.prefix.join(self.spec["python"].package.include).join(name)
        platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
        purelib = self.prefix.join(self.spec["python"].package.purelib).join(name)

        headers_list = map(fs.find_all_headers, [include, platlib, purelib])
        headers = functools.reduce(operator.add, headers_list)
        headers = fs.find_all_headers(include) + fs.find_all_headers(platlib)

        if headers:
            return headers

        msg = "Unable to locate {} headers in {}, {}, or {}"
        raise NoHeadersError(msg.format(self.spec.name, include, platlib, purelib))
        msg = "Unable to locate {} headers in {} or {}"
        raise NoHeadersError(msg.format(self.spec.name, include, platlib))

    @property
    def libs(self) -> LibraryList:
@@ -390,19 +364,15 @@ def libs(self) -> LibraryList:
        # Remove py- prefix in package name
        name = self.spec.name[3:]

        # Libraries should only be in platlib, but no harm in checking purelib too
        platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
        purelib = self.prefix.join(self.spec["python"].package.purelib).join(name)
        root = self.prefix.join(self.spec["python"].package.platlib).join(name)

        find_all_libraries = functools.partial(fs.find_all_libraries, recursive=True)
        libs_list = map(find_all_libraries, [platlib, purelib])
        libs = functools.reduce(operator.add, libs_list)
        libs = fs.find_all_libraries(root, recursive=True)

        if libs:
            return libs

        msg = "Unable to recursively locate {} libraries in {} or {}"
        raise NoLibrariesError(msg.format(self.spec.name, platlib, purelib))
        msg = "Unable to recursively locate {} libraries in {}"
        raise NoLibrariesError(msg.format(self.spec.name, root))
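A generic illustration of the map/reduce idiom in the removed code above: reducing with operator.add concatenates each per-directory search result into a single combined value (stand-in lists here; the real code combines HeaderList/LibraryList objects, which also support +).

import functools
import operator

search_results = [["a.h"], [], ["b.h", "c.h"]]  # stand-ins for per-directory results
combined = functools.reduce(operator.add, search_results)
assert combined == ["a.h", "b.h", "c.h"]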
@spack.builder.builder("python_pip")
@@ -75,8 +75,6 @@
# does not like its directory structure.
#

import os

import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.package_base import PackageBase
@@ -156,32 +154,6 @@ def hip_flags(amdgpu_target):
        archs = ",".join(amdgpu_target)
        return "--amdgpu-target={0}".format(archs)

    # ASAN
    @staticmethod
    def asan_on(env, llvm_path):
        env.set("CC", llvm_path + "/bin/clang")
        env.set("CXX", llvm_path + "/bin/clang++")
        env.set("ASAN_OPTIONS", "detect_leaks=0")

        for root, dirs, files in os.walk(llvm_path):
            if "libclang_rt.asan-x86_64.so" in files:
                asan_lib_path = root
        env.prepend_path("LD_LIBRARY_PATH", asan_lib_path)
        SET_DWARF_VERSION_4 = ""
        try:
            # This will throw an error if imported on a non-Linux platform.
            import distro

            distname = distro.id()
        except ImportError:
            distname = "unknown"
        if "rhel" in distname or "sles" in distname:
            SET_DWARF_VERSION_4 = "-gdwarf-5"

        env.set("CFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
        env.set("CXXFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
        env.set("LDFLAGS", "-Wl,--enable-new-dtags -fuse-ld=lld -fsanitize=address -g -Wl,")

    # HIP version vs Architecture

    # TODO: add a bunch of lines like:
@@ -190,9 +162,23 @@ def asan_on(env, llvm_path):

    # Add compiler minimum versions based on the first release where the
    # processor is included in llvm/lib/Support/TargetParser.cpp
    depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx900:xnack-")
    depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx906:xnack-")
    depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx908:xnack-")
    depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx90c")
    depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a")
    depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a:xnack-")
    depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a:xnack+")
    depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx940")
    depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx941")
    depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx942")
    depends_on("llvm-amdgpu@4.5.0:", when="amdgpu_target=gfx1013")
    depends_on("llvm-amdgpu@3.8.0:", when="amdgpu_target=gfx1030")
    depends_on("llvm-amdgpu@3.9.0:", when="amdgpu_target=gfx1031")
    depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx1032")
    depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx1033")
    depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx1034")
    depends_on("llvm-amdgpu@4.5.0:", when="amdgpu_target=gfx1035")
    depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx1036")
    depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1100")
    depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1101")
@@ -9,8 +9,6 @@
import inspect
from typing import List, Optional, Tuple

from llnl.util import lang

import spack.build_environment

#: Builder classes, as registered by the "builder" decorator
@@ -233,27 +231,24 @@ def __new__(mcs, name, bases, attr_dict):
for temporary_stage in (_RUN_BEFORE, _RUN_AFTER):
staged_callbacks = temporary_stage.callbacks

# Here we have an adapter from an old-style package. This means there is no
# hierarchy of builders, and every callback that had to be combined between
# *Package and *Builder has been combined already by _PackageAdapterMeta
if name == "Adapter":
# We don't have callbacks in this class, move on
if not staged_callbacks:
continue

# If we are here we have callbacks. To get a complete list, we accumulate all the
# callbacks from base classes, we deduplicate them, then prepend what we have
# registered here.
# If we are here we have callbacks. To get a complete list, get first what
# was attached to parent classes, then prepend what we have registered here.
#
# The order should be:
# 1. Callbacks are registered in order within the same class
# 2. Callbacks defined in derived classes precede those defined in base
# classes
callbacks_from_base = []
for base in bases:
current_callbacks = getattr(base, temporary_stage.attribute_name, None)
if not current_callbacks:
continue
callbacks_from_base.extend(current_callbacks)
callbacks_from_base = list(lang.dedupe(callbacks_from_base))
callbacks_from_base = getattr(base, temporary_stage.attribute_name, None)
if callbacks_from_base:
break
else:
callbacks_from_base = []

# Set the callbacks in this class and flush the temporary stage
attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base
del temporary_stage.callbacks[:]

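The two strategies differ in which inherited callbacks survive: the accumulate-and-dedupe version collects callbacks from every base class before prepending the current class's own, while the older version stops at the first base that defines any. A small self-contained sketch of the accumulate-and-dedupe ordering, with a first-occurrence `dedupe` standing in for `llnl.util.lang.dedupe` and hypothetical callback names:

```python
def dedupe(seq):
    # stand-in for llnl.util.lang.dedupe: keep first occurrence, preserve order
    seen = set()
    return [x for x in seq if not (x in seen or seen.add(x))]

class Base1:
    _run_after = ["check", "install_log"]

class Base2(Base1):
    _run_after = ["install_log", "sanity_check"]

# Accumulate from all bases, derived-most first, dedupe, then prepend the
# callbacks registered on the class currently being created (names hypothetical).
staged = ["my_new_callback"]
callbacks_from_base = dedupe(Base2._run_after + Base1._run_after)
combined = staged + callbacks_from_base
assert combined == ["my_new_callback", "install_log", "sanity_check", "check"]
```

Derived-class callbacks precede base-class ones, and a callback registered at several levels runs only once.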
@@ -35,9 +35,9 @@ def _misc_cache():


#: Spack's cache for small data
MISC_CACHE: Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_misc_cache)
)
MISC_CACHE: Union[
spack.util.file_cache.FileCache, llnl.util.lang.Singleton
] = llnl.util.lang.Singleton(_misc_cache)


def fetch_cache_location():
@@ -91,6 +91,6 @@ def symlink(self, mirror_ref):


#: Spack's local cache for downloaded source archives
FETCH_CACHE: Union[spack.fetch_strategy.FsCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_fetch_cache)
)
FETCH_CACHE: Union[
spack.fetch_strategy.FsCache, llnl.util.lang.Singleton
] = llnl.util.lang.Singleton(_fetch_cache)

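Both spellings declare the same lazily constructed global; only the line wrapping changes. For reference, a minimal sketch of the lazy-singleton idea, not `llnl.util.lang.Singleton`'s actual implementation:

```python
class Singleton:
    # Minimal sketch of a lazily-constructed global, in the spirit of
    # llnl.util.lang.Singleton; the real class proxies attribute access.
    def __init__(self, factory):
        self._factory = factory
        self._instance = None

    @property
    def instance(self):
        if self._instance is None:
            self._instance = self._factory()  # built on first use only
        return self._instance

MISC_CACHE = Singleton(lambda: {"kind": "file-cache"})  # hypothetical factory
assert MISC_CACHE.instance["kind"] == "file-cache"
```

Deferring construction this way keeps module import cheap and lets configuration be read before the cache object is actually built.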
@@ -70,7 +70,7 @@
JOB_NAME_FORMAT = (
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{arch=architecture}"
)
IS_WINDOWS = sys.platform == "win32"

spack_gpg = spack.main.SpackCommand("gpg")
spack_compiler = spack.main.SpackCommand("compiler")

@@ -103,7 +103,7 @@ def get_job_name(spec: spack.spec.Spec, build_group: str = ""):
job_name = spec.format(JOB_NAME_FORMAT)

if build_group:
job_name = f"{job_name} {build_group}"
job_name = "{0} {1}".format(job_name, build_group)

return job_name[:255]

@@ -114,7 +114,7 @@ def _remove_reserved_tags(tags):


def _spec_deps_key(s):
return f"{s.name}/{s.dag_hash(7)}"
return "{0}/{1}".format(s.name, s.dag_hash(7))


def _add_dependency(spec_label, dep_label, deps):
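Most of the remaining hunks in this file only toggle between f-strings and `str.format`; the rendered text is identical either way, e.g.:

```python
name, dag_hash = "zlib", "abcdef0"  # hypothetical values
assert f"{name}/{dag_hash}" == "{0}/{1}".format(name, dag_hash)
```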
@@ -213,7 +213,7 @@ def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisi
mirrors = spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True)
tty.msg("Checked the following mirrors for binaries:")
for m in mirrors.values():
tty.msg(f" {m.fetch_url}")
tty.msg(" {0}".format(m.fetch_url))

tty.msg("Staging summary ([x] means a job needs rebuilding):")
for stage_index, stage in enumerate(stages):
@@ -296,7 +296,7 @@ def append_dep(s, d):
for spec in spec_list:
for s in spec.traverse(deptype="all"):
if s.external:
tty.msg(f"Will not stage external pkg: {s}")
tty.msg("Will not stage external pkg: {0}".format(s))
continue

skey = _spec_deps_key(s)
@@ -305,7 +305,7 @@ def append_dep(s, d):
for d in s.dependencies(deptype="all"):
dkey = _spec_deps_key(d)
if d.external:
tty.msg(f"Will not stage external dep: {d}")
tty.msg("Will not stage external dep: {0}".format(d))
continue

append_dep(skey, dkey)
@@ -374,8 +374,8 @@ def get_stack_changed(env_path, rev1="HEAD^", rev2="HEAD"):

for path in lines:
if ".gitlab-ci.yml" in path or path in env_path:
tty.debug(f"env represented by {env_path} changed")
tty.debug(f"touched file: {path}")
tty.debug("env represented by {0} changed".format(env_path))
tty.debug("touched file: {0}".format(path))
return True
return False

@@ -419,7 +419,7 @@ def get_spec_filter_list(env, affected_pkgs, dependent_traverse_depth=None):
all_concrete_specs = env.all_specs()
tty.debug("All concrete environment specs:")
for s in all_concrete_specs:
tty.debug(f" {s.name}/{s.dag_hash()[:7]}")
tty.debug(" {0}/{1}".format(s.name, s.dag_hash()[:7]))
affected_pkgs = frozenset(affected_pkgs)
env_matches = [s for s in all_concrete_specs if s.name in affected_pkgs]
visited = set()
@@ -510,7 +510,7 @@ def __is_named(self, section):
and if so return the name otherwise return none.
"""
for _name in self.named_jobs:
keys = [f"{_name}-job", f"{_name}-job-remove"]
keys = ["{0}-job".format(_name), "{0}-job-remove".format(_name)]
if any([key for key in keys if key in section]):
return _name

@@ -525,9 +525,9 @@ def __job_name(name, suffix=""):

jname = name
if suffix:
jname = f"{name}-job{suffix}"
jname = "{0}-job{1}".format(name, suffix)
else:
jname = f"{name}-job"
jname = "{0}-job".format(name)

return jname

@@ -739,7 +739,7 @@ def generate_gitlab_ci_yaml(
# Requested to prune untouched packages, but assume we won't do that
# unless we're actually in a git repo.
rev1, rev2 = get_change_revisions()
tty.debug(f"Got following revisions: rev1={rev1}, rev2={rev2}")
tty.debug("Got following revisions: rev1={0}, rev2={1}".format(rev1, rev2))
if rev1 and rev2:
# If the stack file itself did not change, proceed with pruning
if not get_stack_changed(env.manifest_path, rev1, rev2):
@@ -747,13 +747,13 @@ def generate_gitlab_ci_yaml(
affected_pkgs = compute_affected_packages(rev1, rev2)
tty.debug("affected pkgs:")
for p in affected_pkgs:
tty.debug(f" {p}")
tty.debug(" {0}".format(p))
affected_specs = get_spec_filter_list(
env, affected_pkgs, dependent_traverse_depth=dependent_depth
)
tty.debug("all affected specs:")
for s in affected_specs:
tty.debug(f" {s.name}/{s.dag_hash()[:7]}")
tty.debug(" {0}/{1}".format(s.name, s.dag_hash()[:7]))

# Allow overriding --prune-dag cli opt with environment variable
prune_dag_override = os.environ.get("SPACK_PRUNE_UP_TO_DATE", None)
@@ -978,7 +978,7 @@ def generate_gitlab_ci_yaml(
rebuild_decisions = {}

for stage_jobs in stages:
stage_name = f"stage-{stage_id}"
stage_name = "stage-{0}".format(stage_id)
stage_names.append(stage_name)
stage_id += 1

@@ -1009,7 +1009,7 @@ def generate_gitlab_ci_yaml(
job_object = spack_ci_ir["jobs"][release_spec_dag_hash]["attributes"]

if not job_object:
tty.warn(f"No match found for {release_spec}, skipping it")
tty.warn("No match found for {0}, skipping it".format(release_spec))
continue

if spack_pipeline_type is not None:
@@ -1119,7 +1119,7 @@ def main_script_replacements(cmd):

if artifacts_root:
job_object["needs"].append(
{"job": generate_job_name, "pipeline": f"{parent_pipeline_id}"}
{"job": generate_job_name, "pipeline": "{0}".format(parent_pipeline_id)}
)

# Let downstream jobs know whether the spec needed rebuilding, regardless
@@ -1185,17 +1185,19 @@ def main_script_replacements(cmd):
if spack_pipeline_type == "spack_pull_request":
spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())

tty.debug(f"{job_id} build jobs generated in {stage_id} stages")
tty.debug("{0} build jobs generated in {1} stages".format(job_id, stage_id))

if job_id > 0:
tty.debug(f"The max_needs_job is {max_needs_job}, with {max_length_needs} needs")
tty.debug(
"The max_needs_job is {0}, with {1} needs".format(max_needs_job, max_length_needs)
)

# Use "all_job_names" to populate the build group for this set
if cdash_handler and cdash_handler.auth_token:
try:
cdash_handler.populate_buildgroup(all_job_names)
except (SpackError, HTTPError, URLError) as err:
tty.warn(f"Problem populating buildgroup: {err}")
tty.warn("Problem populating buildgroup: {0}".format(err))
else:
tty.warn("Unable to populate buildgroup without CDash credentials")

@@ -1209,7 +1211,9 @@ def main_script_replacements(cmd):
sync_job = copy.deepcopy(spack_ci_ir["jobs"]["copy"]["attributes"])
sync_job["stage"] = "copy"
if artifacts_root:
sync_job["needs"] = [{"job": generate_job_name, "pipeline": f"{parent_pipeline_id}"}]
sync_job["needs"] = [
{"job": generate_job_name, "pipeline": "{0}".format(parent_pipeline_id)}
]

if "variables" not in sync_job:
sync_job["variables"] = {}
@@ -1226,7 +1230,6 @@ def main_script_replacements(cmd):
# TODO: Remove this condition in Spack 0.23
buildcache_source = os.environ.get("SPACK_SOURCE_MIRROR", None)
sync_job["variables"]["SPACK_BUILDCACHE_SOURCE"] = buildcache_source
sync_job["dependencies"] = []

output_object["copy"] = sync_job
job_id += 1
@@ -1345,7 +1348,7 @@ def main_script_replacements(cmd):

copy_specs_file = os.path.join(
copy_specs_dir,
f"copy_{spack_stack_name if spack_stack_name else 'rebuilt'}_specs.json",
"copy_{}_specs.json".format(spack_stack_name if spack_stack_name else "rebuilt"),
)

with open(copy_specs_file, "w") as fd:
@@ -1437,7 +1440,7 @@ def import_signing_key(base64_signing_key):
fd.write(decoded_key)

key_import_output = spack_gpg("trust", sign_key_path, output=str)
tty.debug(f"spack gpg trust {sign_key_path}")
tty.debug("spack gpg trust {0}".format(sign_key_path))
tty.debug(key_import_output)

# Now print the keys we have for verifying and signing
@@ -1463,39 +1466,45 @@ def can_verify_binaries():
return len(gpg_util.public_keys()) >= 1


def _push_to_build_cache(spec: spack.spec.Spec, sign_binaries: bool, mirror_url: str) -> None:
def _push_mirror_contents(input_spec, sign_binaries, mirror_url):
"""Unchecked version of the public API, for easier mocking"""
bindist.push_or_raise(
spec,
spack.mirror.Mirror.from_url(mirror_url).push_url,
bindist.PushOptions(force=True, unsigned=not sign_binaries),
)
unsigned = not sign_binaries
tty.debug("Creating buildcache ({0})".format("unsigned" if unsigned else "signed"))
push_url = spack.mirror.Mirror.from_url(mirror_url).push_url
return bindist.push(input_spec, push_url, bindist.PushOptions(force=True, unsigned=unsigned))


def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool:
def push_mirror_contents(input_spec: spack.spec.Spec, mirror_url, sign_binaries):
"""Push one or more binary packages to the mirror.

Arguments:

spec: Installed spec to push
mirror_url: URL of target mirror
sign_binaries: If True, spack will attempt to sign binary package before pushing.
input_spec(spack.spec.Spec): Installed spec to push
mirror_url (str): Base url of target mirror
sign_binaries (bool): If True, spack will attempt to sign binary
package before pushing.
"""
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
try:
_push_to_build_cache(spec, sign_binaries, mirror_url)
return True
except bindist.PushToBuildCacheError as e:
tty.error(str(e))
return False
except Exception as e:
# TODO (zackgalbreath): write an adapter for boto3 exceptions so we can catch a specific
# exception instead of parsing str(e)...
msg = str(e)
if any(x in msg for x in ["Access Denied", "InvalidAccessKeyId"]):
tty.error(f"Permission problem writing to {mirror_url}: {msg}")
return _push_mirror_contents(input_spec, sign_binaries, mirror_url)
except Exception as inst:
# If the mirror we're pushing to is on S3 and there's some
# permissions problem, for example, we can't just target
# that exception type here, since users of the
# `spack ci rebuild' may not need or want any dependency
# on boto3. So we use the first non-boto exception type
# in the heirarchy:
# boto3.exceptions.S3UploadFailedError
# boto3.exceptions.Boto3Error
# Exception
# BaseException
# object
err_msg = "Error msg: {0}".format(inst)
if any(x in err_msg for x in ["Access Denied", "InvalidAccessKeyId"]):
tty.msg("Permission problem writing to {0}".format(mirror_url))
tty.msg(err_msg)
return False
raise
else:
raise inst


def remove_other_mirrors(mirrors_to_keep, scope=None):
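One side catches a dedicated `bindist.PushToBuildCacheError`; the other avoids a hard boto3 dependency by inspecting `str(e)` for S3 permission markers. A standalone sketch of that string-matching fallback, with a hypothetical failing push and mirror URL:

```python
def push_or_warn(push, mirror_url):
    # Sketch of the fallback pattern: no boto3 import, so S3 permission
    # failures are recognized by message content rather than by type.
    try:
        push()
        return True
    except Exception as err:  # intentional broad catch, as in the hunk above
        msg = str(err)
        if any(marker in msg for marker in ("Access Denied", "InvalidAccessKeyId")):
            print(f"Permission problem writing to {mirror_url}: {msg}")
            return False
        raise

def failing_push():
    raise RuntimeError("Access Denied")  # hypothetical S3-style failure

assert push_or_warn(failing_push, "s3://example-mirror") is False
```

Parsing message text is fragile, which is exactly what the TODO about a boto3 exception adapter calls out.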
@@ -1522,9 +1531,8 @@ def copy_files_to_artifacts(src, artifacts_dir):
try:
fs.copy(src, artifacts_dir)
except Exception as err:
msg = (
f"Unable to copy files ({src}) to artifacts {artifacts_dir} due to "
f"exception: {str(err)}"
msg = ("Unable to copy files ({0}) to artifacts {1} due to " "exception: {2}").format(
src, artifacts_dir, str(err)
)
tty.warn(msg)

@@ -1540,23 +1548,23 @@ def copy_stage_logs_to_artifacts(job_spec: spack.spec.Spec, job_log_dir: str) ->
job_spec: spec associated with spack install log
job_log_dir: path into which build log should be copied
"""
tty.debug(f"job spec: {job_spec}")
tty.debug("job spec: {0}".format(job_spec))
if not job_spec:
msg = f"Cannot copy stage logs: job spec ({job_spec}) is required"
tty.error(msg)
msg = "Cannot copy stage logs: job spec ({0}) is required"
tty.error(msg.format(job_spec))
return

try:
pkg_cls = spack.repo.PATH.get_pkg_class(job_spec.name)
job_pkg = pkg_cls(job_spec)
tty.debug(f"job package: {job_pkg}")
tty.debug("job package: {0}".format(job_pkg))
except AssertionError:
msg = f"Cannot copy stage logs: job spec ({job_spec}) must be concrete"
tty.error(msg)
msg = "Cannot copy stage logs: job spec ({0}) must be concrete"
tty.error(msg.format(job_spec))
return

stage_dir = job_pkg.stage.path
tty.debug(f"stage dir: {stage_dir}")
tty.debug("stage dir: {0}".format(stage_dir))
for file in [job_pkg.log_path, job_pkg.env_mods_path, *job_pkg.builder.archive_files]:
copy_files_to_artifacts(file, job_log_dir)

@@ -1569,10 +1577,10 @@ def copy_test_logs_to_artifacts(test_stage, job_test_dir):
test_stage (str): test stage path
job_test_dir (str): the destination artifacts test directory
"""
tty.debug(f"test stage: {test_stage}")
tty.debug("test stage: {0}".format(test_stage))
if not os.path.exists(test_stage):
msg = f"Cannot copy test logs: job test stage ({test_stage}) does not exist"
tty.error(msg)
msg = "Cannot copy test logs: job test stage ({0}) does not exist"
tty.error(msg.format(test_stage))
return

copy_files_to_artifacts(os.path.join(test_stage, "*", "*.txt"), job_test_dir)
@@ -1587,7 +1595,7 @@ def download_and_extract_artifacts(url, work_dir):
url (str): Complete url to artifacts.zip file
work_dir (str): Path to destination where artifacts should be extracted
"""
tty.msg(f"Fetching artifacts from: {url}\n")
tty.msg("Fetching artifacts from: {0}\n".format(url))

headers = {"Content-Type": "application/zip"}

@@ -1604,7 +1612,7 @@ def download_and_extract_artifacts(url, work_dir):
response_code = response.getcode()

if response_code != 200:
msg = f"Error response code ({response_code}) in reproduce_ci_job"
msg = "Error response code ({0}) in reproduce_ci_job".format(response_code)
raise SpackError(msg)

artifacts_zip_path = os.path.join(work_dir, "artifacts.zip")
@@ -1634,7 +1642,7 @@ def get_spack_info():

return git_log

return f"no git repo, use spack {spack.spack_version}"
return "no git repo, use spack {0}".format(spack.spack_version)


def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
@@ -1657,8 +1665,8 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
"""
# figure out the path to the spack git version being used for the
# reproduction
print(f"checkout_commit: {checkout_commit}")
print(f"merge_commit: {merge_commit}")
print("checkout_commit: {0}".format(checkout_commit))
print("merge_commit: {0}".format(merge_commit))

dot_git_path = os.path.join(spack.paths.prefix, ".git")
if not os.path.exists(dot_git_path):
@@ -1677,14 +1685,14 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
git("log", "-1", checkout_commit, output=str, error=os.devnull, fail_on_error=False)

if git.returncode != 0:
tty.error(f"Missing commit: {checkout_commit}")
tty.error("Missing commit: {0}".format(checkout_commit))
return False

if merge_commit:
git("log", "-1", merge_commit, output=str, error=os.devnull, fail_on_error=False)

if git.returncode != 0:
tty.error(f"Missing commit: {merge_commit}")
tty.error("Missing commit: {0}".format(merge_commit))
return False

# Next attempt to clone your local spack repo into the repro dir
@@ -1707,7 +1715,7 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
)

if git.returncode != 0:
tty.error(f"Unable to checkout {checkout_commit}")
tty.error("Unable to checkout {0}".format(checkout_commit))
tty.msg(co_out)
return False

@@ -1726,7 +1734,7 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
)

if git.returncode != 0:
tty.error(f"Unable to merge {merge_commit}")
tty.error("Unable to merge {0}".format(merge_commit))
tty.msg(merge_out)
return False

@@ -1747,7 +1755,6 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
commands to run to reproduce the build once inside the container.
"""
work_dir = os.path.realpath(work_dir)
platform_script_ext = "ps1" if IS_WINDOWS else "sh"
download_and_extract_artifacts(url, work_dir)

gpg_path = None
@@ -1758,13 +1765,13 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
lock_file = fs.find(work_dir, "spack.lock")[0]
repro_lock_dir = os.path.dirname(lock_file)

tty.debug(f"Found lock file in: {repro_lock_dir}")
tty.debug("Found lock file in: {0}".format(repro_lock_dir))

yaml_files = fs.find(work_dir, ["*.yaml", "*.yml"])

tty.debug("yaml files:")
for yaml_file in yaml_files:
tty.debug(f" {yaml_file}")
tty.debug(" {0}".format(yaml_file))

pipeline_yaml = None

@@ -1779,10 +1786,10 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
pipeline_yaml = yaml_obj

if pipeline_yaml:
tty.debug(f"\n{yf} is likely your pipeline file")
tty.debug("\n{0} is likely your pipeline file".format(yf))

relative_concrete_env_dir = pipeline_yaml["variables"]["SPACK_CONCRETE_ENV_DIR"]
tty.debug(f"Relative environment path used by cloud job: {relative_concrete_env_dir}")
tty.debug("Relative environment path used by cloud job: {0}".format(relative_concrete_env_dir))

# Using the relative concrete environment path found in the generated
# pipeline variable above, copy the spack environment files so they'll
@@ -1796,11 +1803,10 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
shutil.copyfile(orig_yaml_path, copy_yaml_path)

# Find the install script in the unzipped artifacts and make it executable
install_script = fs.find(work_dir, f"install.{platform_script_ext}")[0]
if not IS_WINDOWS:
# pointless on Windows
st = os.stat(install_script)
os.chmod(install_script, st.st_mode | stat.S_IEXEC)
install_script = fs.find(work_dir, "install.sh")[0]
st = os.stat(install_script)
os.chmod(install_script, st.st_mode | stat.S_IEXEC)

# Find the repro details file. This just includes some values we wrote
# during `spack ci rebuild` to make reproduction easier. E.g. the job
# name is written here so we can easily find the configuration of the
@@ -1838,7 +1844,7 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
job_image = job_image_elt["name"]
else:
job_image = job_image_elt
tty.msg(f"Job ran with the following image: {job_image}")
tty.msg("Job ran with the following image: {0}".format(job_image))

# Because we found this job was run with a docker image, so we will try
# to print a "docker run" command that bind-mounts the directory where
@@ -1913,75 +1919,65 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
job_tags = None
if "tags" in job_yaml:
job_tags = job_yaml["tags"]
tty.msg(f"Job ran with the following tags: {job_tags}")
tty.msg("Job ran with the following tags: {0}".format(job_tags))

entrypoint_script = [
["git", "config", "--global", "--add", "safe.directory", mount_as_dir],
[
".",
os.path.join(
mount_as_dir if job_image else work_dir,
f"share/spack/setup-env.{platform_script_ext}",
),
],
[".", os.path.join(mount_as_dir if job_image else work_dir, "share/spack/setup-env.sh")],
["spack", "gpg", "trust", mounted_gpg_path if job_image else gpg_path] if gpg_path else [],
["spack", "env", "activate", mounted_env_dir if job_image else repro_dir],
[
(
os.path.join(mounted_repro_dir, f"install.{platform_script_ext}")
if job_image
else install_script
)
],
[os.path.join(mounted_repro_dir, "install.sh") if job_image else install_script],
]
entry_script = os.path.join(mounted_workdir, f"entrypoint.{platform_script_ext}")

inst_list = []
# Finally, print out some instructions to reproduce the build
if job_image:
# Allow interactive
install_mechanism = (
os.path.join(mounted_repro_dir, f"install.{platform_script_ext}")
if job_image
else install_script
entrypoint_script.extend(
[
[
"echo",
"Re-run install script using:\n\t{0}".format(
os.path.join(mounted_repro_dir, "install.sh")
if job_image
else install_script
),
],
# Allow interactive
["exec", "$@"],
]
)
entrypoint_script.append(["echo", f"Re-run install script using:\n\t{install_mechanism}"])
# Allow interactive
if IS_WINDOWS:
entrypoint_script.append(["&", "($args -Join ' ')", "-NoExit"])
else:
entrypoint_script.append(["exec", "$@"])

process_command(
"entrypoint", entrypoint_script, work_dir, run=False, exit_on_failure=False
)

docker_command = [
runtime,
"run",
"-i",
"-t",
"--rm",
"--name",
"spack_reproducer",
"-v",
":".join([work_dir, mounted_workdir, "Z"]),
"-v",
":".join(
[
os.path.join(work_dir, "jobs_scratch_dir"),
os.path.join(mount_as_dir, "jobs_scratch_dir"),
"Z",
]
),
"-v",
":".join([os.path.join(work_dir, "spack"), mount_as_dir, "Z"]),
"--entrypoint",
[
runtime,
"run",
"-i",
"-t",
"--rm",
"--name",
"spack_reproducer",
"-v",
":".join([work_dir, mounted_workdir, "Z"]),
"-v",
":".join(
[
os.path.join(work_dir, "jobs_scratch_dir"),
os.path.join(mount_as_dir, "jobs_scratch_dir"),
"Z",
]
),
"-v",
":".join([os.path.join(work_dir, "spack"), mount_as_dir, "Z"]),
"--entrypoint",
os.path.join(mounted_workdir, "entrypoint.sh"),
job_image,
"bash",
]
]
if IS_WINDOWS:
docker_command.extend(["powershell.exe", job_image, entry_script, "powershell.exe"])
else:
docker_command.extend([entry_script, job_image, "bash"])
docker_command = [docker_command]
autostart = autostart and setup_result
process_command("start", docker_command, work_dir, run=autostart)

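Both sides assemble the same kind of `docker run` invocation from `-v host:container:options` triples; the newer side only parameterizes the entrypoint and trailing interpreter per platform. A sketch of the composition with hypothetical paths and image:

```python
import os

work_dir = "/tmp/repro"            # hypothetical host directory
mounted_workdir = "/work"          # hypothetical path inside the container
mount_as_dir = "/spack"            # hypothetical spack mount point

docker_command = [
    "docker", "run", "-i", "-t", "--rm", "--name", "spack_reproducer",
    # host:container:options triples, as in the hunk above
    "-v", ":".join([work_dir, mounted_workdir, "Z"]),
    "-v", ":".join([os.path.join(work_dir, "spack"), mount_as_dir, "Z"]),
    "--entrypoint", os.path.join(mounted_workdir, "entrypoint.sh"),
    "ubuntu:22.04", "bash",        # hypothetical job image
]
print(" ".join(docker_command))
```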
@@ -1990,26 +1986,22 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
inst_list.extend(
[
" - Start the docker container install",
f" $ {work_dir}/start.{platform_script_ext}",
" $ {0}/start.sh".format(work_dir),
]
)
else:
autostart = autostart and setup_result
process_command("reproducer", entrypoint_script, work_dir, run=autostart)
process_command("reproducer", entrypoint_script, work_dir, run=False)

inst_list.append("\nOnce on the tagged runner:\n\n")
inst_list.extent(
[
" - Run the reproducer script",
f" $ {work_dir}/reproducer.{platform_script_ext}",
]
[" - Run the reproducer script", " $ {0}/reproducer.sh".format(work_dir)]
)

if not setup_result:
inst_list.append("\n - Clone spack and acquire tested commit")
inst_list.append(f"\n {spack_info}\n")
inst_list.append("\n {0}\n".format(spack_info))
inst_list.append("\n")
inst_list.append(f"\n Path to clone spack: {work_dir}/spack\n\n")
inst_list.append("\n Path to clone spack: {0}/spack\n\n".format(work_dir))

tty.msg("".join(inst_list))

@@ -2028,78 +2020,50 @@ def process_command(name, commands, repro_dir, run=True, exit_on_failure=True):

Returns: the exit code from processing the command
"""
tty.debug("spack {0} arguments: {1}".format(name, commands))

tty.debug(f"spack {name} arguments: {commands}")
if len(commands) == 0 or isinstance(commands[0], str):
commands = [commands]

def compose_command_err_handling(args):
if not IS_WINDOWS:
args = [f'"{arg}"' for arg in args]
arg_str = " ".join(args)
result = arg_str + "\n"
# ErrorActionPreference will handle PWSH commandlets (Spack calls),
# but we need to handle EXEs (git, etc) ourselves
catch_exe_failure = (
"""
if ($LASTEXITCODE -ne 0){
throw "Command {} has failed"
}
"""
if IS_WINDOWS
else ""
)
if exit_on_failure and catch_exe_failure:
result += catch_exe_failure.format(arg_str)
return result

# Create a string [command 1] \n [command 2] \n ... \n [command n] with
# commands composed into a platform dependent shell script, pwsh on Windows,
full_command = "\n".join(map(compose_command_err_handling, commands))
# Write the command to a python script
if IS_WINDOWS:
script = f"{name}.ps1"
script_content = [f"\n# spack {name} command\n"]
if exit_on_failure:
script_content.append('$ErrorActionPreference = "Stop"\n')
if os.environ.get("SPACK_VERBOSE_SCRIPT"):
script_content.append("Set-PSDebug -Trace 2\n")
else:
script = f"{name}.sh"
script_content = ["#!/bin/sh\n\n", f"\n# spack {name} command\n"]
if exit_on_failure:
script_content.append("set -e\n")
if os.environ.get("SPACK_VERBOSE_SCRIPT"):
script_content.append("set -x\n")
script_content.append(full_command)
script_content.append("\n")
# Create a string [command 1] && [command 2] && ... && [command n] with commands
# quoted using double quotes.
args_to_string = lambda args: " ".join('"{}"'.format(arg) for arg in args)
full_command = " \n ".join(map(args_to_string, commands))

# Write the command to a shell script
script = "{0}.sh".format(name)
with open(script, "w") as fd:
for line in script_content:
fd.write(line)
fd.write("#!/bin/sh\n\n")
fd.write("\n# spack {0} command\n".format(name))
if exit_on_failure:
fd.write("set -e\n")
if os.environ.get("SPACK_VERBOSE_SCRIPT"):
fd.write("set -x\n")
fd.write(full_command)
fd.write("\n")

st = os.stat(script)
os.chmod(script, st.st_mode | stat.S_IEXEC)

copy_path = os.path.join(repro_dir, script)
shutil.copyfile(script, copy_path)
if not IS_WINDOWS:
st = os.stat(copy_path)
os.chmod(copy_path, st.st_mode | stat.S_IEXEC)
st = os.stat(copy_path)
os.chmod(copy_path, st.st_mode | stat.S_IEXEC)

# Run the generated shell script as if it were being run in
# Run the generated install.sh shell script as if it were being run in
# a login shell.
exit_code = None
if run:
try:
# We use sh as executor on Linux like platforms, pwsh on Windows
interpreter = "powershell.exe" if IS_WINDOWS else "/bin/sh"
cmd_process = subprocess.Popen([interpreter, f"./{script}"])
cmd_process = subprocess.Popen(["/bin/sh", "./{0}".format(script)])
cmd_process.wait()
exit_code = cmd_process.returncode
except (ValueError, subprocess.CalledProcessError, OSError) as err:
tty.error(f"Encountered error running {name} script")
tty.error("Encountered error running {0} script".format(name))
tty.error(err)
exit_code = 1

tty.debug(f"spack {name} exited {exit_code}")
tty.debug("spack {0} exited {1}".format(name, exit_code))
else:
# Delete the script, it is copied to the destination dir
os.remove(script)
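One side of `process_command` writes either a PowerShell or a POSIX `sh` script, selecting the preamble per platform; the other always emits `<name>.sh`. A runnable sketch of just the preamble selection (function name hypothetical):

```python
import os
import sys

IS_WINDOWS = sys.platform == "win32"

def script_header(name, exit_on_failure=True, verbose=False):
    # Sketch of the per-platform preamble selection from the hunk above.
    if IS_WINDOWS:
        lines = [f"\n# spack {name} command\n"]
        if exit_on_failure:
            lines.append('$ErrorActionPreference = "Stop"\n')
        if verbose:
            lines.append("Set-PSDebug -Trace 2\n")
    else:
        lines = ["#!/bin/sh\n\n", f"\n# spack {name} command\n"]
        if exit_on_failure:
            lines.append("set -e\n")
        if verbose:
            lines.append("set -x\n")
    return lines

print("".join(script_header("reproducer", verbose=bool(os.environ.get("SPACK_VERBOSE_SCRIPT")))))
```

`set -e`/`$ErrorActionPreference = "Stop"` play the same role on the two platforms: abort the script on the first failing command.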
@@ -2124,7 +2088,7 @@ def create_buildcache(
for mirror_url in destination_mirror_urls:
results.append(
PushResult(
success=push_to_build_cache(input_spec, mirror_url, sign_binaries), url=mirror_url
success=push_mirror_contents(input_spec, mirror_url, sign_binaries), url=mirror_url
)
)

@@ -2158,7 +2122,7 @@ def write_broken_spec(url, pkg_name, stack_name, job_url, pipeline_url, spec_dic
# If there is an S3 error (e.g., access denied or connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = f"Error writing to broken specs list {url}: {err}"
msg = "Error writing to broken specs list {0}: {1}".format(url, err)
tty.warn(msg)
finally:
shutil.rmtree(tmpdir)
@@ -2171,7 +2135,7 @@ def read_broken_spec(broken_spec_url):
try:
_, _, fs = web_util.read_from_url(broken_spec_url)
except (URLError, web_util.SpackWebError, HTTPError):
tty.warn(f"Unable to read broken spec from {broken_spec_url}")
tty.warn("Unable to read broken spec from {0}".format(broken_spec_url))
return None

broken_spec_contents = codecs.getreader("utf-8")(fs).read()
@@ -2186,14 +2150,14 @@ def display_broken_spec_messages(base_url, hashes):
for spec_hash, broken_spec in [tup for tup in broken_specs if tup[1]]:
details = broken_spec["broken-spec"]
if "job-name" in details:
item_name = f"{details['job-name']}/{spec_hash[:7]}"
item_name = "{0}/{1}".format(details["job-name"], spec_hash[:7])
else:
item_name = spec_hash

if "job-stack" in details:
item_name = f"{item_name} (in stack {details['job-stack']})"
item_name = "{0} (in stack {1})".format(item_name, details["job-stack"])

msg = f" {item_name} was reported broken here: {details['job-url']}"
msg = " {0} was reported broken here: {1}".format(item_name, details["job-url"])
tty.msg(msg)


@@ -2216,7 +2180,7 @@ def run_standalone_tests(**kwargs):
log_file = kwargs.get("log_file")

if cdash and log_file:
tty.msg(f"The test log file {log_file} option is ignored with CDash reporting")
tty.msg("The test log file {0} option is ignored with CDash reporting".format(log_file))
log_file = None

# Error out but do NOT terminate if there are missing required arguments.
@@ -2242,10 +2206,10 @@ def run_standalone_tests(**kwargs):
test_args.extend(["--log-file", log_file])
test_args.append(job_spec.name)

tty.debug(f"Running {job_spec.name} stand-alone tests")
tty.debug("Running {0} stand-alone tests".format(job_spec.name))
exit_code = process_command("test", test_args, repro_dir)

tty.debug(f"spack test exited {exit_code}")
tty.debug("spack test exited {0}".format(exit_code))


class CDashHandler:
@@ -2268,7 +2232,7 @@ def __init__(self, ci_cdash):
# append runner description to the site if available
runner = os.environ.get("CI_RUNNER_DESCRIPTION")
if runner:
self.site += f" ({runner})"
self.site += " ({0})".format(runner)

# track current spec, if any
self.current_spec = None
@@ -2296,13 +2260,21 @@ def build_name(self):
Returns: (str) current spec's CDash build name."""
spec = self.current_spec
if spec:
build_name = f"{spec.name}@{spec.version}%{spec.compiler} \
hash={spec.dag_hash()} arch={spec.architecture} ({self.build_group})"
tty.debug(f"Generated CDash build name ({build_name}) from the {spec.name}")
build_name = "{0}@{1}%{2} hash={3} arch={4} ({5})".format(
spec.name,
spec.version,
spec.compiler,
spec.dag_hash(),
spec.architecture,
self.build_group,
)
tty.debug(
"Generated CDash build name ({0}) from the {1}".format(build_name, spec.name)
)
return build_name

build_name = os.environ.get("SPACK_CDASH_BUILD_NAME")
tty.debug(f"Using CDash build name ({build_name}) from the environment")
tty.debug("Using CDash build name ({0}) from the environment".format(build_name))
return build_name

@property # type: ignore
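Either spelling of `build_name` produces the same CDash identifier. A standalone check with a dictionary standing in for a concrete spec (all values hypothetical):

```python
spec = {
    "name": "zlib", "version": "1.3", "compiler": "gcc@12.3.0",
    "dag_hash": "abcdef0", "arch": "linux-ubuntu22.04-x86_64",
}  # hypothetical stand-in for a concrete Spec
build_group = "Experimental"

build_name = "{0}@{1}%{2} hash={3} arch={4} ({5})".format(
    spec["name"], spec["version"], spec["compiler"],
    spec["dag_hash"], spec["arch"], build_group,
)
assert build_name == "zlib@1.3%gcc@12.3.0 hash=abcdef0 arch=linux-ubuntu22.04-x86_64 (Experimental)"
```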
@@ -2316,25 +2288,25 @@ def build_stamp(self):
Returns: (str) current CDash build stamp"""
build_stamp = os.environ.get("SPACK_CDASH_BUILD_STAMP")
if build_stamp:
tty.debug(f"Using build stamp ({build_stamp}) from the environment")
tty.debug("Using build stamp ({0}) from the environment".format(build_stamp))
return build_stamp

build_stamp = cdash_build_stamp(self.build_group, time.time())
tty.debug(f"Generated new build stamp ({build_stamp})")
tty.debug("Generated new build stamp ({0})".format(build_stamp))
return build_stamp

@property # type: ignore
@memoized
def project_enc(self):
tty.debug(f"Encoding project ({type(self.project)}): {self.project})")
tty.debug("Encoding project ({0}): {1})".format(type(self.project), self.project))
encode = urlencode({"project": self.project})
index = encode.find("=") + 1
return encode[index:]

@property
def upload_url(self):
url_format = f"{self.url}/submit.php?project={self.project_enc}"
return url_format
url_format = "{0}/submit.php?project={1}"
return url_format.format(self.url, self.project_enc)

def copy_test_results(self, source, dest):
"""Copy test results to artifacts directory."""
@@ -2352,7 +2324,7 @@ def create_buildgroup(self, opener, headers, url, group_name, group_type):
response_code = response.getcode()

if response_code not in [200, 201]:
msg = f"Creating buildgroup failed (response code = {response_code})"
msg = "Creating buildgroup failed (response code = {0})".format(response_code)
tty.warn(msg)
return None

@@ -2363,10 +2335,10 @@ def create_buildgroup(self, opener, headers, url, group_name, group_type):
return build_group_id

def populate_buildgroup(self, job_names):
url = f"{self.url}/api/v1/buildgroup.php"
url = "{0}/api/v1/buildgroup.php".format(self.url)

headers = {
"Authorization": f"Bearer {self.auth_token}",
"Authorization": "Bearer {0}".format(self.auth_token),
"Content-Type": "application/json",
}

@@ -2374,11 +2346,11 @@ def populate_buildgroup(self, job_names):

parent_group_id = self.create_buildgroup(opener, headers, url, self.build_group, "Daily")
group_id = self.create_buildgroup(
opener, headers, url, f"Latest {self.build_group}", "Latest"
opener, headers, url, "Latest {0}".format(self.build_group), "Latest"
)

if not parent_group_id or not group_id:
msg = f"Failed to create or retrieve buildgroups for {self.build_group}"
msg = "Failed to create or retrieve buildgroups for {0}".format(self.build_group)
tty.warn(msg)
return

@@ -2398,7 +2370,7 @@ def populate_buildgroup(self, job_names):
response_code = response.getcode()

if response_code != 200:
msg = f"Error response code ({response_code}) in populate_buildgroup"
msg = "Error response code ({0}) in populate_buildgroup".format(response_code)
tty.warn(msg)

def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optional[str]):

@@ -7,7 +7,9 @@
get_job_name = lambda needs_entry: (
needs_entry.get("job")
if (isinstance(needs_entry, collections.abc.Mapping) and needs_entry.get("artifacts", True))
else needs_entry if isinstance(needs_entry, str) else None
else needs_entry
if isinstance(needs_entry, str)
else None
)



@@ -7,14 +7,13 @@
import glob
import hashlib
import json
import multiprocessing
import multiprocessing.pool
import os
import shutil
import sys
import tempfile
import urllib.request
from typing import Dict, List, Optional, Tuple, Union
from typing import Dict, List, Optional, Tuple

import llnl.util.tty as tty
from llnl.string import plural
@@ -275,37 +274,23 @@ def setup_parser(subparser: argparse.ArgumentParser):

# Sync buildcache entries from one mirror to another
sync = subparsers.add_parser("sync", help=sync_fn.__doc__)

sync_manifest_source = sync.add_argument_group(
"Manifest Source",
"Specify a list of build cache objects to sync using manifest file(s)."
'This option takes the place of the "source mirror" for synchronization'
'and optionally takes a "destination mirror" ',
sync.add_argument(
"--manifest-glob", help="a quoted glob pattern identifying copy manifest files"
)
sync_manifest_source.add_argument(
"--manifest-glob", help="a quoted glob pattern identifying CI rebuild manifest files"
)
sync_source_mirror = sync.add_argument_group(
"Named Source",
"Specify a single registered source mirror to synchronize from. This option requires"
"the specification of a destination mirror.",
)
sync_source_mirror.add_argument(
sync.add_argument(
"src_mirror",
metavar="source mirror",
nargs="?",
type=arguments.mirror_name_or_url,
nargs="?",
help="source mirror name, path, or URL",
)

sync.add_argument(
"dest_mirror",
metavar="destination mirror",
nargs="?",
type=arguments.mirror_name_or_url,
nargs="?",
help="destination mirror name, path, or URL",
)

sync.set_defaults(func=sync_fn)

# Update buildcache index without copying any additional packages
@@ -341,30 +326,8 @@ def _progress(i: int, total: int):
return ""


class NoPool:
def map(self, func, args):
return [func(a) for a in args]

def starmap(self, func, args):
return [func(*a) for a in args]

def __enter__(self):
return self

def __exit__(self, *args):
pass


MaybePool = Union[multiprocessing.pool.Pool, NoPool]


def _make_pool() -> MaybePool:
"""Can't use threading because it's unsafe, and can't use spawned processes because of globals.
That leaves only forking"""
if multiprocessing.get_start_method() == "fork":
return multiprocessing.pool.Pool(determine_number_of_jobs(parallel=True))
else:
return NoPool()
def _make_pool():
return multiprocessing.pool.Pool(determine_number_of_jobs(parallel=True))


def push_fn(args):
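The `NoPool` fallback keeps the caller's code identical whether or not forking is available: same context-manager protocol, same `map`/`starmap` surface, just executed serially. A minimal usage sketch (worker count hypothetical):

```python
import multiprocessing
import multiprocessing.pool

class NoPool:
    # Serial stand-in with the same context-manager and map surface.
    def map(self, func, args):
        return [func(a) for a in args]
    def __enter__(self):
        return self
    def __exit__(self, *args):
        pass

def make_pool():
    # Fork is required because the workers rely on inherited globals;
    # under spawn (e.g. macOS default) fall back to serial execution.
    if multiprocessing.get_start_method() == "fork":
        return multiprocessing.pool.Pool(2)  # worker count hypothetical
    return NoPool()

if __name__ == "__main__":
    with make_pool() as pool:
        print(pool.map(abs, [-1, -2, 3]))  # [1, 2, 3] either way
```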
@@ -608,15 +571,6 @@ def _put_manifest(
base_manifest, base_config = base_images[architecture]
env = _retrieve_env_dict_from_config(base_config)

# If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
# This is because Singularity / Apptainer is very strict about not mixing them.
base_manifest_mediaType = base_manifest.get(
"mediaType", "application/vnd.oci.image.manifest.v1+json"
)
use_docker_format = (
base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
)

spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)

# Create an oci.image.config file
@@ -648,8 +602,8 @@ def _put_manifest(
# Upload the config file
upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)

manifest = {
"mediaType": base_manifest_mediaType,
oci_manifest = {
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"schemaVersion": 2,
"config": {
"mediaType": base_manifest["config"]["mediaType"],
@@ -660,11 +614,7 @@ def _put_manifest(
*(layer for layer in base_manifest["layers"]),
*(
{
"mediaType": (
"application/vnd.docker.image.rootfs.diff.tar.gzip"
if use_docker_format
else "application/vnd.oci.image.layer.v1.tar+gzip"
),
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"digest": str(checksums[s.dag_hash()].compressed_digest),
"size": checksums[s.dag_hash()].size,
}
@@ -673,11 +623,11 @@ def _put_manifest(
],
}

if not use_docker_format and annotations:
manifest["annotations"] = annotations
if annotations:
oci_manifest["annotations"] = annotations

# Finally upload the manifest
upload_manifest_with_retry(image_ref, manifest=manifest)
upload_manifest_with_retry(image_ref, oci_manifest=oci_manifest)

# delete the config file
os.unlink(config_file)
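The media-type logic above exists because Singularity/Apptainer is strict about not mixing Docker and OCI manifest types, so the layer type has to follow the base image's manifest type. A small sketch of that selection:

```python
DOCKER_MANIFEST = "application/vnd.docker.distribution.manifest.v2+json"
OCI_MANIFEST = "application/vnd.oci.image.manifest.v1+json"
DOCKER_LAYER = "application/vnd.docker.image.rootfs.diff.tar.gzip"
OCI_LAYER = "application/vnd.oci.image.layer.v1.tar+gzip"

def layer_media_type(base_manifest: dict) -> str:
    # Follow the base image's format so manifest and layers are not mixed.
    base_type = base_manifest.get("mediaType", OCI_MANIFEST)
    return DOCKER_LAYER if base_type == DOCKER_MANIFEST else OCI_LAYER

assert layer_media_type({"mediaType": DOCKER_MANIFEST}) == DOCKER_LAYER
assert layer_media_type({}) == OCI_LAYER  # OCI is the default
```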
@@ -713,7 +663,7 @@ def _push_oci(
base_image: Optional[ImageReference],
installed_specs_with_deps: List[Spec],
tmpdir: str,
pool: MaybePool,
pool: multiprocessing.pool.Pool,
force: bool = False,
) -> Tuple[List[str], Dict[str, Tuple[dict, dict]], Dict[str, spack.oci.oci.Blob]]:
"""Push specs to an OCI registry
@@ -829,10 +779,11 @@ def _config_from_tag(image_ref: ImageReference, tag: str) -> Optional[dict]:
return config if "spec" in config else None


def _update_index_oci(image_ref: ImageReference, tmpdir: str, pool: MaybePool) -> None:
request = urllib.request.Request(url=image_ref.tags_url())
response = spack.oci.opener.urlopen(request)
spack.oci.opener.ensure_status(request, response, 200)
def _update_index_oci(
image_ref: ImageReference, tmpdir: str, pool: multiprocessing.pool.Pool
) -> None:
response = spack.oci.opener.urlopen(urllib.request.Request(url=image_ref.tags_url()))
spack.oci.opener.ensure_status(response, 200)
tags = json.load(response)["tags"]

# Fetch all image config files in parallel
@@ -1084,17 +1035,7 @@ def sync_fn(args):
requires an active environment in order to know which specs to sync
"""
if args.manifest_glob:
# Passing the args.src_mirror here because it is not possible to
# have the destination be required when specifying a named source
# mirror and optional for the --manifest-glob argument. In the case
# of manifest glob sync, the source mirror positional argument is the
# destination mirror if it is specified. If there are two mirrors
# specified, the second is ignored and the first is the override
# destination.
if args.dest_mirror:
tty.warn(f"Ignoring unused arguemnt: {args.dest_mirror.name}")

manifest_copy(glob.glob(args.manifest_glob), args.src_mirror)
manifest_copy(glob.glob(args.manifest_glob))
return 0

if args.src_mirror is None or args.dest_mirror is None:
@@ -1145,7 +1086,7 @@ def sync_fn(args):
shutil.rmtree(tmpdir)


def manifest_copy(manifest_file_list, dest_mirror=None):
def manifest_copy(manifest_file_list):
"""Read manifest files containing information about specific specs to copy
from source to destination, remove duplicates since any binary packge for
a given hash should be the same as any other, and copy all files specified
@@ -1159,17 +1100,10 @@ def manifest_copy(manifest_file_list, dest_mirror=None):
# Last duplicate hash wins
deduped_manifest[spec_hash] = copy_list

build_cache_dir = bindist.build_cache_relative_path()
for spec_hash, copy_list in deduped_manifest.items():
for copy_file in copy_list:
dest = copy_file["dest"]
if dest_mirror:
src_relative_path = os.path.join(
build_cache_dir, copy_file["src"].rsplit(build_cache_dir, 1)[1].lstrip("/")
)
dest = url_util.join(dest_mirror.push_url, src_relative_path)
tty.debug("copying {0} to {1}".format(copy_file["src"], dest))
copy_buildcache_file(copy_file["src"], dest)
tty.debug("copying {0} to {1}".format(copy_file["src"], copy_file["dest"]))
copy_buildcache_file(copy_file["src"], copy_file["dest"])


def update_index(mirror: spack.mirror.Mirror, update_keys=False):
@@ -1196,18 +1130,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
url, bindist.build_cache_relative_path(), bindist.build_cache_keys_relative_path()
)

try:
bindist.generate_key_index(keys_url)
except bindist.CannotListKeys as e:
# Do not error out if listing keys went wrong. This usually means that the _gpg path
# does not exist. TODO: distinguish between this and other errors.
tty.warn(f"did not update the key index: {e}")
bindist.generate_key_index(keys_url)


def update_index_fn(args):
"""update a buildcache index"""
return update_index(args.mirror, update_keys=args.keys)
update_index(args.mirror, update_keys=args.keys)


def buildcache(parser, args):
return args.func(args)
if args.func:
args.func(args)

@@ -5,7 +5,6 @@

import re
import sys
from typing import Dict, Optional

import llnl.string
import llnl.util.lang
@@ -18,15 +17,10 @@
import spack.util.crypto
import spack.util.web as web_util
from spack.cmd.common import arguments
from spack.package_base import (
ManualDownloadRequiredError,
PackageBase,
deprecated_version,
preferred_version,
)
from spack.package_base import PackageBase, deprecated_version, preferred_version
from spack.util.editor import editor
from spack.util.format import get_version_lines
from spack.version import StandardVersion, Version
from spack.version import Version

description = "checksum available versions of a package"
section = "packaging"
@@ -90,30 +84,28 @@ def checksum(parser, args):
spec = spack.spec.Spec(args.package)

# Get the package we're going to generate checksums for
pkg: PackageBase = spack.repo.PATH.get_pkg_class(spec.name)(spec)
pkg = spack.repo.PATH.get_pkg_class(spec.name)(spec)

# Skip manually downloaded packages
if pkg.manual_download:
raise ManualDownloadRequiredError(pkg.download_instr)
versions = [Version(v) for v in args.versions]

versions = [StandardVersion.from_string(v) for v in args.versions]

# Define placeholder for remote versions. This'll help reduce redundant work if we need to
# check for the existence of remote versions more than once.
remote_versions: Optional[Dict[StandardVersion, str]] = None
# Define placeholder for remote versions.
# This'll help reduce redundant work if we need to check for the existance
# of remote versions more than once.
remote_versions = None

# Add latest version if requested
if args.latest:
remote_versions = pkg.fetch_remote_versions(concurrency=args.jobs)
remote_versions = pkg.fetch_remote_versions(args.jobs)
if len(remote_versions) > 0:
versions.append(max(remote_versions.keys()))
latest_version = sorted(remote_versions.keys(), reverse=True)[0]
versions.append(latest_version)

# Add preferred version if requested (todo: exclude git versions)
# Add preferred version if requested
if args.preferred:
versions.append(preferred_version(pkg))

# Store a dict of the form version -> URL
url_dict: Dict[StandardVersion, str] = {}
url_dict = {}

for version in versions:
if deprecated_version(pkg, version):
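`max()` over the remote version keys picks the same release as `sorted(..., reverse=True)[0]` while avoiding the full sort. A quick check with tuples standing in for version objects:

```python
# Stand-in versions; spack's StandardVersion also supports ordering.
remote_versions = {(1, 2, 11): "u1", (1, 2, 13): "u2", (1, 3, 0): "u3"}

# sorted(..., reverse=True)[0] and max(...) pick the same key,
# but max() runs in a single O(n) pass with no intermediate list.
latest_sorted = sorted(remote_versions.keys(), reverse=True)[0]
assert max(remote_versions.keys()) == latest_sorted == (1, 3, 0)
```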
@@ -123,16 +115,16 @@ def checksum(parser, args):
|
||||
if url is not None:
|
||||
url_dict[version] = url
|
||||
continue
|
||||
# If we get here, it's because no valid url was provided by the package. Do expensive
|
||||
# fallback to try to recover
|
||||
# if we get here, it's because no valid url was provided by the package
|
||||
# do expensive fallback to try to recover
|
||||
if remote_versions is None:
|
||||
remote_versions = pkg.fetch_remote_versions(concurrency=args.jobs)
|
||||
remote_versions = pkg.fetch_remote_versions(args.jobs)
|
||||
if version in remote_versions:
|
||||
url_dict[version] = remote_versions[version]
|
||||
|
||||
if len(versions) <= 0:
|
||||
if remote_versions is None:
|
||||
remote_versions = pkg.fetch_remote_versions(concurrency=args.jobs)
|
||||
remote_versions = pkg.fetch_remote_versions(args.jobs)
|
||||
url_dict = remote_versions
|
||||
|
||||
# A spidered URL can differ from the package.py *computed* URL, pointing to different tarballs.
|
||||
@@ -183,7 +175,7 @@ def checksum(parser, args):
|
||||
print()
|
||||
|
||||
if args.add_to_package:
|
||||
add_versions_to_package(pkg, version_lines, args.batch)
|
||||
add_versions_to_package(pkg, version_lines)
|
||||
|
||||
|
||||
def print_checksum_status(pkg: PackageBase, version_hashes: dict):
|
||||
@@ -229,7 +221,7 @@ def print_checksum_status(pkg: PackageBase, version_hashes: dict):
|
||||
tty.die("Invalid checksums found.")
|
||||
|
||||
|
||||
def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool):
|
||||
def add_versions_to_package(pkg: PackageBase, version_lines: str):
|
||||
"""
|
||||
Add checksumed versions to a package's instructions and open a user's
|
||||
editor so they may double check the work of the function.
|
||||
@@ -282,5 +274,5 @@ def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool
|
||||
tty.msg(f"Added {num_versions_added} new versions to {pkg.name}")
|
||||
tty.msg(f"Open {filename} to review the additions.")
|
||||
|
||||
if sys.stdout.isatty() and not is_batch:
|
||||
if sys.stdout.isatty():
|
||||
editor(filename)
|
||||
|
||||
@@ -6,7 +6,6 @@
import json
import os
import shutil
from urllib.parse import urlparse, urlunparse

import llnl.util.filesystem as fs
import llnl.util.tty as tty
@@ -14,7 +13,6 @@

import spack.binary_distribution as bindist
import spack.ci as spack_ci
import spack.cmd
import spack.cmd.buildcache as buildcache
import spack.config as cfg
import spack.environment as ev
@@ -33,7 +31,6 @@
SPACK_COMMAND = "spack"
MAKE_COMMAND = "make"
INSTALL_FAIL_CODE = 1
FAILED_CREATE_BUILDCACHE_CODE = 100


def deindent(desc):
@@ -160,9 +157,7 @@ def setup_parser(subparser):
description=deindent(ci_reproduce.__doc__),
help=spack.cmd.first_line(ci_reproduce.__doc__),
)
reproduce.add_argument(
"job_url", help="URL of GitLab job web page or artifact", type=_gitlab_artifacts_url
)
reproduce.add_argument("job_url", help="URL of job artifacts bundle")
reproduce.add_argument(
"--runtime",
help="Container runtime to use.",
@@ -707,9 +702,11 @@ def ci_rebuild(args):
cdash_handler.report_skipped(job_spec, reports_dir, reason=msg)
cdash_handler.copy_test_results(reports_dir, job_test_dir)

# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure
# outside of the pipeline environment.
if install_exit_code == 0:
# If the install succeeded, push it to one or more mirrors. Failure to push to any mirror
# will result in a non-zero exit code. Pushing is best-effort.
mirror_urls = [buildcache_mirror_url]

# TODO: Remove this block in Spack 0.23
@@ -721,12 +718,13 @@ def ci_rebuild(args):
destination_mirror_urls=mirror_urls,
sign_binaries=spack_ci.can_sign_binaries(),
):
if not result.success:
install_exit_code = FAILED_CREATE_BUILDCACHE_CODE
(tty.msg if result.success else tty.error)(
f'{"Pushed" if result.success else "Failed to push"} '
f'{job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when())} '
f"to {result.url}"
msg = tty.msg if result.success else tty.warn
msg(
"{} {} to {}".format(
"Pushed" if result.success else "Failed to push",
job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when()),
result.url,
)
)

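The new reporting line selects the logging function with a conditional expression and calls the result immediately. A small illustration of the idiom with print-based stand-ins for `tty.msg` and `tty.error` (spec and mirror URL hypothetical):

    import sys

    def msg(s: str) -> None:
        print(f"==> {s}")

    def error(s: str) -> None:
        print(f"==> Error: {s}", file=sys.stderr)

    success = False
    # picks msg on success, error on failure, then calls it right away
    (msg if success else error)(
        f'{"Pushed" if success else "Failed to push"} zlib@1.3 /abcdef1 to https://mirror.example.com'
    )
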
# If this is a develop pipeline, check if the spec that we just built is
@@ -747,22 +745,22 @@ def ci_rebuild(args):
tty.warn(msg.format(broken_spec_path, err))

else:
# If the install did not succeed, print out some instructions on how to reproduce this
# build failure outside of the pipeline environment.
tty.debug("spack install exited non-zero, will not create buildcache")

api_root_url = os.environ.get("CI_API_V4_URL")
ci_project_id = os.environ.get("CI_PROJECT_ID")
ci_job_id = os.environ.get("CI_JOB_ID")

repro_job_url = f"{api_root_url}/projects/{ci_project_id}/jobs/{ci_job_id}/artifacts"
repro_job_url = "{0}/projects/{1}/jobs/{2}/artifacts".format(
api_root_url, ci_project_id, ci_job_id
)

# Control characters cause this to be printed in blue so it stands out
print(
f"""
reproduce_msg = """

\033[34mTo reproduce this build locally, run:

spack ci reproduce-build {repro_job_url} [--working-dir <dir>] [--autostart]
spack ci reproduce-build {0} [--working-dir <dir>] [--autostart]

If this project does not have public pipelines, you will need to first:

@@ -770,9 +768,12 @@ def ci_rebuild(args):

... then follow the printed instructions.\033[0;0m

"""
""".format(
repro_job_url
)

print(reproduce_msg)

rebuild_timer.stop()
try:
with open("install_timers.json", "w") as timelog:
@@ -791,6 +792,11 @@ def ci_reproduce(args):
artifacts of the provided gitlab pipeline rebuild job's URL will be used to derive
instructions for reproducing the build locally
"""
job_url = args.job_url
work_dir = args.working_dir
autostart = args.autostart
runtime = args.runtime

# Allow passing GPG key for reprocuding protected CI jobs
if args.gpg_file:
gpg_key_url = url_util.path_to_file_url(args.gpg_file)
@@ -799,47 +805,7 @@ def ci_reproduce(args):
else:
gpg_key_url = None

return spack_ci.reproduce_ci_job(
args.job_url, args.working_dir, args.autostart, gpg_key_url, args.runtime
)


def _gitlab_artifacts_url(url: str) -> str:
"""Take a URL either to the URL of the job in the GitLab UI, or to the artifacts zip file,
and output the URL to the artifacts zip file."""
parsed = urlparse(url)

if not parsed.scheme or not parsed.netloc:
raise ValueError(url)

parts = parsed.path.split("/")

if len(parts) < 2:
raise ValueError(url)

# Just use API endpoints verbatim, they're probably generated by Spack.
if parts[1] == "api":
return url

# If it's a URL to the job in the Gitlab UI, we may need to append the artifacts path.
minus_idx = parts.index("-")

# Remove repeated slashes in the remainder
rest = [p for p in parts[minus_idx + 1 :] if p]

# Now the format is jobs/X or jobs/X/artifacts/download
if len(rest) < 2 or rest[0] != "jobs":
raise ValueError(url)

if len(rest) == 2:
# replace jobs/X with jobs/X/artifacts/download
rest.extend(("artifacts", "download"))

# Replace the parts and unparse.
parts[minus_idx + 1 :] = rest

# Don't allow fragments / queries
return urlunparse(parsed._replace(path="/".join(parts), fragment="", query=""))
return spack_ci.reproduce_ci_job(job_url, work_dir, autostart, gpg_key_url, runtime)

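Because `_gitlab_artifacts_url` is registered as the argparse `type` for `job_url`, malformed URLs are rejected at parse time and UI URLs are normalized before `ci_reproduce` runs. Expected behavior on sample inputs, based on the function above (hostnames hypothetical):

    # A job page from the GitLab UI gains the artifacts suffix:
    _gitlab_artifacts_url("https://gitlab.example.com/group/project/-/jobs/12345")
    # -> "https://gitlab.example.com/group/project/-/jobs/12345/artifacts/download"

    # API endpoints, e.g. those Spack itself prints, pass through verbatim:
    _gitlab_artifacts_url("https://gitlab.example.com/api/v4/projects/1/jobs/12345/artifacts")
    # -> unchanged
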
def ci(parser, args):

@@ -570,14 +570,6 @@ def add_concretizer_args(subparser):
default=None,
help="reuse installed dependencies only",
)
subgroup.add_argument(
"--deprecated",
action=ConfigSetAction,
dest="config:deprecated",
const=True,
default=None,
help="allow concretizer to select deprecated versions",
)

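Routing `--deprecated` through `ConfigSetAction` makes the flag sugar for a one-off config override; several commands in this diff (`dev-build`, `fetch`, `install`, `mirror`) apply the same setting explicitly. The equivalent effect, sketched (package name hypothetical):

    # spack install --deprecated zlib   # hypothetical invocation
    # is equivalent to setting, for this process only:
    import spack.config

    spack.config.set("config:deprecated", True, scope="command_line")
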
def add_connection_args(subparser, add_help):

@@ -89,7 +89,7 @@ def compiler_find(args):
paths, scope=None, mixed_toolchain=args.mixed_toolchain
)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope)
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope, init_config=False)
n = len(new_compilers)
s = "s" if n > 1 else ""

@@ -76,10 +76,6 @@ def setup_parser(subparser):
)
add_parser.add_argument("-f", "--file", help="file from which to set all config values")

change_parser = sp.add_parser("change", help="swap variants etc. on specs in config")
change_parser.add_argument("path", help="colon-separated path to config section with specs")
change_parser.add_argument("--match-spec", help="only change constraints that match this")

prefer_upstream_parser = sp.add_parser(
"prefer-upstream", help="set package preferences from upstream"
)
@@ -122,7 +118,7 @@ def _get_scope_and_section(args):
if not section and not scope:
env = ev.active_environment()
if env:
scope = env.scope_name
scope = env.env_file_config_scope_name()

# set scope defaults
elif not scope:
@@ -267,98 +263,6 @@ def _can_update_config_file(scope: spack.config.ConfigScope, cfg_file):
return fs.can_write_to_dir(scope.path) and fs.can_access(cfg_file)


def _config_change_requires_scope(path, spec, scope, match_spec=None):
"""Return whether or not anything changed."""
require = spack.config.get(path, scope=scope)
if not require:
return False

changed = False

def override_cfg_spec(spec_str):
nonlocal changed

init_spec = spack.spec.Spec(spec_str)
# Overridden spec cannot be anonymous
init_spec.name = spec.name
if match_spec and not init_spec.satisfies(match_spec):
# If there is a match_spec, don't change constraints that
# don't match it
return spec_str
elif not init_spec.intersects(spec):
changed = True
return str(spack.spec.Spec.override(init_spec, spec))
else:
# Don't override things if they intersect, otherwise we'd
# be e.g. attaching +debug to every single version spec
return spec_str

if isinstance(require, str):
new_require = override_cfg_spec(require)
else:
new_require = []
for item in require:
if "one_of" in item:
item["one_of"] = [override_cfg_spec(x) for x in item["one_of"]]
elif "any_of" in item:
item["any_of"] = [override_cfg_spec(x) for x in item["any_of"]]
elif "spec" in item:
item["spec"] = override_cfg_spec(item["spec"])
elif isinstance(item, str):
item = override_cfg_spec(item)
else:
raise ValueError(f"Unexpected requirement: ({type(item)}) {str(item)}")
new_require.append(item)

spack.config.set(path, new_require, scope=scope)
return changed


def _config_change(config_path, match_spec_str=None):
all_components = spack.config.process_config_path(config_path)
key_components = all_components[:-1]
key_path = ":".join(key_components)

spec = spack.spec.Spec(syaml.syaml_str(all_components[-1]))

match_spec = None
if match_spec_str:
match_spec = spack.spec.Spec(match_spec_str)

if key_components[-1] == "require":
# Extract the package name from the config path, which allows
# args.spec to be anonymous if desired
pkg_name = key_components[1]
spec.name = pkg_name

changed = False
for scope in spack.config.writable_scope_names():
changed |= _config_change_requires_scope(key_path, spec, scope, match_spec=match_spec)

if not changed:
existing_requirements = spack.config.get(key_path)
if isinstance(existing_requirements, str):
raise spack.config.ConfigError(
"'config change' needs to append a requirement,"
" but existing require: config is not a list"
)

ideal_scope_to_modify = None
for scope in spack.config.writable_scope_names():
if spack.config.get(key_path, scope=scope):
ideal_scope_to_modify = scope
break

update_path = f"{key_path}:[{str(spec)}]"
spack.config.add(update_path, scope=ideal_scope_to_modify)
else:
raise ValueError("'config change' can currently only change 'require' sections")


def config_change(args):
_config_change(args.path, args.match_spec)

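The override semantics of `_config_change_requires_scope` hinge on `Spec.intersects`: only existing constraints that cannot hold together with the new spec are rewritten, everything else is kept and the new constraint is appended instead. A small sketch using the same calls the function makes (package and variants hypothetical; assumes a Spack checkout on `PYTHONPATH`):

    import spack.spec

    existing = spack.spec.Spec("mpich+debug")
    incoming = spack.spec.Spec("mpich~debug")

    assert not existing.intersects(incoming)  # conflicting constraints...
    # ...so the entry is rewritten in place; expected result: mpich~debug
    print(spack.spec.Spec.override(existing, incoming))

    # An intersecting entry such as mpich@3.4 would be left alone, and the
    # new constraint appended to the require: list instead.
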
def config_update(args):
# Read the configuration files
spack.config.CONFIG.get_config(args.section, scope=args.scope)
@@ -586,6 +490,5 @@ def config(parser, args):
"update": config_update,
"revert": config_revert,
"prefer-upstream": config_prefer_upstream,
"change": config_change,
}
action[args.config_command](args)

@@ -64,9 +64,8 @@ class {class_name}({base_class_name}):
# maintainers("github_user1", "github_user2")

# FIXME: Add the SPDX identifier of the project's license below.
# See https://spdx.org/licenses/ for a list. Upon manually verifying
# the license, set checked_by to your Github username.
license("UNKNOWN", checked_by="github_user1")
# See https://spdx.org/licenses/ for a list.
license("UNKNOWN")

{versions}

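The updated template asks the packager to verify the license and record themselves as the checker. A filled-in example of the generated line (values are placeholders):

    # after verifying the project's SPDX identifier at https://spdx.org/licenses/
    license("Apache-2.0", checked_by="your-github-handle")
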
@@ -19,7 +19,7 @@


def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["jobs", "no_checksum", "spec"])
arguments.add_common_arguments(subparser, ["jobs"])
subparser.add_argument(
"-d",
"--source-path",
@@ -34,6 +34,7 @@ def setup_parser(subparser):
dest="ignore_deps",
help="do not try to install dependencies of requested packages",
)
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"--keep-prefix",
action="store_true",
@@ -62,6 +63,7 @@ def setup_parser(subparser):
choices=["root", "all"],
help="run tests on only root packages or all packages",
)
arguments.add_common_arguments(subparser, ["spec"])

stop_group = subparser.add_mutually_exclusive_group()
stop_group.add_argument(
@@ -123,6 +125,9 @@ def dev_build(self, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")

if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")

tests = False
if args.test == "all":
tests = True

@@ -8,7 +8,6 @@
import llnl.util.tty as tty

import spack.cmd
import spack.config
import spack.spec
import spack.util.path
import spack.version
@@ -22,7 +21,6 @@

def setup_parser(subparser):
subparser.add_argument("-p", "--path", help="source location of package")
subparser.add_argument("-b", "--build-directory", help="build directory for the package")

clone_group = subparser.add_mutually_exclusive_group()
clone_group.add_argument(
@@ -153,11 +151,4 @@ def develop(parser, args):
env = spack.cmd.require_active_env(cmd_name="develop")
tty.debug("Updating develop config for {0} transactionally".format(env.name))
with env.write_transaction():
if args.build_directory is not None:
spack.config.add(
"packages:{}:package_attributes:build_directory:{}".format(
spec.name, args.build_directory
),
env.scope_name,
)
_update_config(spec, path)

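The new `--build-directory` flag persists a per-package attribute in the active environment's scope before the develop entry itself is written. The resulting config operation, spelled out (package and path hypothetical):

    # spack develop --build-directory /scratch/build mypkg@=1.0   # hypothetical
    spack.config.add(
        "packages:mypkg:package_attributes:build_directory:/scratch/build",
        env.scope_name,
    )
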
@@ -9,7 +9,6 @@
import shutil
import sys
import tempfile
from pathlib import Path
from typing import Optional

import llnl.string as string
@@ -45,7 +44,6 @@
"deactivate",
"create",
["remove", "rm"],
["rename", "mv"],
["list", "ls"],
["status", "st"],
"loads",
@@ -56,104 +54,6 @@
]


#
# env create
#
def env_create_setup_parser(subparser):
"""create a new environment"""
subparser.add_argument(
"env_name",
metavar="env",
help=(
"name of managed environment or directory of the anonymous env "
"(when using --dir/-d) to activate"
),
)
subparser.add_argument(
"-d", "--dir", action="store_true", help="create an environment in a specific directory"
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
view_opts = subparser.add_mutually_exclusive_group()
view_opts.add_argument(
"--without-view", action="store_true", help="do not maintain a view for this environment"
)
view_opts.add_argument(
"--with-view",
help="specify that this environment should maintain a view at the"
" specified path (by default the view is maintained in the"
" environment directory)",
)
subparser.add_argument(
"envfile",
nargs="?",
default=None,
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)


def env_create(args):
if args.with_view:
# Expand relative paths provided on the command line to the current working directory
# This way we interpret `spack env create --with-view ./view --dir ./env` as
# a view in $PWD/view, not $PWD/env/view. This is different from specifying a relative
# path in the manifest, which is resolved relative to the manifest file's location.
with_view = os.path.abspath(args.with_view)
elif args.without_view:
with_view = False
else:
# Note that 'None' means unspecified, in which case the Environment
# object could choose to enable a view by default. False means that
# the environment should not include a view.
with_view = None

env = _env_create(
args.env_name,
init_file=args.envfile,
dir=args.dir,
with_view=with_view,
keep_relative=args.keep_relative,
)

# Generate views, only really useful for environments created from spack.lock files.
env.regenerate_views()


def _env_create(name_or_path, *, init_file=None, dir=False, with_view=None, keep_relative=False):
"""Create a new environment, with an optional yaml description.

Arguments:
name_or_path (str): name of the environment to create, or path to it
init_file (str or file): optional initialization file -- can be
a JSON lockfile (*.lock, *.json) or YAML manifest file
dir (bool): if True, create an environment in a directory instead
of a named environment
keep_relative (bool): if True, develop paths are copied verbatim into
the new environment file, otherwise they may be made absolute if the
new environment is in a different location
"""
if not dir:
env = ev.create(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment '%s' in %s" % (name_or_path, env.path))
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % (name_or_path))
return env

env = ev.create_in_dir(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment in %s" % env.path)
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % env.path)
return env

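The comment in `env_create` above carries the key behavioral detail: a `--with-view` path given on the command line is anchored to the caller's working directory, unlike a relative path written in a manifest. A two-line demonstration:

    import os

    # spack env create --with-view ./view --dir ./env   # hypothetical invocation
    with_view = os.path.abspath("./view")  # resolves to $PWD/view, not $PWD/env/view
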
#
# env activate
#
@@ -218,46 +118,22 @@ def env_activate_setup_parser(subparser):
help="decorate the command line prompt when activating",
)

subparser.add_argument(
env_options = subparser.add_mutually_exclusive_group()
env_options.add_argument(
"--temp",
action="store_true",
default=False,
help="create and activate an environment in a temporary directory",
)
subparser.add_argument(
"--create",
action="store_true",
default=False,
help="create and activate the environment if it doesn't exist",
env_options.add_argument(
"-d", "--dir", default=None, help="activate the environment in this directory"
)
subparser.add_argument(
"--envfile",
nargs="?",
default=None,
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
subparser.add_argument(
"-d",
"--dir",
default=False,
action="store_true",
help="activate environment based on the directory supplied",
)
subparser.add_argument(
env_options.add_argument(
metavar="env",
dest="env_name",
dest="activate_env",
nargs="?",
default=None,
help=(
"name of managed environment or directory of the anonymous env"
" (when using --dir/-d) to activate"
),
help="name of environment to activate",
)


@@ -272,8 +148,7 @@ def create_temp_env_directory():
def _tty_info(msg):
"""tty.info like function that prints the equivalent printf statement for eval."""
decorated = f'{colorize("@*b{==>}")} {msg}\n'
executor = "echo" if sys.platform == "win32" else "printf"
print(f"{executor} {shlex.quote(decorated)};")
print(f"printf {shlex.quote(decorated)};")

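`_tty_info` exists because the stdout of `spack env activate` is evaluated by the shell wrapper, so a plain message would be executed rather than shown; instead the function emits a quoted `printf` (or, with this change, `echo` on Windows) command. A trimmed, runnable sketch:

    import shlex
    import sys

    def _tty_info(msg: str) -> None:
        decorated = f"==> {msg}\n"
        # emit a shell command, because our stdout is eval'd by the shell wrapper
        executor = "echo" if sys.platform == "win32" else "printf"
        print(f"{executor} {shlex.quote(decorated)};")

    _tty_info("Created and activated temporary environment in /tmp/spack-tmp")
    # emits (roughly): printf '==> Created and activated temporary environment in /tmp/spack-tmp\n';
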
def env_activate(args):
@@ -287,17 +162,11 @@ def env_activate(args):
if args.env or args.no_env or args.env_dir:
tty.die("Calling spack env activate with --env, --env-dir and --no-env is ambiguous")

# special parser error handling relative to the --temp flag
temp_conflicts = iter([args.keep_relative, args.dir, args.env_name, args.with_view])
if args.temp and any(temp_conflicts):
tty.die(
"spack env activate --temp cannot be combined with managed environments, --with-view,"
" --keep-relative, or --dir."
)
env_name_or_dir = args.activate_env or args.dir

# When executing `spack env activate` without further arguments, activate
# the default environment. It's created when it doesn't exist yet.
if not args.env_name and not args.temp:
if not env_name_or_dir and not args.temp:
short_name = "default"
if not ev.exists(short_name):
ev.create(short_name)
@@ -316,25 +185,17 @@ def env_activate(args):
_tty_info(f"Created and activated temporary environment in {env_path}")

# Managed environment
elif ev.exists(args.env_name) and not args.dir:
env_path = ev.root(args.env_name)
short_name = args.env_name
elif ev.exists(env_name_or_dir) and not args.dir:
env_path = ev.root(env_name_or_dir)
short_name = env_name_or_dir

# Environment directory
elif ev.is_env_dir(args.env_name):
env_path = os.path.abspath(args.env_name)
elif ev.is_env_dir(env_name_or_dir):
env_path = os.path.abspath(env_name_or_dir)
short_name = os.path.basename(env_path)

# create if user requested, and then recall recursively
elif args.create:
tty.set_msg_enabled(False)
env_create(args)
tty.set_msg_enabled(True)
env_activate(args)
return

else:
tty.die("No such environment: '%s'" % args.env_name)
tty.die("No such environment: '%s'" % env_name_or_dir)

env_prompt = "[%s]" % short_name

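`--create` works by delegating: if the named environment does not exist, `env_activate` silences messages, calls `env_create(args)`, and then re-enters itself with the environment now present. The control flow from the hunk above, condensed with comments:

    elif args.create:
        tty.set_msg_enabled(False)  # keep the eval'd shell output clean
        env_create(args)
        tty.set_msg_enabled(True)
        env_activate(args)          # recurse; the env now exists
        return
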
@@ -429,6 +290,97 @@ def env_deactivate(args):
sys.stdout.write(cmds)


#
# env create
#
def env_create_setup_parser(subparser):
"""create a new environment"""
subparser.add_argument("create_env", metavar="env", help="name of environment to create")
subparser.add_argument(
"-d", "--dir", action="store_true", help="create an environment in a specific directory"
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
view_opts = subparser.add_mutually_exclusive_group()
view_opts.add_argument(
"--without-view", action="store_true", help="do not maintain a view for this environment"
)
view_opts.add_argument(
"--with-view",
help="specify that this environment should maintain a view at the"
" specified path (by default the view is maintained in the"
" environment directory)",
)
subparser.add_argument(
"envfile",
nargs="?",
default=None,
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)


def env_create(args):
if args.with_view:
# Expand relative paths provided on the command line to the current working directory
# This way we interpret `spack env create --with-view ./view --dir ./env` as
# a view in $PWD/view, not $PWD/env/view. This is different from specifying a relative
# path in the manifest, which is resolved relative to the manifest file's location.
with_view = os.path.abspath(args.with_view)
elif args.without_view:
with_view = False
else:
# Note that 'None' means unspecified, in which case the Environment
# object could choose to enable a view by default. False means that
# the environment should not include a view.
with_view = None

env = _env_create(
args.create_env,
init_file=args.envfile,
dir=args.dir,
with_view=with_view,
keep_relative=args.keep_relative,
)

# Generate views, only really useful for environments created from spack.lock files.
env.regenerate_views()


def _env_create(name_or_path, *, init_file=None, dir=False, with_view=None, keep_relative=False):
"""Create a new environment, with an optional yaml description.

Arguments:
name_or_path (str): name of the environment to create, or path to it
init_file (str or file): optional initialization file -- can be
a JSON lockfile (*.lock, *.json) or YAML manifest file
dir (bool): if True, create an environment in a directory instead
of a named environment
keep_relative (bool): if True, develop paths are copied verbatim into
the new environment file, otherwise they may be made absolute if the
new environment is in a different location
"""
if not dir:
env = ev.create(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment '%s' in %s" % (name_or_path, env.path))
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % (name_or_path))
return env

env = ev.create_in_dir(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment in %s" % env.path)
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % env.path)
return env


#
# env remove
#

@@ -474,82 +426,11 @@ def env_remove(args):
tty.msg(f"Successfully removed environment '{bad_env_name}'")


#
# env rename
#
def env_rename_setup_parser(subparser):
"""rename an existing environment"""
subparser.add_argument(
"mv_from", metavar="from", help="name (or path) of existing environment"
)
subparser.add_argument(
"mv_to", metavar="to", help="new name (or path) for existing environment"
)
subparser.add_argument(
"-d",
"--dir",
action="store_true",
help="the specified arguments correspond to directory paths",
)
subparser.add_argument(
"-f", "--force", action="store_true", help="allow overwriting of an existing environment"
)


def env_rename(args):
"""Rename an environment.

This renames a managed environment or moves an anonymous environment.
"""

# Directory option has been specified
if args.dir:
if not ev.is_env_dir(args.mv_from):
tty.die("The specified path does not correspond to a valid spack environment")
from_path = Path(args.mv_from)
if not args.force:
if ev.is_env_dir(args.mv_to):
tty.die(
"The new path corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
if Path(args.mv_to).exists():
tty.die("The new path already exists; specify the --force flag to overwrite it.")
to_path = Path(args.mv_to)

# Name option being used
elif ev.exists(args.mv_from):
from_path = ev.environment.environment_dir_from_name(args.mv_from)
if not args.force and ev.exists(args.mv_to):
tty.die(
"The new name corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
to_path = ev.environment.root(args.mv_to)

# Neither
else:
tty.die("The specified name does not correspond to a managed spack environment")

# Guard against renaming from or to an active environment
active_env = ev.active_environment()
if active_env:
from_env = ev.Environment(from_path)
if from_env.path == active_env.path:
tty.die("Cannot rename active environment")
if to_path == active_env.path:
tty.die(f"{args.mv_to} is an active environment")

shutil.rmtree(to_path, ignore_errors=True)
fs.rename(from_path, to_path)
tty.msg(f"Successfully renamed environment {args.mv_from} to {args.mv_to}")


#
# env list
#
def env_list_setup_parser(subparser):
"""list managed environments"""
"""list available environments"""


def env_list(args):

@@ -18,7 +18,6 @@
import spack.cray_manifest as cray_manifest
import spack.detection
import spack.error
import spack.repo
import spack.util.environment
from spack.cmd.common import arguments

@@ -153,9 +152,9 @@ def external_find(args):
def packages_to_search_for(
*, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]]
):
result = list(
{pkg for tag in tags for pkg in spack.repo.PATH.packages_with_tags(tag, full=True)}
)
result = []
for current_tag in tags:
result.extend(spack.repo.PATH.packages_with_tags(current_tag, full=True))

if names:
# Match both fully qualified and unqualified

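The set comprehension matters because a package can carry more than one of the requested tags; the old `list.extend` loop would then return it twice. A quick illustration of the difference (tag data hypothetical):

    tagged = {"build-tools": ["cmake", "ninja"], "core": ["cmake"]}
    tags = ["build-tools", "core"]

    with_dupes = []
    for tag in tags:
        with_dupes.extend(tagged[tag])  # ['cmake', 'ninja', 'cmake']

    deduped = list({pkg for tag in tags for pkg in tagged[tag]})  # cmake appears once
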
@@ -18,7 +18,7 @@


def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"-m",
"--missing",
@@ -28,7 +28,7 @@ def setup_parser(subparser):
subparser.add_argument(
"-D", "--dependencies", action="store_true", help="also fetch all dependencies"
)
arguments.add_concretizer_args(subparser)
arguments.add_common_arguments(subparser, ["specs"])
subparser.epilog = (
"With an active environment, the specs "
"parameter can be omitted. In this case all (uninstalled"
@@ -40,6 +40,9 @@ def fetch(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")

if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")

if args.specs:
specs = spack.cmd.parse_specs(args.specs, concretize=True)
else:

@@ -140,12 +140,6 @@ def setup_parser(subparser):
subparser.add_argument(
"--only-deprecated", action="store_true", help="show only deprecated packages"
)
subparser.add_argument(
"--install-tree",
action="store",
default="all",
help="Install trees to query: 'all' (default), 'local', 'upstream', upstream name or path",
)

subparser.add_argument("--start-date", help="earliest date of installation [YYYY-MM-DD]")
subparser.add_argument("--end-date", help="latest date of installation [YYYY-MM-DD]")
@@ -174,12 +168,6 @@ def query_arguments(args):

q_args = {"installed": installed, "known": known, "explicit": explicit}

install_tree = args.install_tree
upstreams = spack.config.get("upstreams", {})
if install_tree in upstreams.keys():
install_tree = upstreams[install_tree]["install_tree"]
q_args["install_tree"] = install_tree

# Time window of installation
for attribute in ("start_date", "end_date"):
date = getattr(args, attribute)

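`--install-tree` accepts either a literal path or the name of a configured upstream; `query_arguments` resolves a name through the `upstreams` config before the database query. A sketch of the resolution (upstream name and path hypothetical):

    # spack find --install-tree my-upstream   # hypothetical invocation
    import spack.config

    upstreams = spack.config.get("upstreams", {})  # e.g. {"my-upstream": {"install_tree": "/opt/spack"}}
    install_tree = "my-upstream"
    if install_tree in upstreams:
        install_tree = upstreams[install_tree]["install_tree"]  # -> "/opt/spack"
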
@@ -18,14 +18,7 @@


def setup_parser(subparser):
setup_parser.parser = subparser
subparser.epilog = """
Outside of an environment, the command concretizes specs and graphs them, unless the
--installed option is given. In that case specs are matched from the current DB.

If an environment is active, specs are matched from the currently available concrete specs
in the lockfile.

"""
method = subparser.add_mutually_exclusive_group()
method.add_argument(
"-a", "--ascii", action="store_true", help="draw graph as ascii to stdout (default)"
@@ -48,40 +41,39 @@ def setup_parser(subparser):
)

subparser.add_argument(
"-i", "--installed", action="store_true", help="graph specs from the DB"
"-i",
"--installed",
action="store_true",
help="graph installed specs, or specs in the active env (implies --dot)",
)

arguments.add_common_arguments(subparser, ["deptype", "specs"])


def graph(parser, args):
env = ev.active_environment()
if args.installed and env:
tty.die("cannot use --installed with an active environment")
if args.installed and args.specs:
tty.die("cannot specify specs with --installed")

if args.color and not args.dot:
tty.die("the --color option can be used only with --dot")

if args.installed:
if not args.specs:
specs = spack.store.STORE.db.query()
args.dot = True
env = ev.active_environment()
if env:
specs = env.concrete_roots()
else:
result = []
for item in args.specs:
result.extend(spack.store.STORE.db.query(item))
specs = list(set(result))
elif env:
specs = env.concrete_roots()
if args.specs:
specs = env.all_matching_specs(*args.specs)
specs = spack.store.STORE.db.query()

else:
specs = spack.cmd.parse_specs(args.specs, concretize=not args.static)

if not specs:
tty.die("no spec matching the query")
setup_parser.parser.print_help()
return 1

if args.static:
args.dot = True
static_graph_dot(specs, depflag=args.deptype)
return

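After this rewrite, `--installed` implies `--dot` and becomes environment-aware: with an active environment it graphs the environment's concrete roots, otherwise the whole database. The new dispatch, condensed into a sketch using the same names as the hunk above:

    if args.installed:
        args.dot = True
        env = ev.active_environment()
        specs = env.concrete_roots() if env else spack.store.STORE.db.query()
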
@@ -30,7 +30,6 @@
@c{@min:max} version range (inclusive)
@c{@min:} version <min> or higher
@c{@:max} up to version <max> (inclusive)
@c{@=version} exact version

compilers:
@g{%compiler} build with <compiler>

@@ -176,7 +176,7 @@ def setup_parser(subparser):
dest="install_source",
help="install source files in prefix",
)
arguments.add_common_arguments(subparser, ["no_checksum"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"-v",
"--verbose",
@@ -290,11 +290,11 @@ def require_user_confirmation_for_overwrite(concrete_specs, args):
def _dump_log_on_error(e: spack.build_environment.InstallError):
e.print_context()
assert e.pkg, "Expected InstallError to include the associated package"
if not os.path.exists(e.pkg.log_path):
if not os.path.exists(e.pkg.build_log_path):
tty.error("'spack install' created no log.")
else:
sys.stderr.write("Full build log:\n")
with open(e.pkg.log_path, errors="replace") as log:
with open(e.pkg.build_log_path, errors="replace") as log:
shutil.copyfileobj(log, sys.stderr)


@@ -326,6 +326,9 @@ def install(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")

if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")

if args.log_file and not args.log_format:
msg = "the '--log-format' must be specified when using '--log-file'"
tty.die(msg)
@@ -420,9 +423,10 @@ def install_with_active_env(env: ev.Environment, args, install_kwargs, reporter_
with reporter_factory(specs_to_install):
env.install_specs(specs_to_install, **install_kwargs)
finally:
if env.views:
with env.write_transaction():
env.write(regenerate=True)
# TODO: this is doing way too much to trigger
# views and modules to be generated.
with env.write_transaction():
env.write(regenerate=True)


def concrete_specs_from_cli(args, install_kwargs):

@@ -292,11 +292,9 @@ def head(n, span_id, title, anchor=None):
out.write("<dd>\n")
out.write(
", ".join(
(
d
if d not in pkg_names
else '<a class="reference internal" href="#%s">%s</a>' % (d, d)
)
d
if d not in pkg_names
else '<a class="reference internal" href="#%s">%s</a>' % (d, d)
for d in deps
)
)

@@ -5,6 +5,8 @@

import sys

import llnl.util.tty as tty

import spack.cmd
import spack.cmd.find
import spack.environment as ev
@@ -68,6 +70,16 @@ def setup_parser(subparser):
help="load the first match if multiple packages match the spec",
)

subparser.add_argument(
"--only",
default="package,dependencies",
dest="things_to_load",
choices=["package", "dependencies"],
help="select whether to load the package and its dependencies\n\n"
"the default is to load the package and all dependencies. alternatively, "
"one can decide to load only the package or only the dependencies",
)

subparser.add_argument(
"--list",
action="store_true",
@@ -98,6 +110,11 @@ def load(parser, args):
)
return 1

if args.things_to_load != "package,dependencies":
tty.warn(
"The `--only` flag in spack load is deprecated and will be removed in Spack v0.22"
)

with spack.store.STORE.db.read_transaction():
env_mod = uenv.environment_modifications_for_specs(*specs)
for spec in specs:

@@ -53,7 +53,6 @@ def setup_parser(subparser):
"-S", "--stages", action="store_true", help="top level stage directory"
)
directories.add_argument(
"-c",
"--source-dir",
action="store_true",
help="source directory for a spec (requires it to be staged first)",

@@ -1,71 +0,0 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import errno
import gzip
import io
import os
import shutil
import sys

import spack.cmd
import spack.spec
import spack.util.compression as compression
from spack.cmd.common import arguments
from spack.main import SpackCommandError

description = "print out logs for packages"
section = "basic"
level = "long"


def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["spec"])


def _dump_byte_stream_to_stdout(instream: io.BufferedIOBase) -> None:
# Reopen stdout in binary mode so we don't have to worry about encoding
outstream = os.fdopen(sys.stdout.fileno(), "wb", closefd=False)
shutil.copyfileobj(instream, outstream)


def _logs(cmdline_spec: spack.spec.Spec, concrete_spec: spack.spec.Spec):
if concrete_spec.installed:
log_path = concrete_spec.package.install_log_path
elif os.path.exists(concrete_spec.package.stage.path):
# TODO: `spack logs` can currently not show the logs while a package is being built, as the
# combined log file is only written after the build is finished.
log_path = concrete_spec.package.log_path
else:
raise SpackCommandError(f"{cmdline_spec} is not installed or staged")

try:
stream = open(log_path, "rb")
except OSError as e:
if e.errno == errno.ENOENT:
raise SpackCommandError(f"No logs are available for {cmdline_spec}") from e
raise SpackCommandError(f"Error reading logs for {cmdline_spec}: {e}") from e

with stream as f:
ext = compression.extension_from_magic_numbers_by_stream(f, decompress=False)
if ext and ext != "gz":
raise SpackCommandError(f"Unsupported storage format for {log_path}: {ext}")

# If the log file is gzip compressed, wrap it with a decompressor
_dump_byte_stream_to_stdout(gzip.GzipFile(fileobj=f) if ext == "gz" else f)


def logs(parser, args):
specs = spack.cmd.parse_specs(args.spec)

if not specs:
raise SpackCommandError("You must supply a spec.")

if len(specs) != 1:
raise SpackCommandError("Too many specs. Supply only one.")

concrete_spec = spack.cmd.matching_spec_from_env(specs[0])

_logs(specs[0], concrete_spec)
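`_logs` streams raw bytes and only wraps the file in a `GzipFile` when the magic numbers say so, which keeps stdout binary-safe for piping. A self-contained sketch of that streaming logic (log path hypothetical; the magic-number check stands in for `extension_from_magic_numbers_by_stream`):

    import gzip
    import shutil
    import sys

    def dump_log(path: str) -> None:
        with open(path, "rb") as f:  # binary open -> BufferedReader
            is_gzip = f.peek(2)[:2] == b"\x1f\x8b"  # gzip magic numbers
            stream = gzip.GzipFile(fileobj=f) if is_gzip else f
            shutil.copyfileobj(stream, sys.stdout.buffer)
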
@@ -28,7 +28,7 @@


def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])

sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command")

@@ -72,7 +72,6 @@ def setup_parser(subparser):
" retrieve all versions of each package",
)
arguments.add_common_arguments(create_parser, ["specs"])
arguments.add_concretizer_args(create_parser)

# Destroy
destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__)
@@ -550,4 +549,7 @@ def mirror(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")

if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")

action[args.mirror_command](args)
