Compare commits


4 Commits

Author            SHA1        Message                                          Date
Wouter Deconinck  c034e1ba9f  meshlab: allow downloads of vendored components  2024-06-28 11:01:09 -05:00
Wouter Deconinck  84b6266190  meshlab: put in builtin repo                     2024-04-08 14:41:37 -05:00
Wouter Deconinck  94a42d786f  meshlab: disable two failing plugins             2024-04-08 14:29:31 -05:00
Wouter Deconinck  0e71ea51ce  meshlab: new package                             2024-04-08 14:29:31 -05:00
737 changed files with 6802 additions and 15390 deletions


@@ -17,51 +17,33 @@ concurrency:
jobs:
# Run audits on all the packages in the built-in repository
package-audits:
runs-on: ${{ matrix.system.os }}
runs-on: ${{ matrix.operating_system }}
strategy:
matrix:
system:
- { os: windows-latest, shell: 'powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}' }
- { os: ubuntu-latest, shell: bash }
- { os: macos-latest, shell: bash }
defaults:
run:
shell: ${{ matrix.system.shell }}
operating_system: ["ubuntu-latest", "macos-latest"]
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
run: |
pip install --upgrade pip setuptools pytest coverage[toml]
- name: Setup for Windows run
if: runner.os == 'Windows'
run: |
python -m pip install --upgrade pywin32
- name: Package audits (with coverage)
if: ${{ inputs.with_coverage == 'true' && runner.os != 'Windows' }}
if: ${{ inputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage run $(which spack) -d audit externals
coverage run $(which spack) audit externals
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ inputs.with_coverage == 'false' && runner.os != 'Windows' }}
if: ${{ inputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
spack -d audit packages
spack -d audit externals
- name: Package audits (without coverage)
if: ${{ runner.os == 'Windows' }}
run: |
. share/spack/setup-env.sh
spack -d audit packages
./share/spack/qa/validate_last_exit.ps1
spack -d audit externals
./share/spack/qa/validate_last_exit.ps1
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
. share/spack/setup-env.sh
$(which spack) audit packages
$(which spack) audit externals
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,audits


@@ -1,8 +1,7 @@
#!/bin/bash
set -e
set -ex
source share/spack/setup-env.sh
$PYTHON bin/spack bootstrap disable github-actions-v0.4
$PYTHON bin/spack bootstrap disable spack-install
$PYTHON bin/spack $SPACK_FLAGS solve zlib
$PYTHON bin/spack -d solve zlib
tree $BOOTSTRAP/store
exit 0


@@ -13,22 +13,118 @@ concurrency:
cancel-in-progress: true
jobs:
distros-clingo-sources:
fedora-clingo-sources:
runs-on: ubuntu-latest
container: ${{ matrix.image }}
strategy:
matrix:
image: ["fedora:latest", "opensuse/leap:latest"]
container: "fedora:latest"
steps:
- name: Setup Fedora
if: ${{ matrix.image == 'fedora:latest' }}
- name: Install dependencies
run: |
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gzip \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Setup OpenSUSE
if: ${{ matrix.image == 'opensuse/leap:latest' }}
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-clingo-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-clingo-binaries-and-patchelf:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack -d solve zlib
tree ~/.spack/bootstrap/store/
opensuse-clingo-sources:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
steps:
- name: Install dependencies
run: |
# Harden CI by applying the workaround described here: https://www.suse.com/support/kb/doc/?id=000019505
zypper update -y || zypper update -y
@@ -37,9 +133,15 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Setup repo
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
@@ -49,102 +151,77 @@ jobs:
spack -d solve zlib
tree ~/.spack/bootstrap/store/
clingo-sources:
runs-on: ${{ matrix.runner }}
strategy:
matrix:
runner: ['macos-13', 'macos-14', "ubuntu-latest"]
macos-clingo-sources:
runs-on: macos-latest
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
- name: Install dependencies
run: |
brew install cmake bison tree
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
with:
fetch-depth: 0
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: "3.12"
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
export PATH=/usr/local/opt/bison@2.7/bin:$PATH
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find --not-buildable cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
gnupg-sources:
runs-on: ${{ matrix.runner }}
macos-clingo-binaries:
runs-on: ${{ matrix.macos-version }}
strategy:
matrix:
runner: [ 'macos-13', 'macos-14', "ubuntu-latest" ]
macos-version: ['macos-11', 'macos-12']
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
- name: Install dependencies
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Setup Ubuntu
if: ${{ matrix.runner == 'ubuntu-latest' }}
run: |
sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
with:
fetch-depth: 0
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack -d gpg list
tree ~/.spack/bootstrap/store/
from-binaries:
runs-on: ${{ matrix.runner }}
strategy:
matrix:
runner: ['macos-13', 'macos-14', "ubuntu-latest"]
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Setup Ubuntu
if: ${{ matrix.runner == 'ubuntu-latest' }}
run: |
sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: |
3.8
3.9
3.10
3.11
3.12
- name: Set bootstrap sources
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- name: Bootstrap clingo
run: |
set -e
for ver in '3.8' '3.9' '3.10' '3.11' '3.12' ; do
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
# NOTE: test all pythons that exist, not all do on 12
done
ubuntu-clingo-binaries:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Setup repo
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
echo "Testing $ver_dir"
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
@@ -159,9 +236,122 @@ jobs:
exit 1
fi
done
ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
ubuntu-gnupg-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack -d gpg list
tree ~/.spack/bootstrap/store/
macos-gnupg-binaries:
runs-on: macos-latest
steps:
- name: Install dependencies
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
macos-gnupg-sources:
runs-on: macos-latest
steps:
- name: Install dependencies
run: |
brew install gawk tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack -d gpg list
tree ~/.spack/bootstrap/store/
# [1] Distros that have patched git to resolve CVE-2022-24765 (e.g. Ubuntu patching v2.25.1)
# introduce breaking behavior, so we have to set `safe.directory` in gitconfig ourselves.
# See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog


@@ -45,18 +45,17 @@ jobs:
[leap15, 'linux/amd64,linux/arm64,linux/ppc64le', 'opensuse/leap:15'],
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04'],
[ubuntu-noble, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:24.04'],
[almalinux8, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:8'],
[almalinux9, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:9'],
[rockylinux8, 'linux/amd64,linux/arm64', 'rockylinux:8'],
[rockylinux9, 'linux/amd64,linux/arm64', 'rockylinux:9'],
[fedora39, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:39'],
[fedora40, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:40']]
[fedora37, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:37'],
[fedora38, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:38']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
id: docker_meta
@@ -88,16 +87,16 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
with:
name: dockerfiles_${{ matrix.dockerfile[0] }}
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb
uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20
- name: Log in to GitHub Container Registry
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
@@ -121,14 +120,3 @@ jobs:
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}
merge-dockerfiles:
runs-on: ubuntu-latest
needs: deploy-images
steps:
- name: Merge Artifacts
uses: actions/upload-artifact/merge@65462800fd760344b1a7b4382951275a0abb4808
with:
name: dockerfiles
pattern: dockerfiles_*
delete-merged: true


@@ -36,7 +36,7 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
@@ -77,8 +77,13 @@ jobs:
needs: [ prechecks, changes ]
uses: ./.github/workflows/unit_tests.yaml
secrets: inherit
windows:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks ]
uses: ./.github/workflows/windows_python.yml
secrets: inherit
all:
needs: [ unit-tests, bootstrap ]
needs: [ windows, unit-tests, bootstrap ]
runs-on: ubuntu-latest
steps:
- name: Success


@@ -14,7 +14,7 @@ jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d


@@ -1,4 +1,4 @@
black==24.4.2
black==24.3.0
clingo==5.7.1
flake8==7.0.0
isort==5.13.2


@@ -51,7 +51,7 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
@@ -91,7 +91,7 @@ jobs:
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
with:
flags: unittests,linux,${{ matrix.concretizer }}
token: ${{ secrets.CODECOV_TOKEN }}
@@ -100,7 +100,7 @@ jobs:
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
@@ -124,7 +124,7 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
with:
flags: shelltests,linux
token: ${{ secrets.CODECOV_TOKEN }}
@@ -141,7 +141,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- name: Setup repo and non-root user
run: |
git --version
@@ -160,7 +160,7 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
@@ -185,7 +185,7 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
with:
flags: unittests,linux,clingo
token: ${{ secrets.CODECOV_TOKEN }}
@@ -195,10 +195,10 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-13, macos-14]
os: [macos-latest, macos-14]
python-version: ["3.11"]
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
@@ -223,39 +223,8 @@ jobs:
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
with:
flags: unittests,macos
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Run unit tests on Windows
windows:
defaults:
run:
shell:
powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
runs-on: windows-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip pywin32 setuptools pytest-cov clingo
- name: Create local develop
run: |
./.github/workflows/setup_git.ps1
- name: Unit Test
run: |
spack unit-test -x --verbose --cov --cov-config=pyproject.toml
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@125fc84a9a348dbcf27191600683ec096ec9021c
with:
flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true


@@ -18,7 +18,7 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: '3.11'
@@ -35,7 +35,7 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
@@ -70,7 +70,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- name: Setup repo and non-root user
run: |
git --version

.github/workflows/windows_python.yml (new file, 83 lines)

@@ -0,0 +1,83 @@
name: windows
on:
workflow_call:
concurrency:
group: windows-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
defaults:
run:
shell:
powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip pywin32 setuptools pytest-cov clingo
- name: Create local develop
run: |
./.github/workflows/setup_git.ps1
- name: Unit Test
run: |
spack unit-test -x --verbose --cov --cov-config=pyproject.toml --ignore=lib/spack/spack/test/cmd
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
with:
flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip pywin32 setuptools coverage pytest-cov clingo
- name: Create local develop
run: |
./.github/workflows/setup_git.ps1
- name: Command Unit Test
run: |
spack unit-test -x --verbose --cov --cov-config=pyproject.toml lib/spack/spack/test/cmd
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
with:
flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip pywin32 setuptools coverage
- name: Build Test
run: |
spack compiler find
spack -d external find cmake ninja
spack -d install abseil-cpp


@@ -1,432 +1,3 @@
# v0.22.2 (2024-09-21)
## Bugfixes
- Forward compatibility with Spack 0.23 packages with language dependencies (#45205, #45191)
- Forward compatibility with `urllib` from Python 3.12.6+ (#46453, #46483)
- Bump vendored `archspec` for better aarch64 support (#45721, #46445)
- Support macOS Sequoia (#45018, #45127)
- Fix regression in `{variants.X}` and `{variants.X.value}` format strings (#46206)
- Ensure shell escaping of environment variable values in load and activate commands (#42780)
- Fix an issue where `spec[pkg]` considers specs outside the current DAG (#45090)
- Do not halt concretization on unknown variants in externals (#45326)
- Improve validation of `develop` config section (#46485)
- Explicitly disable `ccache` if turned off in config, to avoid cache pollution (#45275)
- Improve backwards compatibility in `include_concrete` (#45766)
- Fix issue where package tags were sometimes repeated (#45160)
- Make `setup-env.sh` "sourced only" by dropping execution bits (#45641)
- Make certain source/binary fetch errors recoverable instead of a hard error (#45683)
- Remove debug statements in package hash computation (#45235)
- Remove redundant clingo warnings (#45269)
- Remove hard-coded layout version (#45645)
- Do not initialize previous store state in `use_store` (#45268)
- Docs improvements (#46475)
## Package updates
- `chapel` major update (#42197, #44931, #45304)
# v0.22.1 (2024-07-04)
## Bugfixes
- Fix reuse of externals on Linux (#44316)
- Ensure parent gcc-runtime version >= child (#44834, #44870)
- Ensure the latest gcc-runtime is rpath'ed when multiple exist among link deps (#44219)
- Improve version detection of glibc (#44154)
- Improve heuristics for solver (#44893, #44976, #45023)
- Make strong preferences override reuse (#44373)
- Reduce verbosity when C compiler is missing (#44182)
- Make missing ccache executable an error when required (#44740)
- Make every environment view containing `python` a `venv` (#44382)
- Fix external detection for compilers with os but no target (#44156)
- Fix version optimization for roots (#44272)
- Handle common implementations of pagination of tags in OCI build caches (#43136)
- Apply fetched patches to develop specs (#44950)
- Avoid Windows wrappers for filesystem utilities on non-Windows (#44126)
- Fix issue with long filenames in build caches on Windows (#43851)
- Fix formatting issue in `spack audit` (#45045)
- CI fixes (#44582, #43965, #43967, #44279, #44213)
## Package updates
- protobuf: fix 3.4:3.21 patch checksum (#44443)
- protobuf: update hash for patch needed when="@3.4:3.21" (#44210)
- git: bump v2.39 to 2.45; deprecate unsafe versions (#44248)
- gcc: use -rpath {rpath_dir} not -rpath={rpath dir} (#44315)
- Remove mesa18 and libosmesa (#44264)
- Enforce consistency of `gl` providers (#44307)
- Require libiconv for iconv (#44335, #45026).
Notice that glibc/musl also provide iconv, but are not guaranteed to be
complete. Set `packages:iconv:require:[glibc]` to restore the old behavior.
- py-matplotlib: qualify when to do a post install (#44191)
- rust: fix v1.78.0 instructions (#44127)
- suite-sparse: improve setting of the `libs` property (#44214)
- netlib-lapack: provide blas and lapack together (#44981)
# v0.22.0 (2024-05-12)
`v0.22.0` is a major feature release.
## Features in this release
1. **Compiler dependencies**
We are in the process of making compilers proper dependencies in Spack, and a number
of changes in `v0.22` support that effort. You may notice nodes in your dependency
graphs for compiler runtime libraries like `gcc-runtime` or `libgfortran`, and you
may notice that Spack graphs now include `libc`. We've also begun moving compiler
configuration from `compilers.yaml` to `packages.yaml` to make it consistent with
other externals. We are trying to do this with the least disruption possible, so
your existing `compilers.yaml` files should still work. We expect to be done with
this transition by the `v0.23` release in November.
* #41104: Packages compiled with `%gcc` on Linux, macOS and FreeBSD now depend on a
new package `gcc-runtime`, which contains a copy of the shared compiler runtime
libraries. This enables gcc runtime libraries to be installed and relocated when
using a build cache. When building minimal Spack-generated container images it is
no longer necessary to install libgfortran, libgomp etc. using the system package
manager.
* #42062: Packages compiled with `%oneapi` now depend on a new package
`intel-oneapi-runtime`. This is similar to `gcc-runtime`, and the runtimes can
provide virtuals and compilers can inject dependencies on virtuals into compiled
packages. This allows us to model library soname compatibility and allows
compilers like `%oneapi` to provide virtuals like `sycl` (which can also be
provided by standalone libraries). Note that until we have an agreement in place
with intel, Intel packages are marked `redistribute(source=False, binary=False)`
and must be downloaded outside of Spack.
* #43272: changes to the optimization criteria of the solver improve the hit-rate of
buildcaches by a fair amount. The solver now uses more relaxed compatibility rules and will
not try to strictly match compilers or targets of reused specs. Users can still
enforce the previous strict behavior with `require:` sections in `packages.yaml`.
Note that to enforce correct linking, Spack will *not* reuse old `%gcc` and
`%oneapi` specs that do not have the runtime libraries as a dependency.
* #43539: Spack will reuse specs built with compilers that are *not* explicitly
configured in `compilers.yaml`. Because we can now keep runtime libraries in build
cache, we do not require you to also have a local configured compiler to *use* the
runtime libraries. This improves reuse in buildcaches and avoids conflicts with OS
updates that happen underneath Spack.
* #43190: binary compatibility on `linux` is now based on the `libc` version,
instead of on the `os` tag. Spack builds now detect the host `libc` (`glibc` or
`musl`) and add it as an implicit external node in the dependency graph. Binaries
with a `libc` with the same name and a version less than or equal to that of the
detected `libc` can be reused. This is only on `linux`, not `macos` or `Windows`.
* #43464: each package that can provide a compiler is now detectable using `spack
external find`. External packages defining compiler paths are effectively used as
compilers, and `spack external find -t compiler` can be used as a substitute for
`spack compiler find`. More details on this transition are in
[the docs](https://spack.readthedocs.io/en/latest/getting_started.html#manual-compiler-configuration)
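For instance, a sketch of the new detection path (output abbreviated and illustrative, not verbatim):
```
$ spack external find -t compiler
==> The following specs have been detected on this system and added to packages.yaml
    gcc@12.3.0  clang@15.0.7
```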
2. **Improved `spack find` UI for Environments**
If you're working in an environment, you likely care about:
* What are the roots
* Which ones are installed / not installed
* What's been added that still needs to be concretized
We've tweaked `spack find` in environments to show this information much more
clearly. Installation status is shown next to each root, so you can see what is
installed. Roots are also shown in bold in the list of installed packages. There is
also a new option for `spack find -r` / `--only-roots` that will only show env
roots, if you don't want to look at all the installed specs.
More details in #42334.
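As a sketch (hypothetical environment; formatting approximate), with installed roots marked:
```
$ spack find --only-roots
==> In environment myenv
==> 2 root specs
[+] hdf5   -  openmpi
```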
3. **Improved command-line string quoting**
We are making some breaking changes to how Spack parses specs on the CLI in order to
respect shell quoting instead of trying to fight it. If you (sadly) had to write
something like this on the command line:
```
spack install zlib cflags=\"-O2 -g\"
```
That will now result in an error, but you can now write what you probably expected
to work in the first place:
```
spack install zlib cflags="-O2 -g"
```
Quoted strings can now also include special characters, so you can supply flags like:
```
spack install zlib ldflags='-Wl,-rpath=$ORIGIN/_libs'
```
To reduce ambiguity in parsing, we now require that you *not* put spaces around `=`
and `==` for flags or variants. This would not have broken before but will now
result in an error:
```
spack install zlib cflags = "-O2 -g"
```
More details and discussion in #30634.
4. **Revert default `spack install` behavior to `--reuse`**
We changed the default concretizer behavior from `--reuse` to `--reuse-deps` in
#30990 (in `v0.20`), which meant that *every* `spack install` invocation would
attempt to build a new version of the requested package / any environment roots.
While this is a common ask for *upgrading* and for *developer* workflows, we don't
think it should be the default for a package manager.
We are going to try to stick to this policy:
1. Prioritize reuse and build as little as possible by default.
2. Only upgrade or install duplicates if they are explicitly asked for, or if there
is a known security issue that necessitates an upgrade.
With the install command you now have three options:
* `--reuse` (default): reuse as many existing installations as possible.
* `--reuse-deps` / `--fresh-roots`: upgrade (freshen) roots but reuse dependencies if possible.
* `--fresh`: install fresh versions of requested packages (roots) and their dependencies.
We've also introduced `--fresh-roots` as an alias for `--reuse-deps` to make it more clear
that it may give you fresh versions. More details in #41302 and #43988.
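Concretely, the three modes map to the following invocations (package name illustrative):
```
$ spack install hdf5                # default: reuse roots and dependencies where possible
$ spack install --fresh-roots hdf5  # fresh roots, but reuse dependencies
$ spack install --fresh hdf5        # fresh roots and fresh dependencies
```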
5. **More control over reused specs**
You can now control which packages to reuse and how. There is a new
`concretizer:reuse` config option, which accepts the following properties:
- `roots`: `true` to reuse roots, `false` to reuse just dependencies
- `exclude`: list of constraints used to select which specs *not* to reuse
- `include`: list of constraints used to select which specs *to* reuse
- `from`: list of sources for reused specs (some combination of `local`,
`buildcache`, or `external`)
For example, to reuse only specs compiled with GCC, you could write:
```yaml
concretizer:
reuse:
roots: true
include:
- "%gcc"
```
Or, if `openmpi` must be used from externals, and it must be the only external used:
```yaml
concretizer:
reuse:
roots: true
from:
- type: local
exclude: ["openmpi"]
- type: buildcache
exclude: ["openmpi"]
- type: external
include: ["openmpi"]
```
6. **New `redistribute()` directive**
Some packages can't be redistributed in source or binary form. We need an explicit
way to say that in a package.
Now there is a `redistribute()` directive so that package authors can write:
```python
class MyPackage(Package):
redistribute(source=False, binary=False)
```
Like other directives, this works with `when=`:
```python
class MyPackage(Package):
# 12.0 and higher are proprietary
redistribute(source=False, binary=False, when="@12.0:")
# can't redistribute when we depend on some proprietary dependency
redistribute(source=False, binary=False, when="^proprietary-dependency")
```
More in #20185.
7. **New `conflict:` and `prefer:` syntax for package preferences**
Previously, you could express conflicts and preferences in `packages.yaml` through
some contortions with `require:`:
```yaml
packages:
zlib-ng:
require:
- one_of: ["%clang", "@:"] # conflict on %clang
- any_of: ["+shared", "@:"] # strong preference for +shared
```
You can now use `require:` and `prefer:` for a much more readable configuration:
```yaml
packages:
zlib-ng:
conflict:
- "%clang"
prefer:
- "+shared"
```
See [the documentation](https://spack.readthedocs.io/en/latest/packages_yaml.html#conflicts-and-strong-preferences)
and #41832 for more details.
8. **`include_concrete` in environments**
You may want to build on the *concrete* contents of another environment without
changing that environment. You can now include the concrete specs from another
environment's `spack.lock` with `include_concrete`:
```yaml
spack:
specs: []
concretizer:
unify: true
include_concrete:
- /path/to/environment1
- /path/to/environment2
```
Now, when *this* environment is concretized, it will bring in the already concrete
specs from `environment1` and `environment2`, and build on top of them without
changing them. This is useful if you have phased deployments, where old deployments
should not be modified but you want to use as many of them as possible. More details
in #33768.
9. **`python-venv` isolation**
Spack has unique requirements for Python because it:
1. installs every package in its own independent directory, and
2. allows users to register *external* python installations.
External installations may contain their own installed packages that can interfere
with Spack installations, and some distributions (Debian and Ubuntu) even change the
`sysconfig` in ways that alter the installation layout of installed Python packages
(e.g., with the addition of a `/local` prefix on Debian or Ubuntu). To isolate Spack
from these and other issues, we now insert a small `python-venv` package in between
`python` and packages that need to install Python code. This isolates Spack's build
environment, isolates Spack from any issues with an external python, and resolves a
large number of issues we've had with Python installations.
See #40773 for further details.
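As a sketch, the resulting dependency chain for a Python extension looks roughly like this (node names only; real `spack spec` output is more detailed):
```
py-numpy
└── python-venv
    └── python
```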
## New commands, options, and directives
* Allow packages to be pushed to build cache after install from source (#42423)
* `spack develop`: stage build artifacts in same root as non-dev builds #41373
* Don't delete `spack develop` build artifacts after install (#43424)
* `spack find`: add options for local/upstream only (#42999)
* `spack logs`: print log files for packages (either partially built or installed) (#42202)
* `patch`: support reversing patches (#43040)
* `develop`: Add -b/--build-directory option to set build_directory package attribute (#39606)
* `spack list`: add `--namespace` / `--repo` option (#41948)
* directives: add `checked_by` field to `license()`, add some license checks
* `spack gc`: add options for environments and build dependencies (#41731)
* Add `--create` to `spack env activate` (#40896)
## Performance improvements
* environment.py: fix excessive re-reads (#43746)
* ruamel yaml: fix quadratic complexity bug (#43745)
* Refactor to improve `spec format` speed (#43712)
* Do not acquire a write lock on the env post install if no views (#43505)
* asp.py: fewer calls to `spec.copy()` (#43715)
* spec.py: early return in `__str__`
* avoid `jinja2` import at startup unless needed (#43237)
## Other new features of note
* `archspec`: update to `v0.2.4`: support for Windows, bugfixes for `neoverse-v1` and
`neoverse-v2` detection.
* `spack config get`/`blame`: with no args, show entire config
* `spack env create <env>`: dir if dir-like (#44024)
* ASP-based solver: update os compatibility for macOS (#43862)
* Add handling of custom ssl certs in urllib ops (#42953)
* Add ability to rename environments (#43296)
* Add config option and compiler support to reuse across OS's (#42693)
* Support for prereleases (#43140)
* Only reuse externals when configured (#41707)
* Environments: Add support for including views (#42250)
## Binary caches
* Build cache: make signed/unsigned a mirror property (#41507)
* tools stack
## Removals, deprecations, and syntax changes
* remove `dpcpp` compiler and package (#43418)
* spack load: remove --only argument (#42120)
## Notable Bugfixes
* repo.py: drop deleted packages from provider cache (#43779)
* Allow `+` in module file names (#41999)
* `cmd/python`: use runpy to allow multiprocessing in scripts (#41789)
* Show extension commands with spack -h (#41726)
* Support environment variable expansion inside module projections (#42917)
* Alert user to failed concretizations (#42655)
* shell: fix zsh color formatting for PS1 in environments (#39497)
* spack mirror create --all: include patches (#41579)
## Spack community stats
* 7,994 total packages; 525 since `v0.21.0`
* 178 new Python packages, 5 new R packages
* 358 people contributed to this release
* 344 committers to packages
* 45 committers to core
# v0.21.2 (2024-03-01)
## Bugfixes
- Containerize: accommodate nested or pre-existing spack-env paths (#41558)
- Fix setup-env script, when going back and forth between instances (#40924)
- Fix using fully-qualified namespaces from root specs (#41957)
- Fix a bug when a required provider is requested for multiple virtuals (#42088)
- OCI buildcaches:
- only push in parallel when forking (#42143)
- use pickleable errors (#42160)
- Fix using sticky variants in externals (#42253)
- Fix a rare issue with conditional requirements and multi-valued variants (#42566)
## Package updates
- rust: add v1.75, rework a few variants (#41161,#41903)
- py-transformers: add v4.35.2 (#41266)
- mgard: fix OpenMP on AppleClang (#42933)
# v0.21.1 (2024-01-11)
## New features
- Add support for reading buildcaches created by Spack v0.22 (#41773)
## Bugfixes
- spack graph: fix coloring with environments (#41240)
- spack info: sort variants in --variants-by-name (#41389)
- Spec.format: error on old style format strings (#41934)
- ASP-based solver:
- fix infinite recursion when computing concretization errors (#41061)
- don't error for type mismatch on preferences (#41138)
- don't emit spurious debug output (#41218)
- Improve the error message for deprecated preferences (#41075)
- Fix MSVC preview version breaking clingo build on Windows (#41185)
- Fix multi-word aliases (#41126)
- Add a warning for unconfigured compiler (#41213)
- environment: fix an issue with deconcretization/reconcretization of specs (#41294)
- buildcache: don't error if a patch is missing, when installing from binaries (#41986)
- Multiple improvements to unit-tests (#41215,#41369,#41495,#41359,#41361,#41345,#41342,#41308,#41226)
## Package updates
- root: add a webgui patch to address security issue (#41404)
- BerkeleyGW: update source urls (#38218)
# v0.21.0 (2023-11-11)
`v0.21.0` is a major feature release.


@@ -88,7 +88,7 @@ Resources:
[bridged](https://github.com/matrix-org/matrix-appservice-slack#matrix-appservice-slack) to Slack.
* [**Github Discussions**](https://github.com/spack/spack/discussions):
for Q&A and discussions. Note the pinned discussions for announcements.
* **X**: [@spackpm](https://twitter.com/spackpm). Be sure to
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
`@mention` us!
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack):
only for announcements. Please use other venues for discussions.


@@ -144,5 +144,3 @@ switch($SpackSubCommand)
"unload" {Invoke-SpackLoad}
default {python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
}
exit $LASTEXITCODE


@@ -15,7 +15,7 @@ concretizer:
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization. If `dependencies`, we'll only reuse dependencies but
# give you a fresh concretization for your root specs.
reuse: true
reuse: dependencies
# Options that tune which targets are considered for concretization. The
# concretization process is very sensitive to the number targets, and the time
# needed to reach a solution increases noticeably with the number of targets


@@ -19,6 +19,7 @@ packages:
- apple-clang
- clang
- gcc
- intel
providers:
elf: [libelf]
fuse: [macfuse]


@@ -1,3 +0,0 @@
packages:
iconv:
require: [libiconv]


@@ -15,10 +15,9 @@
# -------------------------------------------------------------------------
packages:
all:
compiler: [gcc, clang, oneapi, xl, nag, fj, aocc]
compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
providers:
awk: [gawk]
armci: [armcimpi]
blas: [openblas, amdblis]
D: [ldc]
daal: [intel-oneapi-daal]
@@ -36,11 +35,11 @@ packages:
java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libc: [glibc, musl]
libgfortran: [ gcc-runtime ]
libglx: [mesa+glx]
libglx: [mesa+glx, mesa18+glx]
libifcore: [ intel-oneapi-runtime ]
libllvm: [llvm]
libosmesa: [mesa+osmesa, mesa18+osmesa]
lua-lang: [lua, lua-luajit-openresty, lua-luajit]
luajit: [lua-luajit-openresty, lua-luajit]
mariadb-client: [mariadb-c-client, mariadb]


@@ -1,12 +0,0 @@
{% extends "!layout.html" %}
{%- block extrahead %}
<!-- Google tag (gtag.js) -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-S0PQ7WV75K"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-S0PQ7WV75K');
</script>
{% endblock %}


@@ -865,7 +865,7 @@ There are several different ways to use Spack packages once you have
installed them. As you've seen, spack packages are installed into long
paths with hashes, and you need a way to get them into your path. The
easiest way is to use :ref:`spack load <cmd-spack-load>`, which is
described in this section.
described in the next section.
Some more advanced ways to use Spack packages include:
@@ -959,86 +959,7 @@ use ``spack find --loaded``.
You can also use ``spack load --list`` to get the same output, but it
does not have the full set of query options that ``spack find`` offers.
We'll learn more about Spack's spec syntax in :ref:`a later section <sec-specs>`.
.. _extensions:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Python packages and virtual environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack can install a large number of Python packages. Their names are
typically prefixed with ``py-``. Installing and using them is no
different from any other package:
.. code-block:: console
$ spack install py-numpy
$ spack load py-numpy
$ python3
>>> import numpy
The ``spack load`` command sets the ``PATH`` variable so that the right Python
executable is used, and makes sure that ``numpy`` and its dependencies can be
located in the ``PYTHONPATH``.
Spack is different from other Python package managers in that it installs
every package into its *own* prefix. This is in contrast to ``pip``, which
installs all packages into the same prefix, be it in a virtual environment
or not.
For many users, **virtual environments** are more convenient than repeated
``spack load`` commands, particularly when working with multiple Python
packages. Fortunately Spack supports environments itself, which together
with a view are no different from Python virtual environments.
The recommended way of working with Python extensions such as ``py-numpy``
is through :ref:`Environments <environments>`. The following example creates
a Spack environment with ``numpy`` in the current working directory. It also
puts a filesystem view in ``./view``, which is a more traditional combined
prefix for all packages in the environment.
.. code-block:: console
$ spack env create --with-view view --dir .
$ spack -e . add py-numpy
$ spack -e . concretize
$ spack -e . install
Now you can activate the environment and start using the packages:
.. code-block:: console
$ spack env activate .
$ python3
>>> import numpy
The environment view is also a virtual environment, which is useful if you are
sharing the environment with others who are unfamiliar with Spack. They can
either use the Python executable directly:
.. code-block:: console
$ ./view/bin/python3
>>> import numpy
or use the activation script:
.. code-block:: console
$ source ./view/bin/activate
$ python3
>>> import numpy
In general, there should not be much difference between ``spack env activate``
and using the virtual environment. The main advantage of ``spack env activate``
is that it knows about more packages than just Python packages, and it may set
additional runtime variables that are not covered by the virtual environment
activation script.
See :ref:`environments` for a more in-depth description of Spack
environments and customizations to views.
We'll learn more about Spack's spec syntax in the next section.
.. _sec-specs:
@@ -1784,6 +1705,165 @@ check only local packages (as opposed to those used transparently from
``upstream`` spack instances) and the ``-j,--json`` option to output
machine-readable json data for any errors.
.. _extensions:
---------------------------
Extensions & Python support
---------------------------
Spack's installation model assumes that each package will live in its
own install prefix. However, certain packages are typically installed
*within* the directory hierarchy of other packages. For example,
`Python <https://www.python.org>`_ packages are typically installed in the
``$prefix/lib/python-2.7/site-packages`` directory.
In Spack, installation prefixes are immutable, so this type of installation
is not directly supported. However, it is possible to create views that
allow you to merge install prefixes of multiple packages into a single new prefix.
Views are a convenient way to get a more traditional filesystem structure.
Using *extensions*, you can ensure that Python packages always share the
same prefix in the view as Python itself. Suppose you have
Python installed like so:
.. code-block:: console
$ spack find python
==> 1 installed packages.
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
python@2.7.8
.. _cmd-spack-extensions:
^^^^^^^^^^^^^^^^^^^^
``spack extensions``
^^^^^^^^^^^^^^^^^^^^
You can find extensions for your Python installation like this:
.. code-block:: console
$ spack extensions python
==> python@2.7.8%gcc@4.4.7 arch=linux-debian7-x86_64-703c7a96
==> 36 extensions:
geos py-ipython py-pexpect py-pyside py-sip
py-basemap py-libxml2 py-pil py-pytz py-six
py-biopython py-mako py-pmw py-rpy2 py-sympy
py-cython py-matplotlib py-pychecker py-scientificpython py-virtualenv
py-dateutil py-mpi4py py-pygments py-scikit-learn
py-epydoc py-mx py-pylint py-scipy
py-gnuplot py-nose py-pyparsing py-setuptools
py-h5py py-numpy py-pyqt py-shiboken
==> 12 installed:
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-dateutil@2.4.0 py-nose@1.3.4 py-pyside@1.2.2
py-dateutil@2.4.0 py-numpy@1.9.1 py-pytz@2014.10
py-ipython@2.3.1 py-pygments@2.0.1 py-setuptools@11.3.1
py-matplotlib@1.4.2 py-pyparsing@2.0.3 py-six@1.9.0
The extensions are a subset of what's returned by ``spack list``, and
they are packages like any other. They are installed into their own
prefixes, and you can see this with ``spack find --paths``:
.. code-block:: console
$ spack find --paths py-numpy
==> 1 installed packages.
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-numpy@1.9.1 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/py-numpy@1.9.1-66733244
However, even though this package is installed, you cannot use it
directly when you run ``python``:
.. code-block:: console
$ spack load python
$ python
Python 2.7.8 (default, Feb 17 2015, 01:35:25)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-11)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import numpy
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named numpy
>>>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using Extensions in Environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The recommended way of working with extensions such as ``py-numpy``
above is through :ref:`Environments <environments>`. For example,
the following creates an environment in the current working directory
with a filesystem view in the ``./view`` directory:
.. code-block:: console
$ spack env create --with-view view --dir .
$ spack -e . add py-numpy
$ spack -e . concretize
$ spack -e . install
We recommend environments for two reasons. Firstly, environments
can be activated (requires :ref:`shell-support`):
.. code-block:: console
$ spack env activate .
which sets all the right environment variables such as ``PATH`` and
``PYTHONPATH``. This ensures that
.. code-block:: console
$ python
>>> import numpy
works. Secondly, even without shell support, the view ensures
that Python can locate its extensions:
.. code-block:: console
$ ./view/bin/python
>>> import numpy
See :ref:`environments` for a more in-depth description of Spack
environments and customizations to views.
^^^^^^^^^^^^^^^^^^^^
Using ``spack load``
^^^^^^^^^^^^^^^^^^^^
A more traditional way of using Spack and extensions is ``spack load``
(requires :ref:`shell-support`). This will add the extension to ``PYTHONPATH``
in your current shell, and Python itself will be available in the ``PATH``:
.. code-block:: console
$ spack load py-numpy
$ python
>>> import numpy
The loaded packages can be checked using ``spack find --loaded``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Loading Extensions via Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Apart from ``spack env activate`` and ``spack load``, you can load numpy
through your environment modules (using ``environment-modules`` or
``lmod``). This will also add the extension to the ``PYTHONPATH`` in
your current shell.
.. code-block:: console
$ module load <name of numpy module>
If you do not know the name of the specific numpy module you wish to
load, you can use the ``spack module tcl|lmod loads`` command to get
the name of the module from the Spack spec.
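For example, a sketch of such a lookup (the generated module name below is purely illustrative):
.. code-block:: console

   $ spack module lmod loads py-numpy
   module load py-numpy/1.9.1-gcc-4.4.7-hash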
-----------------------
Filesystem requirements
-----------------------


@@ -220,40 +220,6 @@ section of the configuration:
.. _binary_caches_oci:
---------------------------------
Automatic push to a build cache
---------------------------------
Sometimes it is convenient to push packages to a build cache as soon as they are installed. Spack can do this by setting the autopush flag when adding a mirror:
.. code-block:: console
$ spack mirror add --autopush <name> <url or path>
Or the autopush flag can be set for an existing mirror:
.. code-block:: console
$ spack mirror set --autopush <name> # enable automatic push for an existing mirror
$ spack mirror set --no-autopush <name> # disable automatic push for an existing mirror
Then after installing a package it is automatically pushed to all mirrors with ``autopush: true``. The command
.. code-block:: console
$ spack install <package>
will have the same effect as
.. code-block:: console
$ spack install <package>
$ spack buildcache push <cache> <package> # for all caches with autopush: true
.. note::
Packages are automatically pushed to a build cache only if they are built from source.
-----------------------------------------
OCI / Docker V2 registries as build cache
-----------------------------------------


@@ -21,86 +21,23 @@ is the following:
Reuse already installed packages
--------------------------------
The ``reuse`` attribute controls how aggressively Spack reuses binary packages during concretization. The
attribute can either be a single value, or an object for more complex configurations.
In the former case ("single value") it allows Spack to:
1. Reuse installed packages and buildcaches for all the specs to be concretized, when ``true``
2. Reuse installed packages and buildcaches only for the dependencies of the root specs, when ``dependencies``
3. Disregard reusing installed packages and buildcaches, when ``false``
If finer control over which specs are reused is needed, the value of this attribute can be
an object, with the following keys:
1. ``roots``: if ``true`` root specs are reused, if ``false`` only dependencies of root specs are reused
2. ``from``: list of sources from which reused specs are taken
Each source in ``from`` is itself an object:
.. list-table:: Attributes for a source or reusable specs
:header-rows: 1
* - Attribute name
- Description
* - type (mandatory, string)
- Can be ``local``, ``buildcache``, or ``external``
* - include (optional, list of specs)
- If present, reusable specs must match at least one of the constraints in the list
* - exclude (optional, list of specs)
- If present, reusable specs must not match any of the constraints in the list.
For instance, the following configuration:
.. code-block:: yaml
concretizer:
reuse:
roots: true
from:
- type: local
include:
- "%gcc"
- "%clang"
tells the concretizer to reuse all specs compiled with either ``gcc`` or ``clang`` that are installed
in the local store. Any spec from remote buildcaches is disregarded.
To reduce the boilerplate in configuration files, default values for the ``include`` and
``exclude`` options can be pushed up one level:
.. code-block:: yaml
concretizer:
reuse:
roots: true
include:
- "%gcc"
from:
- type: local
- type: buildcache
- type: local
include:
- "foo %oneapi"
In the example above we reuse all specs compiled with ``gcc`` from the local store
and remote buildcaches, and we also reuse ``foo %oneapi``. Note that the last source of
specs overrides the default ``include`` attribute.
For one-off concretizations, there are command-line arguments for each of the simple "single value"
configurations. This means a user can:
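select one of the three modes for a single command. A sketch (``--reuse-deps`` is assumed here as the dependencies-only counterpart of ``--reuse``):

.. code-block:: console

   % spack install --reuse <spec>        # reuse: true
   % spack install --reuse-deps <spec>   # reuse: dependencies
   % spack install --fresh <spec>        # reuse: false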
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
whether it will do a "fresh" installation and prefer the latest settings from
``package.py`` files and ``packages.yaml`` (``false``).
You can use:
.. code-block:: console
% spack install --reuse <spec>
to enable reuse for a single installation, or:
to enable reuse for a single installation, and you can use:
.. code-block:: console
% spack install --fresh <spec>
to do a fresh install if ``reuse`` is enabled by default.
``reuse: dependencies`` is the default.
.. seealso::

View File

@@ -718,45 +718,23 @@ command-line tool, or C/C++/Fortran program with optional Python
modules? The former should be prepended with ``py-``, while the
latter should not.
""""""""""""""""""""""""""""""
``extends`` vs. ``depends_on``
""""""""""""""""""""""""""""""
""""""""""""""""""""""
extends vs. depends_on
""""""""""""""""""""""
This is very similar to the naming dilemma above, with a slight twist.
As mentioned in the :ref:`Packaging Guide <packaging_extensions>`,
``extends`` and ``depends_on`` are very similar, but ``extends`` ensures
that the extension and extendee share the same prefix in views.
This allows the user to import a Python module without
having to add that module to ``PYTHONPATH``.
Additionally, ``extends("python")`` adds a dependency on the package
``python-venv``. This improves isolation from the system, whether
it's during the build or at runtime: user and system site packages
cannot accidentally be used by any package that ``extends("python")``.
As a rule of thumb: if a package does not install any Python modules
of its own, and merely puts a Python script in the ``bin`` directory,
then there is no need for ``extends``. If the package installs modules
in the ``site-packages`` directory, it requires ``extends``.
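One way to observe the difference is through a view; ``py-numpy`` and the view path below are illustrative:

.. code-block:: console

   $ spack install py-numpy
   $ spack view symlink ~/view py-numpy
   $ ~/view/bin/python -c "import numpy"   # no PYTHONPATH needed: py-numpy extends python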
"""""""""""""""""""""""""""""""""""""
Executing ``python`` during the build
"""""""""""""""""""""""""""""""""""""
Whenever you need to execute a Python command or pass the path of the
Python interpreter to the build system, it is best to use the global
variable ``python`` directly. For example:
.. code-block:: python
@run_before("install")
def recythonize(self):
python("setup.py", "clean") # use the `python` global
As mentioned in the previous section, ``extends("python")`` adds an
automatic dependency on ``python-venv``, which is a virtual environment
that guarantees build isolation. The ``python`` global always refers to
the correct Python interpreter, whether the package uses ``extends("python")``
or ``depends_on("python")``.
When deciding between ``extends`` and ``depends_on``, the best rule of
thumb is to check the installation prefix. If Python libraries are
installed to ``<prefix>/lib/pythonX.Y/site-packages``, then you
should use ``extends``. If Python libraries are installed elsewhere
or the only files that get installed reside in ``<prefix>/bin``, then
don't use ``extends``.
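If in doubt, one quick way to check after a trial install (``<package>`` is a placeholder):

.. code-block:: console

   $ spack install <package>
   $ find "$(spack location -i <package>)" -type d -name site-packages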
^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack

View File

@@ -5,9 +5,9 @@
.. chain:
=============================================
Chaining Spack Installations (upstreams.yaml)
=============================================
============================
Chaining Spack Installations
============================
You can point your Spack installation to another installation to use any
packages that are installed there. To register the other Spack instance,

View File

@@ -150,7 +150,7 @@ this can expose you to attacks. Use at your own risk.
--------------------
Path to custom certificates for SSL verification. The value can be a
filesystem path, or an environment variable that expands to an absolute file path.
filesystem path, or an environment variable that expands to a file path.
The default value is set to the environment variable ``SSL_CERT_FILE``
to use the same syntax used by many other applications that automatically
detect custom certificates.
@@ -160,9 +160,6 @@ in the subprocess calling ``curl``.
If ``url_fetch_method:urllib``, then files and directories are supported, i.e.
``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
will work.
In all cases the expanded path must be absolute for Spack to use the certificates.
Certificates relative to an environment can be referenced by prepending the path
with the Spack configuration variable ``$env``.
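For instance, to point Spack at a custom CA bundle (the path is hypothetical):

.. code-block:: console

   $ spack config add "config:ssl_certs:/etc/pki/custom/ca-bundle.crt"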
--------------------
``checksum``

View File

@@ -194,15 +194,15 @@ The OS that are currently supported are summarized in the table below:
* - Operating System
- Base Image
- Spack Image
* - Ubuntu 18.04
- ``ubuntu:18.04``
- ``spack/ubuntu-bionic``
* - Ubuntu 20.04
- ``ubuntu:20.04``
- ``spack/ubuntu-focal``
* - Ubuntu 22.04
- ``ubuntu:22.04``
- ``spack/ubuntu-jammy``
* - Ubuntu 24.04
- ``ubuntu:24.04``
- ``spack/ubuntu-noble``
* - CentOS 7
- ``centos:7``
- ``spack/centos7``
@@ -227,12 +227,12 @@ The OS that are currently supported are summarized in the table below:
* - Rocky Linux 9
- ``rockylinux:9``
- ``spack/rockylinux9``
* - Fedora Linux 39
- ``fedora:39``
- ``spack/fedora39``
* - Fedora Linux 40
- ``fedora:40``
- ``spack/fedora40``
* - Fedora Linux 37
- ``fedora:37``
- ``spack/fedora37``
* - Fedora Linux 38
- ``fedora:38``
- ``spack/fedora38``

View File

@@ -552,11 +552,11 @@ With either interpreter you can run a single command:
.. code-block:: console
$ spack python -c 'from spack.spec import Spec; Spec("python").concretized()'
...
$ spack python -c 'import distro; distro.linux_distribution()'
('Ubuntu', '18.04', 'Bionic Beaver')
$ spack python -i ipython -c 'from spack.spec import Spec; Spec("python").concretized()'
Out[1]: ...
$ spack python -i ipython -c 'import distro; distro.linux_distribution()'
Out[1]: ('Ubuntu', '18.04', 'Bionic Beaver')
or a file:
@@ -1071,9 +1071,9 @@ Announcing a release
We announce releases in all of the major Spack communication channels.
Publishing the release takes care of GitHub. The remaining channels are
X, Slack, and the mailing list. Here are the steps:
Twitter, Slack, and the mailing list. Here are the steps:
#. Announce the release on X.
#. Announce the release on Twitter.
* Compose the tweet on the ``@spackpm`` account per the
``spack-twitter`` slack channel.

View File

@@ -142,8 +142,12 @@ user's prompt to begin with the environment name in brackets.
$ spack env activate -p myenv
[myenv] $ ...
The ``activate`` command can also be used to create a new environment if it does not already
exist.
The ``activate`` command can also be used to create a new environment, if it is
not already defined, by adding the ``--create`` flag. Managed and anonymous
environments (anonymous environments are explained in the next section) can
both be created using the same flags that ``spack env create`` accepts. If the
environment already exists, Spack will simply activate it and ignore the
create-specific flags.
.. code-block:: console
@@ -172,36 +176,21 @@ environment will remove the view from the user environment.
Anonymous Environments
^^^^^^^^^^^^^^^^^^^^^^
Apart from managed environments, Spack also supports anonymous environments.
Anonymous environments can be placed in any directory of your choosing.
.. note::
When uninstalling packages, Spack asks the user to confirm the removal of packages
that are still used in a managed environment. This is not the case for anonymous
environments.
To create an anonymous environment, use one of the following commands:
Any directory can be treated as an environment if it contains a file
``spack.yaml``. To load an anonymous environment, use:
.. code-block:: console
$ spack env create --dir my_env
$ spack env create ./my_env
$ spack env activate -d /path/to/directory
As a shorthand, you can also create an anonymous environment upon activation if it does not
already exist:
Anonymous specs can be created in place using the command:
.. code-block:: console
$ spack env activate --create ./my_env
For convenience, Spack can also place an anonymous environment in a temporary directory for you:
.. code-block:: console
$ spack env activate --temp
$ spack env create -d .
In this case Spack simply creates a ``spack.yaml`` file in the requested
directory.
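Putting it together, a minimal anonymous-environment workflow might look like this (``zlib`` is used for illustration):

.. code-block:: console

   $ spack env activate --create ./my_env
   $ spack add zlib
   $ spack install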
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Environment Sensitive Commands
@@ -460,125 +449,6 @@ Sourcing that file in Bash will make the environment available to the
user; and can be included in ``.bashrc`` files, etc. The ``loads``
file may also be copied out of the environment, renamed, etc.
.. _environment_include_concrete:
------------------------------
Included Concrete Environments
------------------------------
Spack environments can be created based on information from already
established environments. You can think of the result as a combination of existing
environments. Spack gathers information from the existing environments'
``spack.lock`` files and uses that during the creation of the included concrete
environment. When an included concrete environment is created, it also generates
a ``spack.lock`` file for the newly created environment.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating included environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To create a combined concrete environment, you must have at least one existing
concrete environment. You will use the command ``spack env create`` with the
argument ``--include-concrete`` followed by the name or path of the environment
you'd like to include. Here is an example of how to create a combined environment
from the command line.
.. code-block:: console
$ spack env create myenv
$ spack -e myenv add python
$ spack -e myenv concretize
$ spack env create --include-concrete myenv included_env
You can also include an environment directly in the ``spack.yaml`` file. It
involves adding the ``include_concrete`` heading in the YAML, followed by the
absolute paths to the independent environments.
.. code-block:: yaml
spack:
specs: []
concretizer:
unify: true
include_concrete:
- /absolute/path/to/environment1
- /absolute/path/to/environment2
Once the ``spack.yaml`` has been updated you must concretize the environment to
get the concrete specs from the included environments.
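For example, assuming the combined environment is named ``included_env`` as above:

.. code-block:: console

   $ spack -e included_env concretize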
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating an included environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If changes were made to the base environment and you want them reflected in the
included environment, you will need to reconcretize both the base environment and the
included environment for the change to take effect. For example:
.. code-block:: console
$ spack env create myenv
$ spack -e myenv add python
$ spack -e myenv concretize
$ spack env create --include-concrete myenv included_env
$ spack -e myenv find
==> In environment myenv
==> Root specs
python
==> 0 installed packages
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
python
==> 0 installed packages
Here we see that ``included_env`` has access to the python package through
the ``myenv`` environment. But if we were to add another spec to ``myenv``,
``included_env`` would not be able to access the new information.
.. code-block:: console
$ spack -e myenv add perl
$ spack -e myenv concretize
$ spack -e myenv find
==> In environment myenv
==> Root specs
perl python
==> 0 installed packages
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
python
==> 0 installed packages
It isn't until you run the ``spack concretize`` command that the combined
environment will get the updated information from the reconcretized base environment.
.. code-block:: console
$ spack -e included_env concretize
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
perl python
==> 0 installed packages
.. _environment-configuration:
------------------------
@@ -930,7 +800,6 @@ For example, the following environment has three root packages:
This allows for a much-needed reduction in redundancy between packages
and constraints.
----------------
Filesystem Views
----------------
@@ -1164,7 +1033,7 @@ other targets to depend on the environment installation.
A typical workflow is as follows:
.. code-block:: console
.. code:: console
spack env create -d .
spack -e . add perl
@@ -1257,7 +1126,7 @@ its dependencies. This can be useful when certain flags should only apply to
dependencies. Below we show a use case where a spec is installed with verbose
output (``spack install --verbose``) while its dependencies are installed silently:
.. code-block:: console
.. code:: console
$ spack env depfile -o Makefile
@@ -1279,7 +1148,7 @@ This can be accomplished through the generated ``[<prefix>/]SPACK_PACKAGE_IDS``
variable. Assuming we have an active and concrete environment, we generate the
associated ``Makefile`` with a prefix ``example``:
.. code-block:: console
.. code:: console
$ spack env depfile -o env.mk --make-prefix example

View File

@@ -478,13 +478,6 @@ prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler.
Note that the format for the ``paths`` field in the
``extra_attributes`` section is different from the one in the ``compilers``
config. For compilers configured as external packages, the section is
named ``compilers`` and the dictionary maps language names (``c``,
``cxx``, ``fortran``) to paths, rather than using the names ``cc``,
``fc``, and ``f77``.
.. code-block:: yaml
packages:
@@ -500,10 +493,11 @@ named ``compilers`` and the dictionary maps language names (``c``,
- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
compilers:
c: /usr/bin/clang-with-suffix
paths:
cc: /usr/bin/clang-with-suffix
cxx: /usr/bin/clang++-with-extra-info
fortran: /usr/bin/gfortran
fc: /usr/bin/gfortran
f77: /usr/bin/gfortran
extra_rpaths:
- /usr/lib/llvm/
@@ -1578,8 +1572,6 @@ Microsoft Visual Studio
"""""""""""""""""""""""
Microsoft Visual Studio provides the only Windows C/C++ compiler that is currently supported by Spack.
Spack additionally requires that the Windows SDK (including WGL) be installed as part of your
Visual Studio installation, as it is required to build many packages from source.
We require several specific components to be included in the Visual Studio installation.
One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools,"
@@ -1587,7 +1579,6 @@ depending on installation type (Professional, Build Tools, etc.) The other requ
"C++ CMake tools for Windows," which can be selected from among the optional packages.
This provides CMake and Ninja for use during Spack configuration.
If you already have Visual Studio installed, you can make sure these components are installed by
rerunning the installer. Next to your installation, select "Modify" and look at the
"Installation details" pane on the right.

View File

@@ -6435,12 +6435,9 @@ the ``paths`` attribute:
echo "Target: x86_64-pc-linux-gnu"
echo "Thread model: posix"
echo "InstalledDir: /usr/bin"
platforms: ["linux", "darwin"]
results:
- spec: 'llvm@3.9.1 +clang~lld~lldb'
If the ``platforms`` attribute is present, tests are run only if the current host
matches one of the listed platforms.
Each test is performed by first creating a temporary directory structure as
specified in the corresponding ``layout`` and by then running
package detection and checking that the outcome matches the expected
@@ -6474,10 +6471,6 @@ package detection and checking that the outcome matches the expected
- A spec that is expected from detection
- Any valid spec
- Yes
* - ``results:[0]:extra_attributes``
- Extra attributes expected on the associated Spec
- Nested dictionary with string as keys, and regular expressions as leaf values
- No
"""""""""""""""""""""""""""""""
Reuse tests from other packages

View File

@@ -2,12 +2,12 @@ sphinx==7.2.6
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.1
python-levenshtein==0.25.0
docutils==0.20.1
pygments==2.17.2
urllib3==2.2.1
pytest==8.2.0
pytest==8.1.1
isort==5.13.2
black==24.4.2
black==24.3.0
flake8==7.0.0
mypy==1.10.0
mypy==1.9.0

lib/spack/env/cc vendored
View File

@@ -47,8 +47,7 @@ SPACK_F77_RPATH_ARG
SPACK_FC_RPATH_ARG
SPACK_LINKER_ARG
SPACK_SHORT_SPEC
SPACK_SYSTEM_DIRS
SPACK_MANAGED_DIRS"
SPACK_SYSTEM_DIRS"
# Optional parameters that aren't required to be set
@@ -174,6 +173,22 @@ preextend() {
unset IFS
}
# system_dir PATH
# test whether a path is a system directory
system_dir() {
IFS=':' # SPACK_SYSTEM_DIRS is colon-separated
path="$1"
for sd in $SPACK_SYSTEM_DIRS; do
if [ "${path}" = "${sd}" ] || [ "${path}" = "${sd}/" ]; then
# success if the path matches a system directory (with or without trailing slash)
unset IFS
return 0
fi
done
unset IFS
return 1 # fail if the path matches no system directory
}
# Fail with a clear message if the input contains any bell characters.
if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
die "Compiler command line contains our separator ('${lsep}'). Cannot parse."
@@ -186,18 +201,6 @@ for param in $params; do
fi
done
# eval this because SPACK_MANAGED_DIRS and SPACK_SYSTEM_DIRS are inputs we don't want to loop over.
# moving the eval inside the function would eval it every call.
eval "\
path_order() {
case \"\$1\" in
$SPACK_MANAGED_DIRS) return 0 ;;
$SPACK_SYSTEM_DIRS) return 2 ;;
/*) return 1 ;;
esac
}
"
# Check if optional parameters are defined
# If we aren't asking for debug flags, don't add them
if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then
@@ -417,12 +420,11 @@ input_command="$*"
parse_Wl() {
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
wl_expect_rpath=no
else
case "$1" in
@@ -430,25 +432,21 @@ parse_Wl() {
arg="${1#-rpath=}"
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
--rpath=*)
arg="${1#--rpath=}"
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
-rpath|--rpath)
wl_expect_rpath=yes
@@ -475,20 +473,12 @@ categorize_arguments() {
return_other_args_list=""
return_isystem_was_used=""
return_isystem_spack_store_include_dirs_list=""
return_isystem_system_include_dirs_list=""
return_isystem_include_dirs_list=""
return_spack_store_include_dirs_list=""
return_system_include_dirs_list=""
return_include_dirs_list=""
return_spack_store_lib_dirs_list=""
return_system_lib_dirs_list=""
return_lib_dirs_list=""
return_spack_store_rpath_dirs_list=""
return_system_rpath_dirs_list=""
return_rpath_dirs_list=""
@@ -556,32 +546,29 @@ categorize_arguments() {
arg="${1#-isystem}"
return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_isystem_spack_store_include_dirs_list "$arg" ;;
1) append return_isystem_include_dirs_list "$arg" ;;
2) append return_isystem_system_include_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_isystem_system_include_dirs_list "$arg"
else
append return_isystem_include_dirs_list "$arg"
fi
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_spack_store_include_dirs_list "$arg" ;;
1) append return_include_dirs_list "$arg" ;;
2) append return_system_include_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_include_dirs_list "$arg"
else
append return_include_dirs_list "$arg"
fi
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_spack_store_lib_dirs_list "$arg" ;;
1) append return_lib_dirs_list "$arg" ;;
2) append return_system_lib_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_lib_dirs_list "$arg"
else
append return_lib_dirs_list "$arg"
fi
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
@@ -614,32 +601,29 @@ categorize_arguments() {
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
@@ -677,25 +661,16 @@ categorize_arguments() {
}
categorize_arguments "$@"
spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
include_dirs_list="$return_include_dirs_list"
spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list"
include_dirs_list="$return_include_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
isystem_was_used="$return_isystem_was_used"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
other_args_list="$return_other_args_list"
#
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
@@ -755,7 +730,7 @@ esac
# Linker flags
case "$mode" in
ccld)
ld|ccld)
extend spack_flags_list SPACK_LDFLAGS
;;
esac
@@ -763,25 +738,16 @@ esac
IFS="$lsep"
categorize_arguments $spack_flags_list
unset IFS
spack_flags_isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list"
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag
@@ -801,13 +767,11 @@ if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
# Append RPATH directories. Note that in the case of the
# top-level package these directories may not exist yet. For dependencies
# it is assumed that paths have already been confirmed.
extend spack_store_rpath_dirs_list SPACK_STORE_RPATH_DIRS
extend rpath_dirs_list SPACK_RPATH_DIRS
fi
fi
if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend spack_store_lib_dirs_list SPACK_STORE_LINK_DIRS
extend lib_dirs_list SPACK_LINK_DIRS
fi
@@ -834,50 +798,38 @@ case "$mode" in
;;
esac
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
extend isystem_spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
else
extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend include_dirs_list SPACK_INCLUDE_DIRS
fi
;;
esac
#
# Finally, reassemble the command line.
#
args_list="$flags_list"
# Include search paths partitioned by (in store, non-system, system)
# Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_flags_spack_store_include_dirs_list -I
extend args_list spack_store_include_dirs_list -I
extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I
extend args_list spack_flags_isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_include_dirs_list "-I"
extend args_list include_dirs_list "-I"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
elif [ "$isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
else
extend args_list SPACK_INCLUDE_DIRS "-I"
fi
;;
esac
extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
# Library search paths partitioned by (in store, non-system, system)
extend args_list spack_flags_spack_store_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"
# Library search paths
extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
@@ -887,12 +839,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add"
fi
extend args_list spack_flags_spack_store_rpath_dirs_list "$rpath"
extend args_list spack_store_rpath_dirs_list "$rpath"
extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath"
;;
@@ -900,12 +848,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add"
fi
extend args_list spack_flags_spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}"
;;

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.5-dev (commit cbb1fd5eb397a70d466e5160b393b87b0dbcc78f)
* Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf)
astunparse
----------------

View File

@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]:
if hasattr(self, a):
if memo is not None:
setattr(t, a, copy.deepcopy(getattr(self, a), memo))
setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
else:
setattr(t, a, getattr(self, a))
# fmt: on

View File

@@ -1,3 +1,3 @@
"""Init file to avoid namespace packages"""
__version__ = "0.2.4"
__version__ = "0.2.3"

View File

@@ -5,10 +5,9 @@
"""The "cpu" package permits to query and compare different
CPU microarchitectures.
"""
from .detect import brand_string, host
from .detect import host
from .microarchitecture import (
TARGETS,
InvalidCompilerVersion,
Microarchitecture,
UnsupportedMicroarchitecture,
generic_microarchitecture,
@@ -16,12 +15,10 @@
)
__all__ = [
"brand_string",
"host",
"TARGETS",
"InvalidCompilerVersion",
"Microarchitecture",
"UnsupportedMicroarchitecture",
"TARGETS",
"generic_microarchitecture",
"host",
"version_components",
]

View File

@@ -47,11 +47,7 @@ def decorator(factory):
def partial_uarch(
name: str = "",
vendor: str = "",
features: Optional[Set[str]] = None,
generation: int = 0,
cpu_part: str = "",
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
"""Construct a partial microarchitecture, from information gathered during system scan."""
return Microarchitecture(
@@ -61,7 +57,6 @@ def partial_uarch(
features=features or set(),
compilers={},
generation=generation,
cpu_part=cpu_part,
)
@@ -95,7 +90,6 @@ def proc_cpuinfo() -> Microarchitecture:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
cpu_part=data.get("CPU part", ""),
)
if architecture in (PPC64LE, PPC64):
@@ -161,31 +155,6 @@ def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit
return register & mask > 0
def brand_string(self) -> Optional[str]:
"""Returns the brand string, if available."""
if self.highest_extension_support < 0x80000004:
return None
r1 = self.cpuid.registers_for(eax=0x80000002, ecx=0)
r2 = self.cpuid.registers_for(eax=0x80000003, ecx=0)
r3 = self.cpuid.registers_for(eax=0x80000004, ecx=0)
result = struct.pack(
"IIIIIIIIIIII",
r1.eax,
r1.ebx,
r1.ecx,
r1.edx,
r2.eax,
r2.ebx,
r2.ecx,
r2.edx,
r3.eax,
r3.ebx,
r3.ecx,
r3.edx,
).decode("utf-8")
return result.strip("\x00")
@detection(operating_system="Windows")
def cpuid_info():
@@ -205,8 +174,8 @@ def _check_output(args, env):
WINDOWS_MAPPING = {
"AMD64": X86_64,
"ARM64": AARCH64,
"AMD64": "x86_64",
"ARM64": "aarch64",
}
@@ -351,10 +320,6 @@ def sorting_fn(item):
generic_candidates = [c for c in candidates if c.vendor == "generic"]
best_generic = max(generic_candidates, key=sorting_fn)
# Relevant for AArch64. Filter on "cpu_part" if we have any match
if info.cpu_part != "" and any(c for c in candidates if info.cpu_part == c.cpu_part):
candidates = [c for c in candidates if info.cpu_part == c.cpu_part]
# Filter the candidates to be descendant of the best generic candidate.
# This is to avoid that the lack of a niche feature that can be disabled
# from e.g. BIOS prevents detection of a reasonably performant architecture
@@ -444,16 +409,3 @@ def compatibility_check_for_riscv64(info, target):
return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic"
)
def brand_string() -> Optional[str]:
"""Returns the brand string of the host, if detected, or None."""
if platform.system() == "Darwin":
return _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if host().family == X86_64:
return CpuidInfoCollector().brand_string()
return None
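# A minimal usage sketch for the API above (assumes this package is importable
# as ``archspec.cpu``, as it is in the vendored copy):
#
#     import archspec.cpu
#     print(archspec.cpu.brand_string())  # e.g. a CPU marketing name, or None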

View File

@@ -2,7 +2,9 @@
# Archspec Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Types and functions to manage information on CPU microarchitectures."""
"""Types and functions to manage information
on CPU microarchitectures.
"""
import functools
import platform
import re
@@ -63,24 +65,21 @@ class Microarchitecture:
passed in as argument above.
* versions: versions that support this micro-architecture.
generation (int): generation of the micro-architecture, if relevant.
cpu_part (str): cpu part of the architecture, if relevant.
generation (int): generation of the micro-architecture, if
relevant.
"""
# pylint: disable=too-many-arguments,too-many-instance-attributes
# pylint: disable=too-many-arguments
#: Aliases for micro-architecture's features
feature_aliases = FEATURE_ALIASES
def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu_part=""):
def __init__(self, name, parents, vendor, features, compilers, generation=0):
self.name = name
self.parents = parents
self.vendor = vendor
self.features = features
self.compilers = compilers
# Only relevant for PowerPC
self.generation = generation
# Only relevant for AArch64
self.cpu_part = cpu_part
# Cache the ancestor computation
self._ancestors = None
@@ -112,7 +111,6 @@ def __eq__(self, other):
and self.parents == other.parents # avoid ancestors here
and self.compilers == other.compilers
and self.generation == other.generation
and self.cpu_part == other.cpu_part
)
@coerce_target_names
@@ -145,8 +143,7 @@ def __repr__(self):
cls_name = self.__class__.__name__
fmt = (
cls_name + "({0.name!r}, {0.parents!r}, {0.vendor!r}, "
"{0.features!r}, {0.compilers!r}, generation={0.generation!r}, "
"cpu_part={0.cpu_part!r})"
"{0.features!r}, {0.compilers!r}, {0.generation!r})"
)
return fmt.format(self)
@@ -193,7 +190,6 @@ def to_dict(self):
"generation": self.generation,
"parents": [str(x) for x in self.parents],
"compilers": self.compilers,
"cpupart": self.cpu_part,
}
@staticmethod
@@ -206,15 +202,12 @@ def from_dict(data) -> "Microarchitecture":
features=set(data["features"]),
compilers=data.get("compilers", {}),
generation=data.get("generation", 0),
cpu_part=data.get("cpupart", ""),
)
def optimization_flags(self, compiler, version):
"""Returns a string containing the optimization flags that needs
to be used to produce code optimized for this micro-architecture.
The version is expected to be a string of dot separated digits.
If there is no information on the compiler passed as argument the
function returns an empty string. If it is known that the compiler
version we want to use does not support this architecture the function
@@ -223,11 +216,6 @@ def optimization_flags(self, compiler, version):
Args:
compiler (str): name of the compiler to be used
version (str): version of the compiler to be used
Raises:
UnsupportedMicroarchitecture: if the requested compiler does not support
this micro-architecture.
ValueError: if the version doesn't match the expected format
"""
# If we don't have information on compiler at all return an empty string
if compiler not in self.family.compilers:
@@ -244,14 +232,6 @@ def optimization_flags(self, compiler, version):
msg = msg.format(compiler, best_target, best_target.family)
raise UnsupportedMicroarchitecture(msg)
# Check that the version matches the expected format
if not re.match(r"^(?:\d+\.)*\d+$", version):
msg = (
"invalid format for the compiler version argument. "
"Only dot separated digits are allowed."
)
raise InvalidCompilerVersion(msg)
# If we have information on this compiler we need to check the
# version being used
compiler_info = self.compilers[compiler]
@@ -312,7 +292,7 @@ def generic_microarchitecture(name):
Args:
name (str): name of the micro-architecture
"""
return Microarchitecture(name, parents=[], vendor="generic", features=set(), compilers={})
return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={})
def version_components(version):
@@ -365,11 +345,8 @@ def fill_target_from_dict(name, data, targets):
features = set(values["features"])
compilers = values.get("compilers", {})
generation = values.get("generation", 0)
cpu_part = values.get("cpupart", "")
targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation=generation, cpu_part=cpu_part
)
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
@@ -390,15 +367,7 @@ def fill_target_from_dict(name, data, targets):
TARGETS = LazyDictionary(_known_microarchitectures)
class ArchspecError(Exception):
"""Base class for errors within archspec"""
class UnsupportedMicroarchitecture(ArchspecError, ValueError):
class UnsupportedMicroarchitecture(ValueError):
"""Raised if a compiler version does not support optimization for a given
micro-architecture.
"""
class InvalidCompilerVersion(ArchspecError, ValueError):
"""Raised when an invalid format is used for compiler versions in archspec."""

View File

@@ -2225,14 +2225,10 @@
],
"nvhpc": [
{
"versions": "21.11:23.8",
"versions": "21.11:",
"name": "zen3",
"flags": "-tp {name}",
"warnings": "zen4 is not fully supported by nvhpc versions < 23.9, falling back to zen3"
},
{
"versions": "23.9:",
"flags": "-tp {name}"
"warnings": "zen4 is not fully supported by nvhpc yet, falling back to zen3"
}
]
}
@@ -2715,8 +2711,7 @@
"flags": "-mcpu=thunderx2t99"
}
]
},
"cpupart": "0x0af"
}
},
"a64fx": {
"from": ["armv8.2a"],
@@ -2784,8 +2779,7 @@
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
}
]
},
"cpupart": "0x001"
}
},
"cortex_a72": {
"from": ["aarch64"],
@@ -2822,8 +2816,7 @@
"flags" : "-mcpu=cortex-a72"
}
]
},
"cpupart": "0xd08"
}
},
"neoverse_n1": {
"from": ["cortex_a72", "armv8.2a"],
@@ -2844,7 +2837,8 @@
"asimdrdm",
"lrcpc",
"dcpop",
"asimddp"
"asimddp",
"ssbs"
],
"compilers" : {
"gcc": [
@@ -2908,8 +2902,7 @@
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd0c"
}
},
"neoverse_v1": {
"from": ["neoverse_n1", "armv8.4a"],
@@ -2933,6 +2926,8 @@
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
"sve",
@@ -2941,6 +2936,9 @@
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"paca",
"pacg",
"dcpodp",
"svei8mm",
"svebf16",
@@ -3008,7 +3006,7 @@
},
{
"versions": "11:",
"flags" : "-march=armv8.4-a+sve+fp16+bf16+crypto+i8mm+rng"
"flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
},
{
"versions": "12:",
@@ -3032,8 +3030,7 @@
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd40"
}
},
"neoverse_v2": {
"from": ["neoverse_n1", "armv9.0a"],
@@ -3057,22 +3054,35 @@
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"sve2",
"sveaes",
"svepmull",
"svebitperm",
"svesha3",
"svesm4",
"flagm2",
"frint",
"svei8mm",
"svebf16",
"i8mm",
"bf16"
"bf16",
"dgh",
"bti"
],
"compilers" : {
"gcc": [
@@ -3097,19 +3107,15 @@
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
},
{
"versions": "10.0:11.3.99",
"versions": "10.0:11.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
},
{
"versions": "11.4:11.99",
"flags" : "-mcpu=neoverse-v2"
},
{
"versions": "12.0:12.2.99",
"versions": "12.0:12.99",
"flags" : "-march=armv9-a+i8mm+bf16 -mtune=cortex-a710"
},
{
"versions": "12.3:",
"versions": "13.0:",
"flags" : "-mcpu=neoverse-v2"
}
],
@@ -3144,112 +3150,7 @@
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd4f"
},
"neoverse_n2": {
"from": ["neoverse_n1", "armv9.0a"],
"vendor": "ARM",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"uscat",
"ilrcpc",
"flagm",
"sb",
"dcpodp",
"sve2",
"flagm2",
"frint",
"svei8mm",
"svebf16",
"i8mm",
"bf16"
],
"compilers" : {
"gcc": [
{
"versions": "4.8:5.99",
"flags": "-march=armv8-a"
},
{
"versions": "6:6.99",
"flags" : "-march=armv8.1-a"
},
{
"versions": "7.0:7.99",
"flags" : "-march=armv8.2-a -mtune=cortex-a72"
},
{
"versions": "8.0:8.99",
"flags" : "-march=armv8.4-a+sve -mtune=cortex-a72"
},
{
"versions": "9.0:9.99",
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
},
{
"versions": "10.0:10.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
},
{
"versions": "11.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"clang" : [
{
"versions": "9.0:10.99",
"flags" : "-march=armv8.5-a+sve"
},
{
"versions": "11.0:13.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16"
},
{
"versions": "14.0:15.99",
"flags" : "-march=armv9-a+i8mm+bf16"
},
{
"versions": "16.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"arm" : [
{
"versions": "23.04.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"nvhpc" : [
{
"versions": "23.3:",
"name": "neoverse-n1",
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd49"
}
},
"m1": {
"from": ["armv8.4a"],
@@ -3315,8 +3216,7 @@
"flags" : "-mcpu=apple-m1"
}
]
},
"cpupart": "0x022"
}
},
"m2": {
"from": ["m1", "armv8.5a"],
@@ -3394,8 +3294,7 @@
"flags" : "-mcpu=apple-m2"
}
]
},
"cpupart": "0x032"
}
},
"arm": {
"from": [],

View File

@@ -52,9 +52,6 @@
}
}
}
},
"cpupart": {
"type": "string"
}
},
"required": [
@@ -110,4 +107,4 @@
"additionalProperties": false
}
}
}
}

View File

@@ -1,13 +0,0 @@
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/comments.py b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
index 1badeda585..892c868af3 100644
--- a/lib/spack/external/_vendoring/ruamel/yaml/comments.py
+++ b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]:
if hasattr(self, a):
if memo is not None:
- setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
+ setattr(t, a, copy.deepcopy(getattr(self, a), memo))
else:
setattr(t, a, getattr(self, a))
# fmt: on

View File

@@ -98,10 +98,3 @@ def path_filter_caller(*args, **kwargs):
if _func:
return holder_func(_func)
return holder_func
def sanitize_win_longpath(path: str) -> str:
"""Strip Windows extended path prefix from strings
Returns sanitized string.
no-op if extended path prefix is not present"""
return path.lstrip("\\\\?\\")

View File

@@ -187,58 +187,26 @@ def polite_filename(filename: str) -> str:
return _polite_antipattern().sub("_", filename)
def getuid() -> Union[str, int]:
"""Returns os getuid on non Windows
On Windows returns 0 for admin users, login string otherwise
This is in line with behavior from get_owner_uid which
always returns the login string on Windows
"""
def getuid():
if sys.platform == "win32":
import ctypes
# If not admin, use the string name of the login as a unique ID
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
return os.getlogin()
return 1
return 0
else:
return os.getuid()
def _win_rename(src, dst):
# os.replace will still fail if on Windows (but not POSIX) if the dst
# is a symlink to a directory (all other cases have parity Windows <-> Posix)
if os.path.islink(dst) and os.path.isdir(os.path.realpath(dst)):
if os.path.samefile(src, dst):
# src and dst are the same
# do nothing and exit early
return
# If dst exists and is a symlink to a directory
# we need to remove dst and then perform rename/replace
# this is safe to do as there's no chance src == dst now
os.remove(dst)
os.replace(src, dst)
@system_path_filter
def msdos_escape_parens(path):
"""MS-DOS interprets parens as grouping parameters even in a quoted string"""
if sys.platform == "win32":
return path.replace("(", "^(").replace(")", "^)")
else:
return path
@system_path_filter
def rename(src, dst):
# On Windows, os.rename will fail if the destination file already exists
# os.replace is the same as os.rename on POSIX and is MoveFileExW w/
# the MOVEFILE_REPLACE_EXISTING flag on Windows
# Windows invocation is abstracted behind additional logic handling
# remaining cases of divergent behavior across platforms
if sys.platform == "win32":
_win_rename(src, dst)
else:
os.replace(src, dst)
# Windows path existence checks will sometimes fail on junctions/links/symlinks
# so check for that case
if os.path.exists(dst) or islink(dst):
os.remove(dst)
os.rename(src, dst)
@system_path_filter
@@ -568,13 +536,7 @@ def exploding_archive_handler(tarball_container, stage):
@system_path_filter(arg_slice=slice(1))
def get_owner_uid(path, err_msg=None) -> Union[str, int]:
"""Returns owner UID of path destination
On non Windows this is the value of st_uid
On Windows this is the login string associated with the
owning user.
"""
def get_owner_uid(path, err_msg=None):
if not os.path.exists(path):
mkdirp(path, mode=stat.S_IRWXU)
@@ -843,7 +805,7 @@ def copy_tree(
if islink(s):
link_target = resolve_link_target_relative_to_the_link(s)
if symlinks:
target = readlink(s)
target = os.readlink(s)
if os.path.isabs(target):
def escaped_path(path):
@@ -1255,12 +1217,10 @@ def windows_sfn(path: os.PathLike):
import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# Method with null values returns size of short path name
sz = k32.GetShortPathNameW(path, None, 0)
# stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * sz
TCHAR_arr = ctypes.c_wchar * len(path)
ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ctypes.byref(ret_str), sz)
k32.GetShortPathNameW(path, ret_str, len(path))
return ret_str.value
@@ -2450,10 +2410,9 @@ def add_library_dependent(self, *dest):
"""
for pth in dest:
if os.path.isfile(pth):
new_pth = pathlib.Path(pth).parent
self._additional_library_dependents.add(pathlib.Path(pth).parent)
else:
new_pth = pathlib.Path(pth)
self._additional_library_dependents.add(new_pth)
self._additional_library_dependents.add(pathlib.Path(pth))
@property
def rpaths(self):
@@ -2531,14 +2490,8 @@ def establish_link(self):
# for each binary install dir in self.pkg (i.e. pkg.prefix.bin, pkg.prefix.lib)
# install a symlink to each dependent library
# do not rpath for system libraries included in the dag
# we should not be modifying libraries managed by the Windows system
# as this will negatively impact linker behavior and can result in permission
# errors if those system libs are not modifiable by Spack
if "windows-system" not in getattr(self.pkg, "tags", []):
for library, lib_dir in itertools.product(self.rpaths, self.library_dependents):
self._link(library, lib_dir)
for library, lib_dir in itertools.product(self.rpaths, self.library_dependents):
self._link(library, lib_dir)
@system_path_filter

View File

@@ -11,7 +11,7 @@
from llnl.util import lang, tty
from ..path import sanitize_win_longpath, system_path_filter
from ..path import system_path_filter
if sys.platform == "win32":
from win32file import CreateHardLink
@@ -247,9 +247,9 @@ def _windows_create_junction(source: str, link: str):
out, err = proc.communicate()
tty.debug(out.decode())
if proc.returncode != 0:
err_str = err.decode()
tty.error(err_str)
raise SymlinkError("Make junction command returned a non-zero return code.", err_str)
err = err.decode()
tty.error(err)
raise SymlinkError("Make junction command returned a non-zero return code.", err)
def _windows_create_hard_link(path: str, link: str):
@@ -269,14 +269,14 @@ def _windows_create_hard_link(path: str, link: str):
CreateHardLink(link, path)
def readlink(path: str, *, dir_fd=None):
def readlink(path: str):
"""Spack utility to override of os.readlink method to work cross platform"""
if _windows_is_hardlink(path):
return _windows_read_hard_link(path)
elif _windows_is_junction(path):
return _windows_read_junction(path)
else:
return sanitize_win_longpath(os.readlink(path, dir_fd=dir_fd))
return os.readlink(path)
def _windows_read_hard_link(link: str) -> str:

View File

@@ -12,7 +12,7 @@
import traceback
from datetime import datetime
from sys import platform as _platform
from typing import Any, NoReturn
from typing import NoReturn
if _platform != "win32":
import fcntl
@@ -158,22 +158,21 @@ def get_timestamp(force=False):
return ""
def msg(message: Any, *args: Any, newline: bool = True) -> None:
def msg(message, *args, **kwargs):
if not msg_enabled():
return
if isinstance(message, Exception):
message = f"{message.__class__.__name__}: {message}"
else:
message = str(message)
message = "%s: %s" % (message.__class__.__name__, str(message))
newline = kwargs.get("newline", True)
st_text = ""
if _stacktrace:
st_text = process_stacktrace(2)
nl = "\n" if newline else ""
cwrite(f"@*b{{{st_text}==>}} {get_timestamp()}{cescape(_output_filter(message))}{nl}")
if newline:
cprint("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
else:
cwrite("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
for arg in args:
print(indent + _output_filter(str(arg)))

View File

@@ -237,6 +237,7 @@ def transpose():
def colified(
elts: List[Any],
cols: int = 0,
output: Optional[IO] = None,
indent: int = 0,
padding: int = 2,
tty: Optional[bool] = None,

View File

@@ -59,11 +59,9 @@
To output an @, use '@@'. To output a } inside braces, use '}}'.
"""
import os
import re
import sys
from contextlib import contextmanager
from typing import Optional
class ColorParseError(Exception):
@@ -97,34 +95,14 @@ def __init__(self, message):
} # white
# Regex to be used for color formatting
COLOR_RE = re.compile(r"@(?:(@)|(\.)|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)")
color_re = r"@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)"
# Mapping from color arguments to values for tty.set_color
color_when_values = {"always": True, "auto": None, "never": False}
def _color_when_value(when):
"""Raise a ValueError for an invalid color setting.
Valid values are 'always', 'never', and 'auto', or equivalently,
True, False, and None.
"""
if when in color_when_values:
return color_when_values[when]
elif when not in color_when_values.values():
raise ValueError("Invalid color setting: %s" % when)
return when
def _color_from_environ() -> Optional[bool]:
try:
return _color_when_value(os.environ.get("SPACK_COLOR", "auto"))
except ValueError:
return None
#: When `None` colorize when stdout is tty, when `True` or `False` always or never colorize resp.
_force_color = _color_from_environ()
# Force color; None: Only color if stdout is a tty
# True: Always colorize output, False: Never colorize output
_force_color = None
def try_enable_terminal_color_on_windows():
@@ -185,6 +163,19 @@ def _err_check(result, func, args):
debug("Unable to support color on Windows terminal")
def _color_when_value(when):
"""Raise a ValueError for an invalid color setting.
Valid values are 'always', 'never', and 'auto', or equivalently,
True, False, and None.
"""
if when in color_when_values:
return color_when_values[when]
elif when not in color_when_values.values():
raise ValueError("Invalid color setting: %s" % when)
return when
def get_color_when():
"""Return whether commands should print color or not."""
if _force_color is not None:
@@ -212,64 +203,77 @@ def color_when(value):
set_color_when(old_value)
def _escape(s: str, color: bool, enclose: bool, zsh: bool) -> str:
"""Returns a TTY escape sequence for a color"""
if color:
if zsh:
result = rf"\e[0;{s}m"
class match_to_ansi:
def __init__(self, color=True, enclose=False, zsh=False):
self.color = _color_when_value(color)
self.enclose = enclose
self.zsh = zsh
def escape(self, s):
"""Returns a TTY escape sequence for a color"""
if self.color:
if self.zsh:
result = rf"\e[0;{s}m"
else:
result = f"\033[{s}m"
if self.enclose:
result = rf"\[{result}\]"
return result
else:
result = f"\033[{s}m"
return ""
if enclose:
result = rf"\[{result}\]"
def __call__(self, match):
"""Convert a match object generated by ``color_re`` into an ansi
color code. This can be used as a handler in ``re.sub``.
"""
style, color, text = match.groups()
m = match.group(0)
return result
else:
return ""
if m == "@@":
return "@"
elif m == "@.":
return self.escape(0)
elif m == "@":
raise ColorParseError("Incomplete color format: '%s' in %s" % (m, match.string))
string = styles[style]
if color:
if color not in colors:
raise ColorParseError(
"Invalid color specifier: '%s' in '%s'" % (color, match.string)
)
string += ";" + str(colors[color])
colored_text = ""
if text:
colored_text = text + self.escape(0)
return self.escape(string) + colored_text
def colorize(
string: str, color: Optional[bool] = None, enclose: bool = False, zsh: bool = False
) -> str:
def colorize(string, **kwargs):
"""Replace all color expressions in a string with ANSI control codes.
Args:
string: The string to replace
string (str): The string to replace
Returns:
The filtered string
str: The filtered string
Keyword Arguments:
color: If False, output will be plain text without control codes, for output to
non-console devices (default: automatically choose color or not)
enclose: If True, enclose ansi color sequences with
color (bool): If False, output will be plain text without control
codes, for output to non-console devices.
enclose (bool): If True, enclose ansi color sequences with
square brackets to prevent misestimation of terminal width.
zsh: If True, use zsh ansi codes instead of bash ones (for variables like PS1)
zsh (bool): If True, use zsh ansi codes instead of bash ones (for variables like PS1)
"""
color = color if color is not None else get_color_when()
def match_to_ansi(match):
"""Convert a match object generated by ``COLOR_RE`` into an ansi
color code. This can be used as a handler in ``re.sub``.
"""
escaped_at, dot, style, color_code, text = match.groups()
if escaped_at:
return "@"
elif dot:
return _escape(0, color, enclose, zsh)
elif not (style or color_code):
raise ColorParseError(
f"Incomplete color format: '{match.group(0)}' in '{match.string}'"
)
ansi_code = _escape(f"{styles[style]};{colors.get(color_code, '')}", color, enclose, zsh)
if text:
return f"{ansi_code}{text}{_escape(0, color, enclose, zsh)}"
else:
return ansi_code
return COLOR_RE.sub(match_to_ansi, string).replace("}}", "}")
color = _color_when_value(kwargs.get("color", get_color_when()))
zsh = kwargs.get("zsh", False)
string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose")), string, zsh)
string = string.replace("}}", "}")
return string
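# A minimal usage sketch for ``colorize`` (the markup string is illustrative,
# following the @<style><color>{text} format documented at the top of this file):
#
#     from llnl.util.tty.color import colorize
#     print(colorize("@*b{==>} done"))  # bold blue "==>", then plain text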
def clen(string):
@@ -301,7 +305,7 @@ def cprint(string, stream=None, color=None):
cwrite(string + "\n", stream, color)
def cescape(string: str) -> str:
def cescape(string):
"""Escapes special characters needed for color codes.
Replaces the following symbols with their equivalent literal forms:
@@ -317,7 +321,10 @@ def cescape(string: str) -> str:
Returns:
(str): the string with color codes escaped
"""
return string.replace("@", "@@").replace("}", "}}")
string = str(string)
string = string.replace("@", "@@")
string = string.replace("}", "}}")
return string
class ColorStream:

View File

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: PEP440 canonical <major>.<minor>.<micro>.<devN> string
__version__ = "0.22.2"
__version__ = "0.22.0.dev0"
spack_version = __version__

View File

@@ -254,8 +254,8 @@ def _search_duplicate_specs_in_externals(error_cls):
@config_packages
def _deprecated_preferences(error_cls):
"""Search package preferences deprecated in v0.21 (and slated for removal in v0.23)"""
# TODO (v0.23): remove this audit as the attributes will not be allowed in config
"""Search package preferences deprecated in v0.21 (and slated for removal in v0.22)"""
# TODO (v0.22): remove this audit as the attributes will not be allowed in config
errors = []
packages_yaml = spack.config.CONFIG.get_config("packages")
@@ -779,7 +779,7 @@ def check_virtual_with_variants(spec, msg):
return
error = error_cls(
f"{pkg_name}: {msg}",
[f"remove variants from '{spec}' in depends_on directive in {filename}"],
f"remove variants from '{spec}' in depends_on directive in {filename}",
)
errors.append(error)
@@ -1046,7 +1046,7 @@ def _extracts_errors(triggers, summary):
group="externals",
tag="PKG-EXTERNALS",
description="Sanity checks for external software detection",
kwargs=("pkgs", "debug_log"),
kwargs=("pkgs",),
)
@@ -1069,7 +1069,7 @@ def packages_with_detection_tests():
@external_detection
def _test_detection_by_executable(pkgs, debug_log, error_cls):
def _test_detection_by_executable(pkgs, error_cls):
"""Test drive external detection for packages"""
import spack.detection
@@ -1095,7 +1095,6 @@ def _test_detection_by_executable(pkgs, debug_log, error_cls):
for idx, test_runner in enumerate(
spack.detection.detection_tests(pkg_name, spack.repo.PATH)
):
debug_log(f"[{__file__}]: running test {idx} for package {pkg_name}")
specs = test_runner.execute()
expected_specs = test_runner.expected_specs
@@ -1112,75 +1111,4 @@ def _test_detection_by_executable(pkgs, debug_log, error_cls):
details = [msg.format(s, idx) for s in sorted(not_expected)]
errors.append(error_cls(summary=summary, details=details))
matched_detection = []
for candidate in expected_specs:
try:
idx = specs.index(candidate)
matched_detection.append((candidate, specs[idx]))
except (AttributeError, ValueError):
pass
def _compare_extra_attribute(_expected, _detected, *, _spec):
result = []
# Check items are of the same type
if not isinstance(_detected, type(_expected)):
_summary = f'{pkg_name}: error when trying to detect "{_expected}"'
_details = [f"{_detected} was detected instead"]
return [error_cls(summary=_summary, details=_details)]
# If they are strings, expected is a regex
if isinstance(_expected, str):
try:
_regex = re.compile(_expected)
except re.error:
_summary = f'{pkg_name}: illegal regex in "{_spec}" extra attributes'
_details = [f"{_expected} is not a valid regex"]
return [error_cls(summary=_summary, details=_details)]
if not _regex.match(_detected):
_summary = (
f'{pkg_name}: error when trying to match "{_expected}" '
f"in extra attributes"
)
_details = [f"{_detected} does not match the regex"]
return [error_cls(summary=_summary, details=_details)]
if isinstance(_expected, dict):
_not_detected = set(_expected.keys()) - set(_detected.keys())
if _not_detected:
_summary = f"{pkg_name}: cannot detect some attributes for spec {_spec}"
_details = [
f'"{_expected}" was expected',
f'"{_detected}" was detected',
] + [f'attribute "{s}" was not detected' for s in sorted(_not_detected)]
result.append(error_cls(summary=_summary, details=_details))
_common = set(_expected.keys()) & set(_detected.keys())
for _key in _common:
result.extend(
_compare_extra_attribute(_expected[_key], _detected[_key], _spec=_spec)
)
return result
for expected, detected in matched_detection:
# We might not want to test all attributes, so avoid not_expected
not_detected = set(expected.extra_attributes) - set(detected.extra_attributes)
if not_detected:
summary = f"{pkg_name}: cannot detect some attributes for spec {expected}"
details = [
f'"{s}" was not detected [test_id={idx}]' for s in sorted(not_detected)
]
errors.append(error_cls(summary=summary, details=details))
common = set(expected.extra_attributes) & set(detected.extra_attributes)
for key in common:
errors.extend(
_compare_extra_attribute(
expected.extra_attributes[key],
detected.extra_attributes[key],
_spec=expected,
)
)
return errors

View File

@@ -23,12 +23,12 @@
import warnings
from contextlib import closing
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
from urllib.error import HTTPError, URLError
import llnl.util.filesystem as fsys
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.filesystem import BaseDirectoryVisitor, mkdirp, visit_directory_tree
from llnl.util.symlink import readlink
import spack.caches
import spack.cmd
@@ -658,7 +658,7 @@ def get_buildfile_manifest(spec):
# 2. paths are used as strings.
for rel_path in visitor.symlinks:
abs_path = os.path.join(root, rel_path)
link = readlink(abs_path)
link = os.readlink(abs_path)
if os.path.isabs(link) and link.startswith(spack.store.STORE.layout.root):
data["link_to_relocate"].append(rel_path)
@@ -898,8 +898,9 @@ def url_read_method(url):
try:
_, _, spec_file = web_util.read_from_url(url)
contents = codecs.getreader("utf-8")(spec_file).read()
except web_util.SpackWebError as e:
tty.error(f"Error reading specfile: {url}: {e}")
except (URLError, web_util.SpackWebError) as url_err:
tty.error("Error reading specfile: {0}".format(url))
tty.error(url_err)
return contents
try:
@@ -2000,7 +2001,6 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
with spack.util.path.filter_padding():
tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
extract_tarball(spec, download_result, force)
spec.package.windows_establish_runtime_linkage()
spack.hooks.post_install(spec, False)
spack.store.STORE.db.add(spec, spack.store.STORE.layout)
@@ -2039,17 +2039,21 @@ def try_direct_fetch(spec, mirrors=None):
try:
_, _, fs = web_util.read_from_url(buildcache_fetch_url_signed_json)
specfile_is_signed = True
except web_util.SpackWebError as e1:
except (URLError, web_util.SpackWebError, HTTPError) as url_err:
try:
_, _, fs = web_util.read_from_url(buildcache_fetch_url_json)
except web_util.SpackWebError as e2:
except (URLError, web_util.SpackWebError, HTTPError) as url_err_x:
tty.debug(
f"Did not find {specfile_name} on {buildcache_fetch_url_signed_json}",
e1,
"Did not find {0} on {1}".format(
specfile_name, buildcache_fetch_url_signed_json
),
url_err,
level=2,
)
tty.debug(
f"Did not find {specfile_name} on {buildcache_fetch_url_json}", e2, level=2
"Did not find {0} on {1}".format(specfile_name, buildcache_fetch_url_json),
url_err_x,
level=2,
)
continue
specfile_contents = codecs.getreader("utf-8")(fs).read()
@@ -2134,9 +2138,6 @@ def get_keys(install=False, trust=False, force=False, mirrors=None):
for mirror in mirror_collection.values():
fetch_url = mirror.fetch_url
# TODO: oci:// does not support signing.
if fetch_url.startswith("oci://"):
continue
keys_url = url_util.join(
fetch_url, BUILD_CACHE_RELATIVE_PATH, BUILD_CACHE_KEYS_RELATIVE_PATH
)
@@ -2147,12 +2148,19 @@ def get_keys(install=False, trust=False, force=False, mirrors=None):
try:
_, _, json_file = web_util.read_from_url(keys_index)
json_index = sjson.load(codecs.getreader("utf-8")(json_file))
except web_util.SpackWebError as url_err:
except (URLError, web_util.SpackWebError) as url_err:
if web_util.url_exists(keys_index):
err_msg = [
"Unable to find public keys in {0},",
" caught exception attempting to read from {1}.",
]
tty.error(
f"Unable to find public keys in {url_util.format(fetch_url)},"
f" caught exception attempting to read from {url_util.format(keys_index)}."
"".join(err_msg).format(
url_util.format(fetch_url), url_util.format(keys_index)
)
)
tty.debug(url_err)
continue
@@ -2432,7 +2440,7 @@ def get_remote_hash(self):
url_index_hash = url_util.join(self.url, BUILD_CACHE_RELATIVE_PATH, "index.json.hash")
try:
response = self.urlopen(urllib.request.Request(url_index_hash, headers=self.headers))
except (TimeoutError, urllib.error.URLError):
except urllib.error.URLError:
return None
# Validate the hash
@@ -2454,7 +2462,7 @@ def conditional_fetch(self) -> FetchIndexResult:
try:
response = self.urlopen(urllib.request.Request(url_index, headers=self.headers))
except (TimeoutError, urllib.error.URLError) as e:
except urllib.error.URLError as e:
raise FetchIndexError("Could not fetch index from {}".format(url_index), e) from e
try:
@@ -2495,7 +2503,10 @@ def __init__(self, url, etag, urlopen=web_util.urlopen):
def conditional_fetch(self) -> FetchIndexResult:
# Just do a conditional fetch immediately
url = url_util.join(self.url, BUILD_CACHE_RELATIVE_PATH, "index.json")
headers = {"User-Agent": web_util.SPACK_USER_AGENT, "If-None-Match": f'"{self.etag}"'}
headers = {
"User-Agent": web_util.SPACK_USER_AGENT,
"If-None-Match": '"{}"'.format(self.etag),
}
try:
response = self.urlopen(urllib.request.Request(url, headers=headers))
@@ -2503,14 +2514,14 @@ def conditional_fetch(self) -> FetchIndexResult:
if e.getcode() == 304:
# Not modified; that means fresh.
return FetchIndexResult(etag=None, hash=None, data=None, fresh=True)
raise FetchIndexError(f"Could not fetch index {url}", e) from e
except (TimeoutError, urllib.error.URLError) as e:
raise FetchIndexError(f"Could not fetch index {url}", e) from e
raise FetchIndexError("Could not fetch index {}".format(url), e) from e
except urllib.error.URLError as e:
raise FetchIndexError("Could not fetch index {}".format(url), e) from e
try:
result = codecs.getreader("utf-8")(response).read()
except ValueError as e:
raise FetchIndexError(f"Remote index {url} is invalid", e) from e
raise FetchIndexError("Remote index {} is invalid".format(url), e) from e
headers = response.headers
etag_header_value = headers.get("Etag", None) or headers.get("etag", None)
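
The hunk above implements the standard ETag dance: send ``If-None-Match`` with the cached tag and treat HTTP 304 as "index unchanged". A standalone sketch with no Spack specifics:

import urllib.error
import urllib.request

def conditional_fetch(url, etag):
    request = urllib.request.Request(url, headers={"If-None-Match": f'"{etag}"'})
    try:
        response = urllib.request.urlopen(request)
    except urllib.error.HTTPError as e:
        if e.getcode() == 304:
            return None, etag  # Not modified: the cached copy is still fresh.
        raise
    # Remember the new ETag so the next fetch can be conditional too.
    return response.read(), response.headers.get("Etag")
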
@@ -2541,19 +2552,21 @@ def conditional_fetch(self) -> FetchIndexResult:
headers={"Accept": "application/vnd.oci.image.manifest.v1+json"},
)
)
except (TimeoutError, urllib.error.URLError) as e:
raise FetchIndexError(f"Could not fetch manifest from {url_manifest}", e) from e
except urllib.error.URLError as e:
raise FetchIndexError(
"Could not fetch manifest from {}".format(url_manifest), e
) from e
try:
manifest = json.loads(response.read())
except Exception as e:
raise FetchIndexError(f"Remote index {url_manifest} is invalid", e) from e
raise FetchIndexError("Remote index {} is invalid".format(url_manifest), e) from e
# Get first blob hash, which should be the index.json
try:
index_digest = spack.oci.image.Digest.from_string(manifest["layers"][0]["digest"])
except Exception as e:
raise FetchIndexError(f"Remote index {url_manifest} is invalid", e) from e
raise FetchIndexError("Remote index {} is invalid".format(url_manifest), e) from e
# Fresh?
if index_digest.digest == self.local_hash:

View File

@@ -5,13 +5,7 @@
"""Function and classes needed to bootstrap Spack itself."""
from .config import ensure_bootstrap_configuration, is_bootstrapping, store_path
from .core import (
all_core_root_specs,
ensure_clingo_importable_or_raise,
ensure_core_dependencies,
ensure_gpg_in_path_or_raise,
ensure_patchelf_in_path_or_raise,
)
from .core import all_core_root_specs, ensure_core_dependencies, ensure_patchelf_in_path_or_raise
from .environment import BootstrapEnvironment, ensure_environment_dependencies
from .status import status_message
@@ -19,8 +13,6 @@
"is_bootstrapping",
"ensure_bootstrap_configuration",
"ensure_core_dependencies",
"ensure_gpg_in_path_or_raise",
"ensure_clingo_importable_or_raise",
"ensure_patchelf_in_path_or_raise",
"all_core_root_specs",
"ensure_environment_dependencies",

View File

@@ -54,14 +54,10 @@ def _try_import_from_store(
installed_specs = spack.store.STORE.db.query(query_spec, installed=True)
for candidate_spec in installed_specs:
# previously bootstrapped specs may not have a python-venv dependency.
if candidate_spec.dependencies("python-venv"):
python, *_ = candidate_spec.dependencies("python-venv")
else:
python, *_ = candidate_spec.dependencies("python")
pkg = candidate_spec["python"].package
module_paths = [
os.path.join(candidate_spec.prefix, python.package.purelib),
os.path.join(candidate_spec.prefix, python.package.platlib),
os.path.join(candidate_spec.prefix, pkg.purelib),
os.path.join(candidate_spec.prefix, pkg.platlib),
]
path_before = list(sys.path)

View File

@@ -173,14 +173,35 @@ def _read_metadata(self, package_name: str) -> Any:
return data
def _install_by_hash(
self, pkg_hash: str, pkg_sha256: str, bincache_platform: spack.platforms.Platform
self,
pkg_hash: str,
pkg_sha256: str,
index: List[spack.spec.Spec],
bincache_platform: spack.platforms.Platform,
) -> None:
index_spec = next(x for x in index if x.dag_hash() == pkg_hash)
# Reconstruct the compiler that we need to use for bootstrapping
compiler_entry = {
"modules": [],
"operating_system": str(index_spec.os),
"paths": {
"cc": "/dev/null",
"cxx": "/dev/null",
"f77": "/dev/null",
"fc": "/dev/null",
},
"spec": str(index_spec.compiler),
"target": str(index_spec.target.family),
}
with spack.platforms.use_platform(bincache_platform):
query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
for match in spack.store.find([f"/{pkg_hash}"], multiple=False, query_fn=query):
spack.binary_distribution.install_root_node(
match, unsigned=True, force=True, sha256=pkg_sha256
)
with spack.config.override("compilers", [{"compiler": compiler_entry}]):
spec_str = "/" + pkg_hash
query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
matches = spack.store.find([spec_str], multiple=False, query_fn=query)
for match in matches:
spack.binary_distribution.install_root_node(
match, unsigned=True, force=True, sha256=pkg_sha256
)
def _install_and_test(
self,
@@ -211,7 +232,7 @@ def _install_and_test(
continue
for _, pkg_hash, pkg_sha256 in item["binaries"]:
self._install_by_hash(pkg_hash, pkg_sha256, bincache_platform)
self._install_by_hash(pkg_hash, pkg_sha256, index, bincache_platform)
info: ConfigDictionary = {}
if test_fn(query_spec=abstract_spec, query_info=info):
@@ -270,6 +291,10 @@ def try_import(self, module: str, abstract_spec_str: str) -> bool:
with spack_python_interpreter():
# Add hint to use frontend operating system on Cray
concrete_spec = spack.spec.Spec(abstract_spec_str + " ^" + spec_for_current_python())
# This is needed to help the old concretizer take the `setuptools` dependency
# only when bootstrapping from sources on Python 3.12
if spec_for_current_python() == "python@3.12":
concrete_spec.constrain("+force_setuptools")
if module == "clingo":
# TODO: remove when the old concretizer is deprecated # pylint: disable=fixme
@@ -534,41 +559,6 @@ def ensure_patchelf_in_path_or_raise() -> spack.util.executable.Executable:
)
def ensure_winsdk_external_or_raise() -> None:
"""Ensure the Windows SDK + WGL are available on system
If both of these packages are found, the Spack user or bootstrap
configuration (depending on where Spack is running)
will be updated to include all versions and variants detected.
If either the WDK or the WSDK is not found, this method will raise
a RuntimeError.
**NOTE:** This modifies the Spack config in the current scope,
either user or environment depending on the calling context.
This is different from all other current bootstrap dependency
checks.
"""
if set(["win-sdk", "wgl"]).issubset(spack.config.get("packages").keys()):
return
externals = spack.detection.by_path(["win-sdk", "wgl"])
if not set(["win-sdk", "wgl"]) == externals.keys():
missing_packages_lst = []
if "wgl" not in externals:
missing_packages_lst.append("wgl")
if "win-sdk" not in externals:
missing_packages_lst.append("win-sdk")
missing_packages = " & ".join(missing_packages_lst)
raise RuntimeError(
f"Unable to find the {missing_packages}, please install these packages \
via the Visual Studio installer \
before proceeding with Spack or provide the path to a non standard install with \
'spack external find --path'"
)
# wgl/sdk are not required for bootstrapping Spack, but
# are required for building anything non-trivial
# add to user config so they can be used by subsequent Spack ops
spack.detection.update_configuration(externals, buildable=False)
def ensure_core_dependencies() -> None:
"""Ensure the presence of all the core dependencies."""
if sys.platform.lower() == "linux":

View File

@@ -3,11 +3,13 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Bootstrap non-core Spack dependencies from an environment."""
import glob
import hashlib
import os
import pathlib
import sys
from typing import Iterable, List
import warnings
from typing import List
import archspec.cpu
@@ -26,16 +28,6 @@
class BootstrapEnvironment(spack.environment.Environment):
"""Environment to install dependencies of Spack for a given interpreter and architecture"""
def __init__(self) -> None:
if not self.spack_yaml().exists():
self._write_spack_yaml_file()
super().__init__(self.environment_root())
# Remove python package roots created before python-venv was introduced
for s in self.concrete_roots():
if "python" in s.package.extendees and not s.dependencies("python-venv"):
self.deconcretize(s)
@classmethod
def spack_dev_requirements(cls) -> List[str]:
"""Spack development requirements"""
@@ -67,19 +59,31 @@ def view_root(cls) -> pathlib.Path:
return cls.environment_root().joinpath("view")
@classmethod
def bin_dir(cls) -> pathlib.Path:
"""Paths to be added to PATH"""
return cls.view_root().joinpath("bin")
def pythonpaths(cls) -> List[str]:
"""Paths to be added to sys.path or PYTHONPATH"""
python_dir_part = f"python{'.'.join(str(x) for x in sys.version_info[:2])}"
glob_expr = str(cls.view_root().joinpath("**", python_dir_part, "**"))
result = glob.glob(glob_expr)
if not result:
msg = f"Cannot find any Python path in {cls.view_root()}"
warnings.warn(msg)
return result
def python_dirs(self) -> Iterable[pathlib.Path]:
python = next(s for s in self.all_specs_generator() if s.name == "python-venv").package
return {self.view_root().joinpath(p) for p in (python.platlib, python.purelib)}
@classmethod
def bin_dirs(cls) -> List[pathlib.Path]:
"""Paths to be added to PATH"""
return [cls.view_root().joinpath("bin")]
@classmethod
def spack_yaml(cls) -> pathlib.Path:
"""Environment spack.yaml file"""
return cls.environment_root().joinpath("spack.yaml")
def __init__(self) -> None:
if not self.spack_yaml().exists():
self._write_spack_yaml_file()
super().__init__(self.environment_root())
def update_installations(self) -> None:
"""Update the installations of this environment."""
log_enabled = tty.is_debug() or tty.is_verbose()
@@ -96,13 +100,21 @@ def update_installations(self) -> None:
self.install_all()
self.write(regenerate=True)
def load(self) -> None:
"""Update PATH and sys.path."""
# Make executables available (shouldn't need PYTHONPATH)
os.environ["PATH"] = f"{self.bin_dir()}{os.pathsep}{os.environ.get('PATH', '')}"
# Spack itself imports pytest
sys.path.extend(str(p) for p in self.python_dirs())
def update_syspath_and_environ(self) -> None:
"""Update ``sys.path`` and the PATH, PYTHONPATH environment variables to point to
the environment view.
"""
# Make minimal modifications to sys.path and environment variables. In particular,
# keep PYTHONPATH / sys.path as small as possible, since their size may impact
# the performance of the current interpreter
sys.path.extend(self.pythonpaths())
os.environ["PATH"] = os.pathsep.join(
[str(x) for x in self.bin_dirs()] + os.environ.get("PATH", "").split(os.pathsep)
)
os.environ["PYTHONPATH"] = os.pathsep.join(
os.environ.get("PYTHONPATH", "").split(os.pathsep)
+ [str(x) for x in self.pythonpaths()]
)
def _write_spack_yaml_file(self) -> None:
tty.msg(
@@ -152,4 +164,4 @@ def ensure_environment_dependencies() -> None:
_add_externals_if_missing()
with BootstrapEnvironment() as env:
env.update_installations()
env.load()
env.update_syspath_and_environ()

View File

@@ -43,7 +43,7 @@
from collections import defaultdict
from enum import Flag, auto
from itertools import chain
from typing import Dict, List, Set, Tuple
from typing import List, Tuple
import llnl.util.tty as tty
from llnl.string import plural
@@ -57,10 +57,8 @@
import spack.build_systems.meson
import spack.build_systems.python
import spack.builder
import spack.compilers
import spack.config
import spack.deptypes as dt
import spack.error
import spack.main
import spack.package_base
import spack.paths
@@ -68,11 +66,9 @@
import spack.repo
import spack.schema.environment
import spack.spec
import spack.stage
import spack.store
import spack.subprocess_context
import spack.user_environment
import spack.util.executable
import spack.util.path
import spack.util.pattern
from spack import traverse
@@ -82,7 +78,7 @@
from spack.installer import InstallError
from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import (
SYSTEM_DIR_CASE_ENTRY,
SYSTEM_DIRS,
EnvironmentModifications,
env_flag,
filter_system_paths,
@@ -105,13 +101,9 @@
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = "SPACK_ENV_PATH"
SPACK_MANAGED_DIRS = "SPACK_MANAGED_DIRS"
SPACK_INCLUDE_DIRS = "SPACK_INCLUDE_DIRS"
SPACK_LINK_DIRS = "SPACK_LINK_DIRS"
SPACK_RPATH_DIRS = "SPACK_RPATH_DIRS"
SPACK_STORE_INCLUDE_DIRS = "SPACK_STORE_INCLUDE_DIRS"
SPACK_STORE_LINK_DIRS = "SPACK_STORE_LINK_DIRS"
SPACK_STORE_RPATH_DIRS = "SPACK_STORE_RPATH_DIRS"
SPACK_RPATH_DEPS = "SPACK_RPATH_DEPS"
SPACK_LINK_DEPS = "SPACK_LINK_DEPS"
SPACK_PREFIX = "SPACK_PREFIX"
@@ -424,7 +416,7 @@ def set_compiler_environment_variables(pkg, env):
env.set("SPACK_COMPILER_SPEC", str(spec.compiler))
env.set("SPACK_SYSTEM_DIRS", SYSTEM_DIR_CASE_ENTRY)
env.set("SPACK_SYSTEM_DIRS", ":".join(SYSTEM_DIRS))
compiler.setup_custom_environment(pkg, env)
@@ -480,12 +472,12 @@ def set_wrapper_variables(pkg, env):
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format("{name}-{hash:7}"))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get("config:ccache"):
# Enable ccache in the compiler wrapper
env.set(SPACK_CCACHE_BINARY, spack.util.executable.which_string("ccache", required=True))
else:
# Avoid cache pollution if a build system forces `ccache <compiler wrapper invocation>`.
env.set("CCACHE_DISABLE", "1")
ccache = Executable("ccache")
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Gather information about various types of dependencies
link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
@@ -552,26 +544,9 @@ def update_compiler_args_for_dep(dep):
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
# Spack managed directories include the stage, store and upstream stores. We extend this with
# their real paths to make it more robust (e.g. /tmp vs /private/tmp on macOS).
spack_managed_dirs: Set[str] = {
spack.stage.get_stage_root(),
spack.store.STORE.db.root,
*(db.root for db in spack.store.STORE.db.upstream_dbs),
}
spack_managed_dirs.update([os.path.realpath(p) for p in spack_managed_dirs])
env.set(SPACK_MANAGED_DIRS, "|".join(f'"{p}/"*' for p in sorted(spack_managed_dirs)))
is_spack_managed = lambda p: any(p.startswith(store) for store in spack_managed_dirs)
link_dirs_spack, link_dirs_system = stable_partition(link_dirs, is_spack_managed)
include_dirs_spack, include_dirs_system = stable_partition(include_dirs, is_spack_managed)
rpath_dirs_spack, rpath_dirs_system = stable_partition(rpath_dirs, is_spack_managed)
env.set(SPACK_LINK_DIRS, ":".join(link_dirs_system))
env.set(SPACK_INCLUDE_DIRS, ":".join(include_dirs_system))
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs_system))
env.set(SPACK_STORE_LINK_DIRS, ":".join(link_dirs_spack))
env.set(SPACK_STORE_INCLUDE_DIRS, ":".join(include_dirs_spack))
env.set(SPACK_STORE_RPATH_DIRS, ":".join(rpath_dirs_spack))
env.set(SPACK_LINK_DIRS, ":".join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ":".join(include_dirs))
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))
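
``stable_partition`` (imported in this file from Spack's ``llnl`` utility modules) splits a sequence by a predicate while preserving order. A minimal stand-in, with made-up paths:

def stable_partition(items, predicate):
    # Split items into (matching, non-matching) lists, keeping input order.
    true_items, false_items = [], []
    for item in items:
        (true_items if predicate(item) else false_items).append(item)
    return true_items, false_items

spack_dirs, system_dirs = stable_partition(
    ["/opt/spack/store/foo/lib", "/usr/lib", "/opt/spack/store/bar/lib"],
    lambda p: p.startswith("/opt/spack/store"),
)
assert system_dirs == ["/usr/lib"]
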
def set_package_py_globals(pkg, context: Context = Context.BUILD):
@@ -608,22 +583,10 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
# Put spack compiler paths in module scope. (Some packages use it
# in setup_run_environment etc, so don't put it context == build)
link_dir = spack.paths.build_env_path
pkg_compiler = None
try:
pkg_compiler = pkg.compiler
except spack.compilers.NoCompilerForSpecError as e:
tty.debug(f"cannot set 'spack_cc': {str(e)}")
if pkg_compiler is not None:
module.spack_cc = os.path.join(link_dir, pkg_compiler.link_paths["cc"])
module.spack_cxx = os.path.join(link_dir, pkg_compiler.link_paths["cxx"])
module.spack_f77 = os.path.join(link_dir, pkg_compiler.link_paths["f77"])
module.spack_fc = os.path.join(link_dir, pkg_compiler.link_paths["fc"])
else:
module.spack_cc = None
module.spack_cxx = None
module.spack_f77 = None
module.spack_fc = None
module.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
module.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
module.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
module.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])
# Useful directories within the prefix are encapsulated in
# a Prefix object.
@@ -731,28 +694,12 @@ def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None, **kwa
return compiler(*compiler_args, output=compiler_output)
def _get_rpath_deps_from_spec(
spec: spack.spec.Spec, transitive_rpaths: bool
) -> List[spack.spec.Spec]:
if not transitive_rpaths:
return spec.dependencies(deptype=dt.LINK)
by_name: Dict[str, spack.spec.Spec] = {}
for dep in spec.traverse(root=False, deptype=dt.LINK):
lookup = by_name.get(dep.name)
if lookup is None:
by_name[dep.name] = dep
elif lookup.version < dep.version:
by_name[dep.name] = dep
return list(by_name.values())
def get_rpath_deps(pkg: spack.package_base.PackageBase) -> List[spack.spec.Spec]:
"""Return immediate or transitive dependencies (depending on the package) that need to be
rpath'ed. If a package occurs multiple times, the newest version is kept."""
return _get_rpath_deps_from_spec(pkg.spec, pkg.transitive_rpaths)
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=("link"))]
else:
return pkg.spec.dependencies(deptype="link")
def get_rpaths(pkg):

View File

@@ -139,6 +139,11 @@ def initconfig_compiler_entries(self):
"endif()\n",
]
# We define hipcc as the top-level compiler for packages when +rocm.
# This avoids problems from rocm flags being applied to another compiler.
if "+rocm" in spec:
entries.insert(0, cmake_cache_path("CMAKE_CXX_COMPILER", self.spec["hip"].hipcc))
flags = spec.compiler_flags
# use global spack compiler flags

View File

@@ -16,7 +16,7 @@
class CargoPackage(spack.package_base.PackageBase):
"""Specialized class for packages built using cargo."""
"""Specialized class for packages built using a Makefiles."""
#: This attribute is used in UI queries that need to know the build
#: system base class

View File

@@ -39,11 +39,16 @@ def _maybe_set_python_hints(pkg: spack.package_base.PackageBase, args: List[str]
"""Set the PYTHON_EXECUTABLE, Python_EXECUTABLE, and Python3_EXECUTABLE CMake variables
if the package has Python as build or link dep and ``find_python_hints`` is set to True. See
``find_python_hints`` for context."""
if not getattr(pkg, "find_python_hints", False) or not pkg.spec.dependencies(
"python", dt.BUILD | dt.LINK
):
if not getattr(pkg, "find_python_hints", False):
return
python_executable = pkg.spec["python"].command.path
pythons = pkg.spec.dependencies("python", dt.BUILD | dt.LINK)
if len(pythons) != 1:
return
try:
python_executable = pythons[0].package.command.path
except RuntimeError:
return
args.extend(
[
CMakeBuilder.define("PYTHON_EXECUTABLE", python_executable),

View File

@@ -1,144 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import itertools
import os
import pathlib
import re
import sys
from typing import Dict, List, Sequence, Tuple, Union
import llnl.util.tty as tty
from llnl.util.lang import classproperty
import spack.compiler
import spack.package_base
# Local "type" for type hints
Path = Union[str, pathlib.Path]
class CompilerPackage(spack.package_base.PackageBase):
"""A Package mixin for all common logic for packages that implement compilers"""
# TODO: how do these play nicely with other tags
tags: Sequence[str] = ["compiler"]
#: Optional suffix regexes for searching for this type of compiler.
#: Suffixes are used by some frameworks, e.g. macports uses an '-mp-X.Y'
#: version suffix for gcc.
compiler_suffixes: List[str] = [r"-.*"]
#: Optional prefix regexes for searching for this compiler
compiler_prefixes: List[str] = []
#: Compiler argument(s) that produces version information
#: If multiple arguments, the earlier arguments must produce errors when invalid
compiler_version_argument: Union[str, Tuple[str]] = "-dumpversion"
#: Regex used to extract version from compiler's output
compiler_version_regex: str = "(.*)"
#: Static definition of languages supported by this class
compiler_languages: Sequence[str] = ["c", "cxx", "fortran"]
def __init__(self, spec: "spack.spec.Spec"):
super().__init__(spec)
msg = f"Supported languages for {spec} are not a subset of possible supported languages"
msg += f" supports: {self.supported_languages}, valid values: {self.compiler_languages}"
assert set(self.supported_languages) <= set(self.compiler_languages), msg
@property
def supported_languages(self) -> Sequence[str]:
"""Dynamic definition of languages supported by this package"""
return self.compiler_languages
@classproperty
def compiler_names(cls) -> Sequence[str]:
"""Construct list of compiler names from per-language names"""
names = []
for language in cls.compiler_languages:
names.extend(getattr(cls, f"{language}_names"))
return names
@classproperty
def executables(cls) -> Sequence[str]:
"""Construct executables for external detection from names, prefixes, and suffixes."""
regexp_fmt = r"^({0}){1}({2})$"
prefixes = [""] + cls.compiler_prefixes
suffixes = [""] + cls.compiler_suffixes
if sys.platform == "win32":
ext = r"\.(?:exe|bat)"
suffixes += [suf + ext for suf in suffixes]
return [
regexp_fmt.format(prefix, re.escape(name), suffix)
for prefix, name, suffix in itertools.product(prefixes, cls.compiler_names, suffixes)
]
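
Concretely, the construction above produces anchored patterns such as the following; gcc is used here purely as an example name:

import itertools
import re

regexp_fmt = r"^({0}){1}({2})$"
prefixes, suffixes = [""], ["", r"-.*"]
patterns = [
    regexp_fmt.format(prefix, re.escape("gcc"), suffix)
    for prefix, suffix in itertools.product(prefixes, suffixes)
]
print(patterns)  # ['^()gcc()$', '^()gcc(-.*)$']
assert re.match(patterns[1], "gcc-12")  # macports-style suffixed binary
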
@classmethod
def determine_version(cls, exe: Path):
version_argument = cls.compiler_version_argument
if isinstance(version_argument, str):
version_argument = (version_argument,)
for va in version_argument:
try:
output = spack.compiler.get_compiler_version_output(exe, va)
match = re.search(cls.compiler_version_regex, output)
if match:
return ".".join(match.groups())
except spack.util.executable.ProcessError:
pass
except Exception as e:
tty.debug(
f"[{__file__}] Cannot detect a valid version for the executable "
f"{str(exe)}, for package '{cls.name}': {e}"
)
@classmethod
def compiler_bindir(cls, prefix: Path) -> Path:
"""Overridable method for the location of the compiler bindir within the preifx"""
return os.path.join(prefix, "bin")
@classmethod
def determine_compiler_paths(cls, exes: Sequence[Path]) -> Dict[str, Path]:
"""Compute the paths to compiler executables associated with this package
This is a helper method for ``determine_variants`` to compute the ``extra_attributes``
to include with each spec object."""
# There are often at least two copies (not symlinks) of each compiler executable in the
# same directory: one with a canonical name, e.g. "gfortran", and another one with the
# target prefix, e.g. "x86_64-pc-linux-gnu-gfortran". There also might be a copy of "gcc"
# with the version suffix, e.g. "x86_64-pc-linux-gnu-gcc-6.3.0". To ensure the consistency
# of values in the "paths" dictionary (i.e. we prefer all of them to reference copies
# with canonical names if possible), we iterate over the executables in reverse
# sorted order:
# The first pass over languages identifies exes that exactly match a canonical name.
# The second pass checks for names with a prefix/suffix; it iterates languages in
# order of decreasing name length, because longer language names (e.g. cxx) can
# contain the names of shorter ones (e.g. c, as in clang/clang++).
paths = {}
exes = sorted(exes, reverse=True)
languages = {
lang: getattr(cls, f"{lang}_names")
for lang in sorted(cls.compiler_languages, key=len, reverse=True)
}
for exe in exes:
for lang, names in languages.items():
if os.path.basename(exe) in names:
paths[lang] = exe
break
else:
for lang, names in languages.items():
if any(name in os.path.basename(exe) for name in names):
paths[lang] = exe
break
return paths
@classmethod
def determine_variants(cls, exes: Sequence[Path], version_str: str) -> Tuple:
# path determination is separated so it can be reused in subclasses
return "", {"compilers": cls.determine_compiler_paths(exes=exes)}

View File

@@ -21,7 +21,7 @@
class MakefilePackage(spack.package_base.PackageBase):
"""Specialized class for packages built using Makefiles."""
"""Specialized class for packages built using a Makefiles."""
#: This attribute is used in UI queries that need to know the build
#: system base class

View File

@@ -145,7 +145,7 @@ def install(self, pkg, spec, prefix):
opts += self.nmake_install_args()
if self.makefile_name:
opts.append("/F{}".format(self.makefile_name))
opts.append(self.define("PREFIX", fs.windows_sfn(prefix)))
opts.append(self.define("PREFIX", prefix))
with fs.working_dir(self.build_directory):
inspect.getmodule(self.pkg).nmake(
*opts, *self.install_targets, ignore_quotes=self.ignore_quotes

View File

@@ -14,7 +14,7 @@
from llnl.util.link_tree import LinkTree
from spack.build_environment import dso_suffix
from spack.directives import conflicts, license, redistribute, variant
from spack.directives import conflicts, license, variant
from spack.package_base import InstallError
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable
@@ -30,7 +30,7 @@ class IntelOneApiPackage(Package):
# oneAPI license does not allow mirroring outside of the
# organization (e.g. University/Company).
redistribute(source=False, binary=False)
redistribute_source = False
for c in [
"target=ppc64:",

View File

@@ -138,21 +138,16 @@ def view_file_conflicts(self, view, merge_map):
return conflicts
def add_files_to_view(self, view, merge_map, skip_if_exists=True):
# Patch up shebangs if the package extends Python and we put a Python interpreter in the
# view.
if not self.extendee_spec:
return super().add_files_to_view(view, merge_map, skip_if_exists)
python, *_ = self.spec.dependencies("python-venv") or self.spec.dependencies("python")
if python.external:
# Patch up shebangs to the python linked in the view only if python is built by Spack.
if not self.extendee_spec or self.extendee_spec.external:
return super().add_files_to_view(view, merge_map, skip_if_exists)
# We only patch shebangs in the bin directory.
copied_files: Dict[Tuple[int, int], str] = {} # File identifier -> source
delayed_links: List[Tuple[str, str]] = [] # List of symlinks from merge map
bin_dir = self.spec.prefix.bin
bin_dir = self.spec.prefix.bin
python_prefix = self.extendee_spec.prefix
for src, dst in merge_map.items():
if skip_if_exists and os.path.lexists(dst):
continue
@@ -173,7 +168,7 @@ def add_files_to_view(self, view, merge_map, skip_if_exists=True):
copied_files[(s.st_dev, s.st_ino)] = dst
shutil.copy2(src, dst)
fs.filter_file(
python.prefix, os.path.abspath(view.get_projection_for_spec(self.spec)), dst
python_prefix, os.path.abspath(view.get_projection_for_spec(self.spec)), dst
)
else:
view.link(src, dst)
@@ -204,13 +199,14 @@ def remove_files_from_view(self, view, merge_map):
ignore_namespace = True
bin_dir = self.spec.prefix.bin
global_view = self.extendee_spec.prefix == view.get_projection_for_spec(self.spec)
to_remove = []
for src, dst in merge_map.items():
if ignore_namespace and namespace_init(dst):
continue
if not fs.path_contains_subdirectory(src, bin_dir):
if global_view or not fs.path_contains_subdirectory(src, bin_dir):
to_remove.append(dst)
else:
os.remove(dst)
@@ -366,12 +362,6 @@ def list_url(cls) -> Optional[str]: # type: ignore[override]
return f"https://pypi.org/simple/{name}/"
return None
@property
def python_spec(self):
"""Get python-venv if it exists or python otherwise."""
python, *_ = self.spec.dependencies("python-venv") or self.spec.dependencies("python")
return python
@property
def headers(self) -> HeaderList:
"""Discover header files in platlib."""
@@ -381,9 +371,8 @@ def headers(self) -> HeaderList:
# Headers should only be in include or platlib, but no harm in checking purelib too
include = self.prefix.join(self.spec["python"].package.include).join(name)
python = self.python_spec
platlib = self.prefix.join(python.package.platlib).join(name)
purelib = self.prefix.join(python.package.purelib).join(name)
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
purelib = self.prefix.join(self.spec["python"].package.purelib).join(name)
headers_list = map(fs.find_all_headers, [include, platlib, purelib])
headers = functools.reduce(operator.add, headers_list)
@@ -402,9 +391,8 @@ def libs(self) -> LibraryList:
name = self.spec.name[3:]
# Libraries should only be in platlib, but no harm in checking purelib too
python = self.python_spec
platlib = self.prefix.join(python.package.platlib).join(name)
purelib = self.prefix.join(python.package.purelib).join(name)
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
purelib = self.prefix.join(self.spec["python"].package.purelib).join(name)
find_all_libraries = functools.partial(fs.find_all_libraries, recursive=True)
libs_list = map(find_all_libraries, [platlib, purelib])
@@ -516,8 +504,6 @@ def global_options(self, spec: Spec, prefix: Prefix) -> Iterable[str]:
def install(self, pkg: PythonPackage, spec: Spec, prefix: Prefix) -> None:
"""Install everything from build directory."""
pip = spec["python"].command
pip.add_default_arg("-m", "pip")
args = PythonPipBuilder.std_args(pkg) + [f"--prefix={prefix}"]
@@ -533,6 +519,14 @@ def install(self, pkg: PythonPackage, spec: Spec, prefix: Prefix) -> None:
else:
args.append(".")
pip = spec["python"].command
# Hide user packages, since we don't have build isolation. This is
# necessary because pip / setuptools may run hooks from arbitrary
# packages during the build. There is no equivalent variable to hide
# system packages, so this is not reliable for external Python.
pip.add_default_env("PYTHONNOUSERSITE", "1")
pip.add_default_arg("-m")
pip.add_default_arg("pip")
with fs.working_dir(self.build_directory):
pip(*args)
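
The ``PYTHONNOUSERSITE`` workaround above is easy to verify in isolation: when the variable is set, Python disables the user site-packages directory entirely.

import os
import subprocess
import sys

env = dict(os.environ, PYTHONNOUSERSITE="1")
out = subprocess.run(
    [sys.executable, "-c", "import site; print(site.ENABLE_USER_SITE)"],
    env=env,
    capture_output=True,
    text=True,
)
print(out.stdout.strip())  # "False": user packages are hidden from the build
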

View File

@@ -80,7 +80,6 @@
import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.package_base import PackageBase
from spack.util.environment import EnvironmentModifications
class ROCmPackage(PackageBase):
@@ -157,23 +156,30 @@ def hip_flags(amdgpu_target):
archs = ",".join(amdgpu_target)
return "--amdgpu-target={0}".format(archs)
def asan_on(self, env: EnvironmentModifications):
llvm_path = self.spec["llvm-amdgpu"].prefix
# ASAN
@staticmethod
def asan_on(env, llvm_path):
env.set("CC", llvm_path + "/bin/clang")
env.set("CXX", llvm_path + "/bin/clang++")
env.set("ASAN_OPTIONS", "detect_leaks=0")
for root, _, files in os.walk(llvm_path):
for root, dirs, files in os.walk(llvm_path):
if "libclang_rt.asan-x86_64.so" in files:
asan_lib_path = root
env.prepend_path("LD_LIBRARY_PATH", asan_lib_path)
if "rhel" in self.spec.os or "sles" in self.spec.os:
SET_DWARF_VERSION_4 = "-gdwarf-5"
else:
SET_DWARF_VERSION_4 = ""
SET_DWARF_VERSION_4 = ""
try:
# This will throw an error if imported on a non-Linux platform.
import distro
env.set("CFLAGS", f"-fsanitize=address -shared-libasan -g {SET_DWARF_VERSION_4}")
env.set("CXXFLAGS", f"-fsanitize=address -shared-libasan -g {SET_DWARF_VERSION_4}")
distname = distro.id()
except ImportError:
distname = "unknown"
if "rhel" in distname or "sles" in distname:
SET_DWARF_VERSION_4 = "-gdwarf-5"
env.set("CFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
env.set("CXXFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
env.set("LDFLAGS", "-Wl,--enable-new-dtags -fuse-ld=lld -fsanitize=address -g -Wl,")
# HIP version vs Architecture

View File

@@ -16,8 +16,8 @@
import tempfile
import time
import zipfile
from collections import defaultdict, namedtuple
from typing import Dict, List, Optional, Set, Tuple
from collections import namedtuple
from typing import List, Optional
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import HTTPHandler, Request, build_opener
@@ -44,7 +44,6 @@
from spack import traverse
from spack.error import SpackError
from spack.reporters import CDash, CDashConfiguration
from spack.reporters.cdash import SPACK_CDASH_TIMEOUT
from spack.reporters.cdash import build_stamp as cdash_build_stamp
# See https://docs.gitlab.com/ee/ci/yaml/#retry for descriptions of conditions
@@ -114,24 +113,54 @@ def _remove_reserved_tags(tags):
return [tag for tag in tags if tag not in SPACK_RESERVED_TAGS]
def _spec_ci_label(s):
def _spec_deps_key(s):
return f"{s.name}/{s.dag_hash(7)}"
PlainNodes = Dict[str, spack.spec.Spec]
PlainEdges = Dict[str, Set[str]]
def _add_dependency(spec_label, dep_label, deps):
if spec_label == dep_label:
return
if spec_label not in deps:
deps[spec_label] = set()
deps[spec_label].add(dep_label)
def stage_spec_jobs(specs: List[spack.spec.Spec]) -> Tuple[PlainNodes, PlainEdges, List[Set[str]]]:
"""Turn a DAG into a list of stages (set of nodes), the list is ordered topologically, so that
each node in a stage has dependencies only in previous stages.
def _get_spec_dependencies(specs, deps, spec_labels):
spec_deps_obj = _compute_spec_deps(specs)
if spec_deps_obj:
dependencies = spec_deps_obj["dependencies"]
specs = spec_deps_obj["specs"]
for entry in specs:
spec_labels[entry["label"]] = entry["spec"]
for entry in dependencies:
_add_dependency(entry["spec"], entry["depends"], deps)
def stage_spec_jobs(specs):
"""Take a set of release specs and generate a list of "stages", where the
jobs in any stage are dependent only on jobs in previous stages. This
allows us to maximize build parallelism within the gitlab-ci framework.
Arguments:
specs: Specs to build
specs (Iterable): Specs to build
Returns: A tuple of information objects describing the specs, dependencies
and stages:
spec_labels: A dictionary mapping the spec labels (which are formatted
as pkg-name/hash-prefix) to concrete specs.
deps: A dictionary where the keys should also have appeared as keys in
the spec_labels dictionary, and the values are the set of
dependencies for that spec.
stages: An ordered list of sets, each of which contains all the jobs to be
built in that stage. The jobs are expressed in the same format as
the keys in the spec_labels and deps objects.
Returns: A tuple (nodes, edges, stages) where ``nodes`` maps labels to specs, ``edges`` maps
labels to a set of labels of dependencies, and ``stages`` is a topologically ordered list
of sets of labels.
"""
# The convenience method below, "_remove_satisfied_deps()", does not modify
@@ -148,12 +177,17 @@ def _remove_satisfied_deps(deps, satisfied_list):
return new_deps
nodes, edges = _extract_dag(specs)
deps = {}
spec_labels = {}
# Save the original edges, as we need to return them at the end of the function. In the loop
# below, the "dependencies" variable is rebound rather than mutated, so "edges" is not mutated.
dependencies = edges
unstaged = set(nodes.keys())
_get_spec_dependencies(specs, deps, spec_labels)
# Save the original deps, as we need to return them at the end of the
# function. In the while loop below, the "dependencies" variable is
# overwritten rather than being modified each time through the loop,
# thus preserving the original value of "deps" saved here.
dependencies = deps
unstaged = set(spec_labels.keys())
stages = []
while dependencies:
@@ -169,7 +203,7 @@ def _remove_satisfied_deps(deps, satisfied_list):
if unstaged:
stages.append(unstaged.copy())
return nodes, edges, stages
return spec_labels, deps, stages
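
Both versions of the staging loop amount to Kahn-style topological layering. A hedged sketch over plain label-to-dependency dicts; the labels are made up:

def stage_jobs(deps):
    # deps: label -> set of dependency labels; every label has an entry.
    stages, staged, unstaged = [], set(), set(deps)
    while unstaged:
        # A job is ready once all of its dependencies are already staged.
        ready = {label for label in unstaged if deps[label] <= staged}
        if not ready:
            raise ValueError("dependency cycle detected")
        stages.append(ready)
        staged |= ready
        unstaged -= ready
    return stages

print(stage_jobs({"zlib/abc1234": set(), "readline/def5678": {"zlib/abc1234"}}))
# [{'zlib/abc1234'}, {'readline/def5678'}]
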
def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions):
@@ -201,22 +235,87 @@ def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisi
tty.msg(msg)
def _extract_dag(specs: List[spack.spec.Spec]) -> Tuple[PlainNodes, PlainEdges]:
"""Extract a sub-DAG as plain old Python objects with external nodes removed."""
nodes: PlainNodes = {}
edges: PlainEdges = defaultdict(set)
def _compute_spec_deps(spec_list):
"""
Computes all the dependencies for the spec(s) and generates a JSON
object which provides both a list of unique spec names as well as a
comprehensive list of all the edges in the dependency graph. For
example, given a single spec like 'readline@7.0', this function
generates the following JSON object:
for edge in traverse.traverse_edges(specs, cover="edges"):
if (edge.parent and edge.parent.external) or edge.spec.external:
continue
child_id = _spec_ci_label(edge.spec)
nodes[child_id] = edge.spec
if edge.parent:
parent_id = _spec_ci_label(edge.parent)
nodes[parent_id] = edge.parent
edges[parent_id].add(child_id)
.. code-block:: JSON
return nodes, edges
{
"dependencies": [
{
"depends": "readline/ip6aiun",
"spec": "readline/ip6aiun"
},
{
"depends": "ncurses/y43rifz",
"spec": "readline/ip6aiun"
},
{
"depends": "ncurses/y43rifz",
"spec": "readline/ip6aiun"
},
{
"depends": "pkgconf/eg355zb",
"spec": "ncurses/y43rifz"
},
{
"depends": "pkgconf/eg355zb",
"spec": "readline/ip6aiun"
}
],
"specs": [
{
"spec": "readline@7.0%apple-clang@9.1.0 arch=darwin-highs...",
"label": "readline/ip6aiun"
},
{
"spec": "ncurses@6.1%apple-clang@9.1.0 arch=darwin-highsi...",
"label": "ncurses/y43rifz"
},
{
"spec": "pkgconf@1.5.4%apple-clang@9.1.0 arch=darwin-high...",
"label": "pkgconf/eg355zb"
}
]
}
"""
spec_labels = {}
specs = []
dependencies = []
def append_dep(s, d):
dependencies.append({"spec": s, "depends": d})
for spec in spec_list:
for s in spec.traverse(deptype="all"):
if s.external:
tty.msg(f"Will not stage external pkg: {s}")
continue
skey = _spec_deps_key(s)
spec_labels[skey] = s
for d in s.dependencies(deptype="all"):
dkey = _spec_deps_key(d)
if d.external:
tty.msg(f"Will not stage external dep: {d}")
continue
append_dep(skey, dkey)
for spec_label, concrete_spec in spec_labels.items():
specs.append({"label": spec_label, "spec": concrete_spec})
deps_json_obj = {"specs": specs, "dependencies": dependencies}
return deps_json_obj
def _spec_matches(spec, match_string):
@@ -228,7 +327,7 @@ def _format_job_needs(
):
needs_list = []
for dep_job in dep_jobs:
dep_spec_key = _spec_ci_label(dep_job)
dep_spec_key = _spec_deps_key(dep_job)
rebuild = rebuild_decisions[dep_spec_key].rebuild
if not prune_dag or rebuild:
@@ -684,22 +783,6 @@ def generate_gitlab_ci_yaml(
"instead.",
)
def ensure_expected_target_path(path):
"""Returns passed paths with all Windows path separators exchanged
for posix separators only if copy_only_pipeline is enabled
This is required as copy_only_pipelines are a unique scenario where
the generate job and child pipelines are run on different platforms.
To make this compatible w/ Windows, we cannot write Windows style path separators
that will be consumed on by the Posix copy job runner.
TODO (johnwparent): Refactor config + cli read/write to deal only in posix
style paths
"""
if copy_only_pipeline and path:
path = path.replace("\\", "/")
return path
pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
deprecated_mirror_config = False
buildcache_destination = None
@@ -823,7 +906,7 @@ def ensure_expected_target_path(path):
if scope not in include_scopes and scope not in env_includes:
include_scopes.insert(0, scope)
env_includes.extend(include_scopes)
env_yaml_root["spack"]["include"] = [ensure_expected_target_path(i) for i in env_includes]
env_yaml_root["spack"]["include"] = env_includes
if "gitlab-ci" in env_yaml_root["spack"] and "ci" not in env_yaml_root["spack"]:
env_yaml_root["spack"]["ci"] = env_yaml_root["spack"].pop("gitlab-ci")
@@ -1111,7 +1194,7 @@ def main_script_replacements(cmd):
if cdash_handler and cdash_handler.auth_token:
try:
cdash_handler.populate_buildgroup(all_job_names)
except (SpackError, HTTPError, URLError, TimeoutError) as err:
except (SpackError, HTTPError, URLError) as err:
tty.warn(f"Problem populating buildgroup: {err}")
else:
tty.warn("Unable to populate buildgroup without CDash credentials")
@@ -1244,9 +1327,6 @@ def main_script_replacements(cmd):
"SPACK_REBUILD_EVERYTHING": str(rebuild_everything),
"SPACK_REQUIRE_SIGNING": os.environ.get("SPACK_REQUIRE_SIGNING", "False"),
}
output_vars = output_object["variables"]
for item, val in output_vars.items():
output_vars[item] = ensure_expected_target_path(val)
# TODO: Remove this block in Spack 0.23
if deprecated_mirror_config and remote_mirror_override:
@@ -1303,6 +1383,7 @@ def main_script_replacements(cmd):
sorted_output = {}
for output_key, output_value in sorted(output_object.items()):
sorted_output[output_key] = output_value
if known_broken_specs_encountered:
tty.error("This pipeline generated hashes known to be broken on develop:")
display_broken_spec_messages(broken_specs_url, known_broken_specs_encountered)
@@ -1497,12 +1578,6 @@ def copy_test_logs_to_artifacts(test_stage, job_test_dir):
copy_files_to_artifacts(os.path.join(test_stage, "*", "*.txt"), job_test_dir)
def win_quote(quote_str: str) -> str:
if IS_WINDOWS:
quote_str = f'"{quote_str}"'
return quote_str
def download_and_extract_artifacts(url, work_dir):
"""Look for gitlab artifacts.zip at the given url, and attempt to download
and extract the contents into the given work_dir
@@ -1525,7 +1600,7 @@ def download_and_extract_artifacts(url, work_dir):
request = Request(url, headers=headers)
request.get_method = lambda: "GET"
response = opener.open(request, timeout=SPACK_CDASH_TIMEOUT)
response = opener.open(request)
response_code = response.getcode()
if response_code != 200:
@@ -1967,9 +2042,9 @@ def compose_command_err_handling(args):
# but we need to handle EXEs (git, etc) ourselves
catch_exe_failure = (
"""
if ($LASTEXITCODE -ne 0){{
throw 'Command {} has failed'
}}
if ($LASTEXITCODE -ne 0){
throw "Command {} has failed"
}
"""
if IS_WINDOWS
else ""
@@ -2095,7 +2170,7 @@ def read_broken_spec(broken_spec_url):
"""
try:
_, _, fs = web_util.read_from_url(broken_spec_url)
except web_util.SpackWebError:
except (URLError, web_util.SpackWebError, HTTPError):
tty.warn(f"Unable to read broken spec from {broken_spec_url}")
return None
@@ -2201,13 +2276,13 @@ def __init__(self, ci_cdash):
def args(self):
return [
"--cdash-upload-url",
win_quote(self.upload_url),
self.upload_url,
"--cdash-build",
win_quote(self.build_name),
self.build_name,
"--cdash-site",
win_quote(self.site),
self.site,
"--cdash-buildstamp",
win_quote(self.build_stamp),
self.build_stamp,
]
@property # type: ignore
@@ -2273,7 +2348,7 @@ def create_buildgroup(self, opener, headers, url, group_name, group_type):
request = Request(url, data=enc_data, headers=headers)
response = opener.open(request, timeout=SPACK_CDASH_TIMEOUT)
response = opener.open(request)
response_code = response.getcode()
if response_code not in [200, 201]:
@@ -2319,7 +2394,7 @@ def populate_buildgroup(self, job_names):
request = Request(url, data=enc_data, headers=headers)
request.get_method = lambda: "PUT"
response = opener.open(request, timeout=SPACK_CDASH_TIMEOUT)
response = opener.open(request)
response_code = response.getcode()
if response_code != 200:

View File

@@ -334,7 +334,8 @@ def display_specs(specs, args=None, **kwargs):
variants (bool): Show variants with specs
indent (int): indent each line this much
groups (bool): display specs grouped by arch/compiler (default True)
decorator (typing.Callable): function to call to decorate specs
decorators (dict): dictionary mapping specs to decorators
header_callback (typing.Callable): called at start of arch/compiler groups
all_headers (bool): show headers even when arch/compiler aren't defined
output (typing.IO): A file object to write to. Default is ``sys.stdout``
@@ -383,13 +384,15 @@ def get_arg(name, default=None):
vfmt = "{variants}" if variants else ""
format_string = nfmt + "{@version}" + ffmt + vfmt
transform = {"package": decorator, "fullpackage": decorator}
def fmt(s, depth=0):
"""Formatter function for all output specs"""
string = ""
if hashes:
string += gray_hash(s, hlen) + " "
string += depth * " "
string += decorator(s, s.cformat(format_string))
string += s.cformat(format_string, transform=transform)
return string
def format_list(specs):
@@ -448,7 +451,7 @@ def filter_loaded_specs(specs):
return [x for x in specs if x.dag_hash() in hashes]
def print_how_many_pkgs(specs, pkg_type="", suffix=""):
def print_how_many_pkgs(specs, pkg_type=""):
"""Given a list of specs, this will print a message about how many
specs are in that list.
@@ -459,7 +462,7 @@ def print_how_many_pkgs(specs, pkg_type="", suffix=""):
category, e.g. if pkg_type is "installed" then the message
would be "3 installed packages"
"""
tty.msg("%s" % llnl.string.plural(len(specs), pkg_type + " package") + suffix)
tty.msg("%s" % llnl.string.plural(len(specs), pkg_type + " package"))
def spack_is_git_repo():

View File

@@ -84,7 +84,7 @@ def externals(parser, args):
return
pkgs = args.name or spack.repo.PATH.all_package_names()
reports = spack.audit.run_group(args.subcommand, pkgs=pkgs, debug_log=tty.debug)
reports = spack.audit.run_group(args.subcommand, pkgs=pkgs)
_process_reports(reports)

View File

@@ -13,6 +13,7 @@
import shutil
import sys
import tempfile
import urllib.request
from typing import Dict, List, Optional, Tuple, Union
import llnl.util.tty as tty
@@ -53,7 +54,6 @@
from spack.oci.oci import (
copy_missing_layers_with_retry,
get_manifest_and_config_with_retry,
list_tags,
upload_blob_with_retry,
upload_manifest_with_retry,
)
@@ -133,11 +133,6 @@ def setup_parser(subparser: argparse.ArgumentParser):
help="when pushing to an OCI registry, tag an image containing all root specs and their "
"runtime dependencies",
)
push.add_argument(
"--private",
action="store_true",
help="for a private mirror, include non-redistributable packages",
)
arguments.add_common_arguments(push, ["specs", "jobs"])
push.set_defaults(func=push_fn)
@@ -372,25 +367,6 @@ def _make_pool() -> MaybePool:
return NoPool()
def _skip_no_redistribute_for_public(specs):
remaining_specs = list()
removed_specs = list()
for spec in specs:
if spec.package.redistribute_binary:
remaining_specs.append(spec)
else:
removed_specs.append(spec)
if removed_specs:
colified_output = tty.colify.colified(list(s.name for s in removed_specs), indent=4)
tty.debug(
"The following specs will not be added to the binary cache"
" because they cannot be redistributed:\n"
f"{colified_output}\n"
"You can use `--private` to include them."
)
return remaining_specs
def push_fn(args):
"""create a binary package and push it to a mirror"""
if args.spec_file:
@@ -441,8 +417,6 @@ def push_fn(args):
root="package" in args.things_to_install,
dependencies="dependencies" in args.things_to_install,
)
if not args.private:
specs = _skip_no_redistribute_for_public(specs)
# When pushing multiple specs, print the url once ahead of time, as well as how
# many specs are being pushed.
@@ -813,7 +787,7 @@ def _push_oci(
def extra_config(spec: Spec):
spec_dict = spec.to_dict(hash=ht.dag_hash)
spec_dict["buildcache_layout_version"] = bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION
spec_dict["buildcache_layout_version"] = 1
spec_dict["binary_cache_checksum"] = {
"hash_algorithm": "sha256",
"hash": checksums[spec.dag_hash()].compressed_digest.digest,
@@ -856,7 +830,10 @@ def _config_from_tag(image_ref: ImageReference, tag: str) -> Optional[dict]:
def _update_index_oci(image_ref: ImageReference, tmpdir: str, pool: MaybePool) -> None:
tags = list_tags(image_ref)
request = urllib.request.Request(url=image_ref.tags_url())
response = spack.oci.opener.urlopen(request)
spack.oci.opener.ensure_status(request, response, 200)
tags = json.load(response)["tags"]
# Fetch all image config files in parallel
spec_dicts = pool.starmap(

View File

@@ -31,6 +31,7 @@
level = "long"
SPACK_COMMAND = "spack"
MAKE_COMMAND = "make"
INSTALL_FAIL_CODE = 1
FAILED_CREATE_BUILDCACHE_CODE = 100
@@ -39,12 +40,6 @@ def deindent(desc):
return desc.replace(" ", "")
def unicode_escape(path: str) -> str:
"""Returns transformed path with any unicode
characters replaced with their corresponding escapes"""
return path.encode("unicode-escape").decode("utf-8")
def setup_parser(subparser):
setup_parser.parser = subparser
subparsers = subparser.add_subparsers(help="CI sub-commands")
@@ -556,35 +551,75 @@ def ci_rebuild(args):
# No hash match anywhere means we need to rebuild spec
# Start with spack arguments
spack_cmd = [SPACK_COMMAND, "--color=always", "--backtrace", "--verbose", "install"]
spack_cmd = [SPACK_COMMAND, "--color=always", "--backtrace", "--verbose"]
config = cfg.get("config")
if not config["verify_ssl"]:
spack_cmd.append("-k")
install_args = [f'--use-buildcache={spack_ci.win_quote("package:never,dependencies:only")}']
install_args = []
can_verify = spack_ci.can_verify_binaries()
verify_binaries = can_verify and spack_is_pr_pipeline is False
if not verify_binaries:
install_args.append("--no-check-signature")
slash_hash = spack_ci.win_quote("/" + job_spec.dag_hash())
slash_hash = "/{}".format(job_spec.dag_hash())
# Arguments when installing dependencies from cache
deps_install_args = install_args
# Arguments when installing the root from sources
deps_install_args = install_args + ["--only=dependencies"]
root_install_args = install_args + ["--keep-stage", "--only=package"]
root_install_args = install_args + [
"--keep-stage",
"--only=package",
"--use-buildcache=package:never,dependencies:only",
]
if cdash_handler:
# Add additional arguments to `spack install` for CDash reporting.
root_install_args.extend(cdash_handler.args())
root_install_args.append(slash_hash)
# ["x", "y"] -> "'x' 'y'"
args_to_string = lambda args: " ".join("'{}'".format(arg) for arg in args)
commands = [
# apparently there's a race when spack bootstraps? do it up front once
[SPACK_COMMAND, "-e", unicode_escape(env.path), "bootstrap", "now"],
spack_cmd + deps_install_args + [slash_hash],
spack_cmd + root_install_args + [slash_hash],
[SPACK_COMMAND, "-e", env.path, "bootstrap", "now"],
[
SPACK_COMMAND,
"-e",
env.path,
"env",
"depfile",
"-o",
"Makefile",
"--use-buildcache=package:never,dependencies:only",
slash_hash, # limit to spec we're building
],
[
# --output-sync requires GNU make 4.x.
# Old make errors when you pass it a flag it doesn't recognize,
# but it doesn't error or warn when you set unrecognized flags in
# this variable.
"export",
"GNUMAKEFLAGS=--output-sync=recurse",
],
[
MAKE_COMMAND,
"SPACK={}".format(args_to_string(spack_cmd)),
"SPACK_COLOR=always",
"SPACK_INSTALL_FLAGS={}".format(args_to_string(deps_install_args)),
"-j$(nproc)",
"install-deps/{}".format(
spack.environment.depfile.MakefileSpec(job_spec).safe_format(
"{name}-{version}-{hash}"
)
),
],
spack_cmd + ["install"] + root_install_args,
]
tty.debug("Installing {0} from source".format(job_spec.name))
install_exit_code = spack_ci.process_command("install", commands, repro_dir)
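The GNUMAKEFLAGS comment in this hunk is worth unpacking: --output-sync only exists in GNU make 4.x, and older make errors on unknown command-line flags but silently ignores unrecognized flags placed in GNUMAKEFLAGS. A minimal sketch of the same trick outside CI (the make target name is hypothetical):

import os
import subprocess

# Safe on make < 4.x: unknown flags in GNUMAKEFLAGS are ignored, whereas
# passing --output-sync on the command line would be a hard error there.
env = dict(os.environ, GNUMAKEFLAGS="--output-sync=recurse")
subprocess.run(["make", "-j4", "install-deps/zlib-1.3-abcdefg"], env=env, check=True)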

View File

@@ -11,6 +11,7 @@
from argparse import ArgumentParser, Namespace
from typing import IO, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.argparsewriter import ArgparseRstWriter, ArgparseWriter, Command
from llnl.util.tty.colify import colify
@@ -866,6 +867,9 @@ def _commands(parser: ArgumentParser, args: Namespace) -> None:
prepend_header(args, f)
formatter(args, f)
if args.update_completion:
fs.set_executable(args.update)
else:
prepend_header(args, sys.stdout)
formatter(args, sys.stdout)

View File

@@ -563,13 +563,12 @@ def add_concretizer_args(subparser):
help="reuse installed packages/buildcaches when possible",
)
subgroup.add_argument(
"--fresh-roots",
"--reuse-deps",
action=ConfigSetAction,
dest="concretizer:reuse",
const="dependencies",
default=None,
help="concretize with fresh roots and reused dependencies",
help="reuse installed dependencies only",
)
subgroup.add_argument(
"--deprecated",

View File

@@ -10,13 +10,13 @@
import sys
import tempfile
from pathlib import Path
from typing import List, Optional
from typing import Optional
import llnl.string as string
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
from llnl.util.tty.color import cescape, colorize
from llnl.util.tty.color import colorize
import spack.cmd
import spack.cmd.common
@@ -61,7 +61,14 @@
#
def env_create_setup_parser(subparser):
"""create a new environment"""
subparser.add_argument("env_name", metavar="env", help="name or directory of environment")
subparser.add_argument(
"env_name",
metavar="env",
help=(
"name of managed environment or directory of the anonymous env "
"(when using --dir/-d) to activate"
),
)
subparser.add_argument(
"-d", "--dir", action="store_true", help="create an environment in a specific directory"
)
@@ -87,9 +94,6 @@ def env_create_setup_parser(subparser):
default=None,
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)
subparser.add_argument(
"--include-concrete", action="append", help="name of old environment to copy specs from"
)
def env_create(args):
@@ -107,32 +111,19 @@ def env_create(args):
# the environment should not include a view.
with_view = None
include_concrete = None
if hasattr(args, "include_concrete"):
include_concrete = args.include_concrete
env = _env_create(
args.env_name,
init_file=args.envfile,
dir=args.dir or os.path.sep in args.env_name or args.env_name in (".", ".."),
dir=args.dir,
with_view=with_view,
keep_relative=args.keep_relative,
include_concrete=include_concrete,
)
# Generate views, only really useful for environments created from spack.lock files.
env.regenerate_views()
def _env_create(
name_or_path: str,
*,
init_file: Optional[str] = None,
dir: bool = False,
with_view: Optional[str] = None,
keep_relative: bool = False,
include_concrete: Optional[List[str]] = None,
):
def _env_create(name_or_path, *, init_file=None, dir=False, with_view=None, keep_relative=False):
"""Create a new environment, with an optional yaml description.
Arguments:
@@ -144,31 +135,22 @@ def _env_create(
keep_relative (bool): if True, develop paths are copied verbatim into
the new environment file, otherwise they may be made absolute if the
new environment is in a different location
include_concrete (list): list of the included concrete environments
"""
if not dir:
env = ev.create(
name_or_path,
init_file=init_file,
with_view=with_view,
keep_relative=keep_relative,
include_concrete=include_concrete,
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg(
colorize(
f"Created environment @c{{{cescape(name_or_path)}}} in: @c{{{cescape(env.path)}}}"
)
)
else:
env = ev.create_in_dir(
name_or_path,
init_file=init_file,
with_view=with_view,
keep_relative=keep_relative,
include_concrete=include_concrete,
)
tty.msg(colorize(f"Created independent environment in: @c{{{cescape(env.path)}}}"))
tty.msg(f"Activate with: {colorize(f'@c{{spack env activate {cescape(name_or_path)}}}')}")
tty.msg("Created environment '%s' in %s" % (name_or_path, env.path))
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % (name_or_path))
return env
env = ev.create_in_dir(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment in %s" % env.path)
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % env.path)
return env
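For reference, the --include-concrete option that appears in this hunk seeds a new environment with the concrete specs of an existing one. A hedged usage sketch (environment names are hypothetical):

import subprocess

# Hypothetical names: copy base-env's concrete specs into new-env.
subprocess.run(
    ["spack", "env", "create", "--include-concrete", "base-env", "new-env"],
    check=True,
)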
@@ -454,12 +436,6 @@ def env_remove_setup_parser(subparser):
"""remove an existing environment"""
subparser.add_argument("rm_env", metavar="env", nargs="+", help="environment(s) to remove")
arguments.add_common_arguments(subparser, ["yes_to_all"])
subparser.add_argument(
"-f",
"--force",
action="store_true",
help="remove the environment even if it is included in another environment",
)
def env_remove(args):
@@ -468,34 +444,14 @@ def env_remove(args):
This removes an environment managed by Spack. Directory environments
and manifests embedded in repositories should be removed manually.
"""
remove_envs = []
valid_envs = []
read_envs = []
bad_envs = []
for env_name in ev.all_environment_names():
for env_name in args.rm_env:
try:
env = ev.read(env_name)
valid_envs.append(env)
if env_name in args.rm_env:
remove_envs.append(env)
read_envs.append(env)
except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
if env_name in args.rm_env:
bad_envs.append(env_name)
# Check if remove_env is included from another env before trying to remove
for env in valid_envs:
for remove_env in remove_envs:
# don't check if environment is included to itself
if env.name == remove_env.name:
continue
if remove_env.path in env.included_concrete_envs:
msg = f'Environment "{remove_env.name}" is being used by environment "{env.name}"'
if args.force:
tty.warn(msg)
else:
tty.die(msg)
bad_envs.append(env_name)
if not args.yes_to_all:
environments = string.plural(len(args.rm_env), "environment", show_n=False)
@@ -504,7 +460,7 @@ def env_remove(args):
if not answer:
tty.die("Will not remove any environments")
for env in remove_envs:
for env in read_envs:
name = env.name
if env.active:
tty.die(f"Environment {name} can't be removed while activated.")

View File

@@ -14,7 +14,6 @@
import spack.cmd as cmd
import spack.environment as ev
import spack.repo
import spack.store
from spack.cmd.common import arguments
from spack.database import InstallStatuses
@@ -70,12 +69,6 @@ def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["long", "very_long", "tags", "namespaces"])
subparser.add_argument(
"-r",
"--only-roots",
action="store_true",
help="don't show full list of installed specs in an environment",
)
subparser.add_argument(
"-c",
"--show-concretized",
@@ -196,22 +189,26 @@ def query_arguments(args):
return q_args
def make_env_decorator(env):
def setup_env(env):
"""Create a function for decorating specs when in an environment."""
roots = set(env.roots())
removed = set(env.removed_specs())
def strip_build(seq):
return set(s.copy(deps=("link", "run")) for s in seq)
added = set(strip_build(env.added_specs()))
roots = set(strip_build(env.roots()))
removed = set(strip_build(env.removed_specs()))
def decorator(spec, fmt):
# add +/-/* to show added/removed/root specs
if any(spec.dag_hash() == r.dag_hash() for r in roots):
return color.colorize(f"@*{{{fmt}}}")
return color.colorize("@*{%s}" % fmt)
elif spec in removed:
return color.colorize(f"@K{{{fmt}}}")
return color.colorize("@K{%s}" % fmt)
else:
return fmt
return "%s" % fmt
return decorator
return decorator, added, roots, removed
def display_env(env, args, decorator, results):
@@ -226,54 +223,10 @@ def display_env(env, args, decorator, results):
"""
tty.msg("In environment %s" % env.name)
num_roots = len(env.user_specs) or "No"
tty.msg(f"{num_roots} root specs")
concrete_specs = {
root: concrete_root
for root, concrete_root in zip(env.concretized_user_specs, env.concrete_roots())
}
def root_decorator(spec, string):
"""Decorate root specs with their install status if needed"""
concrete = concrete_specs.get(spec)
if concrete:
status = color.colorize(concrete.install_status().value)
hash = concrete.dag_hash()
else:
status = color.colorize(spack.spec.InstallStatus.absent.value)
hash = "-" * 32
# TODO: status has two extra spaces on the end of it, but fixing this and other spec
# TODO: space format idiosyncrasies is complicated. Fix this eventually
status = status[:-2]
if args.long or args.very_long:
hash = color.colorize(f"@K{{{hash[: 7 if args.long else None]}}}")
return f"{status} {hash} {string}"
else:
return f"{status} {string}"
with spack.store.STORE.db.read_transaction():
cmd.display_specs(
env.user_specs,
args,
# these are overrides of CLI args
paths=False,
long=False,
very_long=False,
# these enforce details in the root specs to show what the user asked for
namespaces=True,
show_flags=True,
show_full_compiler=True,
decorator=root_decorator,
variants=True,
)
print()
if env.included_concrete_envs:
tty.msg("Included specs")
if not env.user_specs:
tty.msg("No root specs")
else:
tty.msg("Root specs")
# Root specs cannot be displayed with prefixes, since those are not
# set for abstract specs. Same for hashes
@@ -283,10 +236,10 @@ def root_decorator(spec, string):
# Roots are displayed with variants, etc. so that we can see
# specifically what the user asked for.
cmd.display_specs(
env.included_user_specs,
env.user_specs,
root_args,
decorator=lambda s, f: color.colorize("@*{%s}" % f),
namespace=True,
namespaces=True,
show_flags=True,
show_full_compiler=True,
variants=True,
@@ -301,7 +254,7 @@ def root_decorator(spec, string):
# Display a header for the installed packages section IF there are installed
# packages. If there aren't any, we'll just end up printing "0 installed packages"
# later.
if results and not args.only_roots:
if results:
tty.msg("Installed packages")
@@ -310,10 +263,9 @@ def find(parser, args):
results = args.specs(**q_args)
env = ev.active_environment()
if not env and args.only_roots:
tty.die("-r / --only-roots requires an active environment")
decorator = make_env_decorator(env) if env else lambda s, f: f
decorator = lambda s, f: f
if env:
decorator, _, roots, _ = setup_env(env)
# use groups by default except with format.
if args.groups is None:
@@ -340,12 +292,9 @@ def find(parser, args):
if env:
display_env(env, args, decorator, results)
count_suffix = " (not shown)"
if not args.only_roots:
cmd.display_specs(results, args, decorator=decorator, all_headers=True)
count_suffix = ""
cmd.display_specs(results, args, decorator=decorator, all_headers=True)
# print number of installed packages last (as the list may be long)
if sys.stdout.isatty() and args.groups:
pkg_type = "loaded" if args.loaded else "installed"
spack.cmd.print_how_many_pkgs(results, pkg_type, suffix=count_suffix)
spack.cmd.print_how_many_pkgs(results, pkg_type)
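The decorator returned by make_env_decorator/setup_env is just a closure that wraps a formatted spec string in markup when the spec is an environment root. A self-contained sketch of the pattern (markup strings are illustrative, matching on hashes as above):

# Given a set of root hashes, bold the formatted string of matching specs.
def make_decorator(root_hashes):
    def decorator(spec_hash, fmt):
        return f"@*{{{fmt}}}" if spec_hash in root_hashes else fmt
    return decorator

deco = make_decorator({"abc123"})
assert deco("abc123", "zlib@1.3") == "@*{zlib@1.3}"
assert deco("def456", "cmake@3.27") == "cmake@3.27"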

View File

@@ -263,8 +263,8 @@ def _fmt_name_and_default(variant):
return color.colorize(f"@c{{{variant.name}}} @C{{[{_fmt_value(variant.default)}]}}")
def _fmt_when(when: "spack.spec.Spec", indent: int):
return color.colorize(f"{indent * ' '}@B{{when}} {color.cescape(str(when))}")
def _fmt_when(when, indent):
return color.colorize(f"{indent * ' '}@B{{when}} {color.cescape(when)}")
def _fmt_variant_description(variant, width, indent):
@@ -441,7 +441,7 @@ def get_url(version):
return "No URL"
url = get_url(preferred) if pkg.has_code else ""
line = version(" {0}".format(pad(preferred))) + color.cescape(str(url))
line = version(" {0}".format(pad(preferred))) + color.cescape(url)
color.cwrite(line)
print()
@@ -464,7 +464,7 @@ def get_url(version):
continue
for v, url in vers:
line = version(" {0}".format(pad(v))) + color.cescape(str(url))
line = version(" {0}".format(pad(v))) + color.cescape(url)
color.cprint(line)
@@ -475,7 +475,10 @@ def print_virtuals(pkg, args):
color.cprint(section_title("Virtual Packages: "))
if pkg.provided:
for when, specs in reversed(sorted(pkg.provided.items())):
line = " %s provides %s" % (when.cformat(), ", ".join(s.cformat() for s in specs))
line = " %s provides %s" % (
when.colorized(),
", ".join(s.colorized() for s in specs),
)
print(line)
else:
@@ -494,9 +497,7 @@ def print_licenses(pkg, args):
pad = padder(pkg.licenses, 4)
for when_spec in pkg.licenses:
license_identifier = pkg.licenses[when_spec]
line = license(" {0}".format(pad(license_identifier))) + color.cescape(
str(when_spec)
)
line = license(" {0}".format(pad(license_identifier))) + color.cescape(when_spec)
color.cprint(line)

View File

@@ -71,11 +71,6 @@ def setup_parser(subparser):
help="the number of versions to fetch for each spec, choose 'all' to"
" retrieve all versions of each package",
)
create_parser.add_argument(
"--private",
action="store_true",
help="for a private mirror, include non-redistributable packages",
)
arguments.add_common_arguments(create_parser, ["specs"])
arguments.add_concretizer_args(create_parser)
@@ -113,11 +108,6 @@ def setup_parser(subparser):
"and source use `--type binary --type source` (default)"
),
)
add_parser.add_argument(
"--autopush",
action="store_true",
help=("set mirror to push automatically after installation"),
)
add_parser_signed = add_parser.add_mutually_exclusive_group(required=False)
add_parser_signed.add_argument(
"--unsigned",
@@ -185,21 +175,6 @@ def setup_parser(subparser):
),
)
set_parser.add_argument("--url", help="url of mirror directory from 'spack mirror create'")
set_parser_autopush = set_parser.add_mutually_exclusive_group(required=False)
set_parser_autopush.add_argument(
"--autopush",
help="set mirror to push automatically after installation",
action="store_true",
default=None,
dest="autopush",
)
set_parser_autopush.add_argument(
"--no-autopush",
help="set mirror to not push automatically after installation",
action="store_false",
default=None,
dest="autopush",
)
set_parser_unsigned = set_parser.add_mutually_exclusive_group(required=False)
set_parser_unsigned.add_argument(
"--unsigned",
@@ -243,7 +218,6 @@ def mirror_add(args):
or args.type
or args.oci_username
or args.oci_password
or args.autopush
or args.signed is not None
):
connection = {"url": args.url}
@@ -260,8 +234,6 @@ def mirror_add(args):
if args.type:
connection["binary"] = "binary" in args.type
connection["source"] = "source" in args.type
if args.autopush:
connection["autopush"] = args.autopush
if args.signed is not None:
connection["signed"] = args.signed
mirror = spack.mirror.Mirror(connection, name=args.name)
@@ -298,8 +270,6 @@ def _configure_mirror(args):
changes["access_pair"] = [args.oci_username, args.oci_password]
if getattr(args, "signed", None) is not None:
changes["signed"] = args.signed
if getattr(args, "autopush", None) is not None:
changes["autopush"] = args.autopush
# argparse cannot distinguish between --binary and --no-binary when same dest :(
# notice that set-url does not have these args, so getattr
@@ -364,6 +334,7 @@ def concrete_specs_from_user(args):
specs = filter_externals(specs)
specs = list(set(specs))
specs.sort(key=lambda s: (s.name, s.version))
specs, _ = lang.stable_partition(specs, predicate_fn=not_excluded_fn(args))
return specs
@@ -408,50 +379,36 @@ def concrete_specs_from_cli_or_file(args):
return specs
class IncludeFilter:
def __init__(self, args):
self.exclude_specs = []
if args.exclude_file:
self.exclude_specs.extend(specs_from_text_file(args.exclude_file, concretize=False))
if args.exclude_specs:
self.exclude_specs.extend(spack.cmd.parse_specs(str(args.exclude_specs).split()))
self.private = args.private
def not_excluded_fn(args):
"""Return a predicate that evaluate to True if a spec was not explicitly
excluded by the user.
"""
exclude_specs = []
if args.exclude_file:
exclude_specs.extend(specs_from_text_file(args.exclude_file, concretize=False))
if args.exclude_specs:
exclude_specs.extend(spack.cmd.parse_specs(str(args.exclude_specs).split()))
def __call__(self, x):
return all([self._not_license_excluded(x), self._not_cmdline_excluded(x)])
def not_excluded(x):
return not any(x.satisfies(y) for y in exclude_specs)
def _not_license_excluded(self, x):
"""True if the spec is for a private mirror, or as long as the
package does not explicitly forbid redistributing source."""
if self.private:
return True
elif x.package_class.redistribute_source(x):
return True
else:
tty.debug(
"Skip adding {0} to mirror: the package.py file"
" indicates that a public mirror should not contain"
" it.".format(x.name)
)
return False
def _not_cmdline_excluded(self, x):
"""True if a spec was not explicitly excluded by the user."""
return not any(x.satisfies(y) for y in self.exclude_specs)
return not_excluded
def concrete_specs_from_environment():
def concrete_specs_from_environment(selection_fn):
env = ev.active_environment()
assert env, "an active environment is required"
mirror_specs = env.all_specs()
mirror_specs = filter_externals(mirror_specs)
mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=selection_fn)
return mirror_specs
def all_specs_with_all_versions():
def all_specs_with_all_versions(selection_fn):
specs = [spack.spec.Spec(n) for n in spack.repo.all_package_names()]
mirror_specs = spack.mirror.get_all_versions(specs)
mirror_specs.sort(key=lambda s: (s.name, s.version))
mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=selection_fn)
return mirror_specs
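Both sides of this hunk lean on llnl.util.lang.stable_partition to split specs into included and excluded while preserving order. A generic re-implementation for illustration (not the llnl helper itself):

# Split items into (matching, non-matching) lists, preserving input order.
def stable_partition(items, predicate_fn):
    true_items, false_items = [], []
    for item in items:
        (true_items if predicate_fn(item) else false_items).append(item)
    return true_items, false_items

kept, dropped = stable_partition([1, 2, 3, 4], lambda x: x % 2 == 0)
assert (kept, dropped) == ([2, 4], [1, 3])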
@@ -472,6 +429,12 @@ def versions_per_spec(args):
return num_versions
def create_mirror_for_individual_specs(mirror_specs, path, skip_unstable_versions):
present, mirrored, error = spack.mirror.create(path, mirror_specs, skip_unstable_versions)
tty.msg("Summary for mirror in {}".format(path))
process_mirror_stats(present, mirrored, error)
def process_mirror_stats(present, mirrored, error):
p, m, e = len(present), len(mirrored), len(error)
tty.msg(
@@ -517,28 +480,30 @@ def mirror_create(args):
# When no directory is provided, the source dir is used
path = args.directory or spack.caches.fetch_cache_location()
mirror_specs, mirror_fn = _specs_and_action(args)
mirror_fn(mirror_specs, path=path, skip_unstable_versions=args.skip_unstable_versions)
def _specs_and_action(args):
include_fn = IncludeFilter(args)
if args.all and not ev.active_environment():
mirror_specs = all_specs_with_all_versions()
mirror_fn = create_mirror_for_all_specs
elif args.all and ev.active_environment():
mirror_specs = concrete_specs_from_environment()
mirror_fn = create_mirror_for_individual_specs
else:
mirror_specs = concrete_specs_from_user(args)
mirror_fn = create_mirror_for_individual_specs
create_mirror_for_all_specs(
path=path,
skip_unstable_versions=args.skip_unstable_versions,
selection_fn=not_excluded_fn(args),
)
return
mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=include_fn)
return mirror_specs, mirror_fn
if args.all and ev.active_environment():
create_mirror_for_all_specs_inside_environment(
path=path,
skip_unstable_versions=args.skip_unstable_versions,
selection_fn=not_excluded_fn(args),
)
return
mirror_specs = concrete_specs_from_user(args)
create_mirror_for_individual_specs(
mirror_specs, path=path, skip_unstable_versions=args.skip_unstable_versions
)
def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions):
def create_mirror_for_all_specs(path, skip_unstable_versions, selection_fn):
mirror_specs = all_specs_with_all_versions(selection_fn=selection_fn)
mirror_cache, mirror_stats = spack.mirror.mirror_cache_and_stats(
path, skip_unstable_versions=skip_unstable_versions
)
@@ -550,10 +515,11 @@ def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions):
process_mirror_stats(*mirror_stats.stats())
def create_mirror_for_individual_specs(mirror_specs, path, skip_unstable_versions):
present, mirrored, error = spack.mirror.create(path, mirror_specs, skip_unstable_versions)
tty.msg("Summary for mirror in {}".format(path))
process_mirror_stats(present, mirrored, error)
def create_mirror_for_all_specs_inside_environment(path, skip_unstable_versions, selection_fn):
mirror_specs = concrete_specs_from_environment(selection_fn=selection_fn)
create_mirror_for_individual_specs(
mirror_specs, path=path, skip_unstable_versions=skip_unstable_versions
)
def mirror_destroy(args):

View File

@@ -23,7 +23,7 @@
# tutorial configuration parameters
tutorial_branch = "releases/v0.22"
tutorial_branch = "releases/v0.21"
tutorial_mirror = "file:///mirror"
tutorial_key = os.path.join(spack.paths.share_path, "keys", "tutorial.pub")

View File

@@ -214,6 +214,8 @@ def unit_test(parser, args, unknown_args):
# Ensure clingo is available before switching to the
# mock configuration used by unit tests
# Note: skip on Windows here because, for the moment,
# clingo is wholly unsupported via bootstrap
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_core_dependencies()
if pytest is None:

View File

@@ -38,10 +38,10 @@
import spack.cmd
import spack.environment as ev
import spack.filesystem_view as fsv
import spack.schema.projections
import spack.store
from spack.config import validate
from spack.filesystem_view import YamlFilesystemView, view_func_parser
from spack.util import spack_yaml as s_yaml
description = "project packages to a compact naming scheme on the filesystem"
@@ -193,13 +193,17 @@ def view(parser, args):
ordered_projections = {}
# What method are we using for this view
link_type = args.action if args.action in actions_link else "symlink"
view = fsv.YamlFilesystemView(
if args.action in actions_link:
link_fn = view_func_parser(args.action)
else:
link_fn = view_func_parser("symlink")
view = YamlFilesystemView(
path,
spack.store.STORE.layout,
projections=ordered_projections,
ignore_conflicts=getattr(args, "ignore_conflicts", False),
link_type=link_type,
link=link_fn,
verbose=args.verbose,
)

View File

@@ -20,10 +20,8 @@
import spack.compilers
import spack.error
import spack.schema.environment
import spack.spec
import spack.util.executable
import spack.util.libc
import spack.util.module_cmd
import spack.version
from spack.util.environment import filter_system_paths
@@ -109,6 +107,7 @@ def _parse_link_paths(string):
"""
lib_search_paths = False
raw_link_dirs = []
tty.debug("parsing implicit link info")
for line in string.splitlines():
if lib_search_paths:
if line.startswith("\t"):
@@ -123,7 +122,7 @@ def _parse_link_paths(string):
continue
if _LINKER_LINE_IGNORE.match(line):
continue
tty.debug(f"implicit link dirs: link line: {line}")
tty.debug("linker line: %s" % line)
next_arg = False
for arg in line.split():
@@ -139,12 +138,15 @@ def _parse_link_paths(string):
link_dir_arg = _LINK_DIR_ARG.match(arg)
if link_dir_arg:
link_dir = link_dir_arg.group("dir")
tty.debug("linkdir: %s" % link_dir)
raw_link_dirs.append(link_dir)
link_dir_arg = _LIBPATH_ARG.match(arg)
if link_dir_arg:
link_dir = link_dir_arg.group("dir")
tty.debug("libpath: %s", link_dir)
raw_link_dirs.append(link_dir)
tty.debug("found raw link dirs: %s" % ", ".join(raw_link_dirs))
implicit_link_dirs = list()
visited = set()
@@ -154,7 +156,7 @@ def _parse_link_paths(string):
implicit_link_dirs.append(normalized_path)
visited.add(normalized_path)
tty.debug(f"implicit link dirs: result: {', '.join(implicit_link_dirs)}")
tty.debug("found link dirs: %s" % ", ".join(implicit_link_dirs))
return implicit_link_dirs
@@ -415,35 +417,17 @@ def real_version(self):
self._real_version = self.version
return self._real_version
def implicit_rpaths(self) -> List[str]:
def implicit_rpaths(self):
if self.enable_implicit_rpaths is False:
return []
output = self.compiler_verbose_output
if not output:
return []
link_dirs = _parse_non_system_link_dirs(output)
# Put CXX first since it has the most linking issues
# And because it has flags that affect linking
link_dirs = self._get_compiler_link_paths()
all_required_libs = list(self.required_libs) + Compiler._all_compiler_rpath_libraries
return list(paths_containing_libs(link_dirs, all_required_libs))
@property
def default_libc(self) -> Optional["spack.spec.Spec"]:
"""Determine libc targeted by the compiler from link line"""
output = self.compiler_verbose_output
if not output:
return None
dynamic_linker = spack.util.libc.parse_dynamic_linker(output)
if not dynamic_linker:
return None
return spack.util.libc.libc_from_dynamic_linker(dynamic_linker)
@property
def required_libs(self):
"""For executables created with this compiler, the compiler libraries
@@ -452,17 +436,17 @@ def required_libs(self):
# By default every compiler returns the empty list
return []
@property
def compiler_verbose_output(self) -> Optional[str]:
"""Verbose output from compiling a dummy C source file. Output is cached."""
if not hasattr(self, "_compile_c_source_output"):
self._compile_c_source_output = self._compile_dummy_c_source()
return self._compile_c_source_output
def _compile_dummy_c_source(self) -> Optional[str]:
def _get_compiler_link_paths(self):
cc = self.cc if self.cc else self.cxx
if not cc or not self.verbose_flag:
return None
# Cannot determine implicit link paths without a compiler / verbose flag
return []
# What flag types apply to first_compiler, in what order
if cc == self.cc:
flags = ["cflags", "cppflags", "ldflags"]
else:
flags = ["cxxflags", "cppflags", "ldflags"]
try:
tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
@@ -474,19 +458,20 @@ def _compile_dummy_c_source(self) -> Optional[str]:
"int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }\n"
)
cc_exe = spack.util.executable.Executable(cc)
for flag_type in ["cflags" if cc == self.cc else "cxxflags", "cppflags", "ldflags"]:
for flag_type in flags:
cc_exe.add_default_arg(*self.flags.get(flag_type, []))
with self.compiler_environment():
return cc_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
output = cc_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
return _parse_non_system_link_dirs(output)
except spack.util.executable.ProcessError as pe:
tty.debug("ProcessError: Command exited with non-zero status: " + pe.long_message)
return None
return []
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
@property
def verbose_flag(self) -> Optional[str]:
def verbose_flag(self):
"""
This property should be overridden in the compiler subclass if a
verbose flag is available.
@@ -684,8 +669,8 @@ def __str__(self):
@contextlib.contextmanager
def compiler_environment(self):
# Avoid modifying os.environ if possible.
if not self.modules and not self.environment:
# yield immediately if no modules
if not self.modules:
yield
return
@@ -702,9 +687,13 @@ def compiler_environment(self):
spack.util.module_cmd.load_module(module)
# apply other compiler environment changes
spack.schema.environment.parse(self.environment).apply_modifications()
env = spack.util.environment.EnvironmentModifications()
env.extend(spack.schema.environment.parse(self.environment))
env.apply_modifications()
yield
except BaseException:
raise
finally:
# Restore environment regardless of whether inner code succeeded
os.environ.clear()
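For readers unfamiliar with implicit rpath detection: the verbose output of the dummy compile is scanned for -L<dir> style arguments. A toy parser over a single linker line (Spack's real _parse_link_paths handles many more cases):

import re

_LINK_DIR_ARG = re.compile(r"^-L(?P<dir>.+)$")

def link_dirs_from_line(line):
    # Keep the directory part of every -L<dir> argument on the line.
    return [m.group("dir") for arg in line.split()
            if (m := _LINK_DIR_ARG.match(arg))]

line = "collect2 -L/usr/lib/gcc/x86_64-linux-gnu/12 -L/usr/lib -o a.out"
assert link_dirs_from_line(line) == ["/usr/lib/gcc/x86_64-linux-gnu/12", "/usr/lib"]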

View File

@@ -10,7 +10,6 @@
import itertools
import multiprocessing.pool
import os
import warnings
from typing import Dict, List, Optional, Tuple
import archspec.cpu
@@ -110,33 +109,27 @@ def _to_dict(compiler):
return {"compiler": d}
def get_compiler_config(
configuration: "spack.config.Configuration",
*,
scope: Optional[str] = None,
init_config: bool = False,
) -> List[Dict]:
def get_compiler_config(scope=None, init_config=False):
"""Return the compiler configuration for the specified architecture."""
config = configuration.get("compilers", scope=scope) or []
config = spack.config.CONFIG.get("compilers", scope=scope) or []
if config or not init_config:
return config
merged_config = configuration.get("compilers")
merged_config = spack.config.CONFIG.get("compilers")
if merged_config:
# Config is empty for this scope
# Do not init config because there is a non-empty scope
return config
_init_compiler_config(configuration, scope=scope)
config = configuration.get("compilers", scope=scope)
_init_compiler_config(scope=scope)
config = spack.config.CONFIG.get("compilers", scope=scope)
return config
def get_compiler_config_from_packages(
configuration: "spack.config.Configuration", *, scope: Optional[str] = None
) -> List[Dict]:
def get_compiler_config_from_packages(scope=None):
"""Return the compiler configuration from packages.yaml"""
config = configuration.get("packages", scope=scope)
config = spack.config.get("packages", scope=scope)
if not config:
return []
@@ -164,66 +157,43 @@ def _compiler_config_from_package_config(config):
def _compiler_config_from_external(config):
extra_attributes_key = "extra_attributes"
compilers_key = "compilers"
c_key, cxx_key, fortran_key = "c", "cxx", "fortran"
# Allow `@x.y.z` instead of `@=x.y.z`
spec = spack.spec.parse_with_version_concrete(config["spec"])
# use str(spec.versions) to allow `@x.y.z` instead of `@=x.y.z`
compiler_spec = spack.spec.CompilerSpec(
package_name_to_compiler_name.get(spec.name, spec.name), spec.version
)
err_header = f"The external spec '{spec}' cannot be used as a compiler"
extra_attributes = config.get("extra_attributes", {})
prefix = config.get("prefix", None)
# If extra_attributes is not there I might not want to use this entry as a compiler,
# therefore just leave a debug message, but don't be loud with a warning.
if extra_attributes_key not in config:
tty.debug(f"[{__file__}] {err_header}: missing the '{extra_attributes_key}' key")
compiler_class = class_for_compiler_name(compiler_spec.name)
paths = extra_attributes.get("paths", {})
compiler_langs = ["cc", "cxx", "fc", "f77"]
for lang in compiler_langs:
if paths.setdefault(lang, None):
continue
if not prefix:
continue
# Check for files that satisfy the naming scheme for this compiler
bindir = os.path.join(prefix, "bin")
for f, regex in itertools.product(os.listdir(bindir), compiler_class.search_regexps(lang)):
if regex.match(f):
paths[lang] = os.path.join(bindir, f)
if all(v is None for v in paths.values()):
return None
extra_attributes = config[extra_attributes_key]
# If I have 'extra_attributes' warn if 'compilers' is missing, or we don't have a C compiler
if compilers_key not in extra_attributes:
warnings.warn(
f"{err_header}: missing the '{compilers_key}' key under '{extra_attributes_key}'"
)
return None
attribute_compilers = extra_attributes[compilers_key]
if c_key not in attribute_compilers:
warnings.warn(
f"{err_header}: missing the C compiler path under "
f"'{extra_attributes_key}:{compilers_key}'"
)
return None
c_compiler = attribute_compilers[c_key]
# C++ and Fortran compilers are not mandatory, so let's just leave a debug trace
if cxx_key not in attribute_compilers:
tty.debug(f"[{__file__}] The external spec {spec} does not have a C++ compiler")
if fortran_key not in attribute_compilers:
tty.debug(f"[{__file__}] The external spec {spec} does not have a Fortran compiler")
# compilers format has cc/fc/f77, externals format has "c/fortran"
paths = {
"cc": c_compiler,
"cxx": attribute_compilers.get(cxx_key, None),
"fc": attribute_compilers.get(fortran_key, None),
"f77": attribute_compilers.get(fortran_key, None),
}
if not spec.architecture:
host_platform = spack.platforms.host()
operating_system = host_platform.operating_system("default_os")
target = host_platform.target("default_target").microarchitecture
else:
target = spec.architecture.target
target = spec.target
if not target:
target = spack.platforms.host().target("default_target")
target = target.microarchitecture
host_platform = spack.platforms.host()
target = host_platform.target("default_target").microarchitecture
operating_system = spec.os
if not operating_system:
@@ -246,15 +216,13 @@ def _compiler_config_from_external(config):
return compiler_entry
def _init_compiler_config(
configuration: "spack.config.Configuration", *, scope: Optional[str]
) -> None:
def _init_compiler_config(*, scope):
"""Compiler search used when Spack has no compilers."""
compilers = find_compilers()
compilers_dict = []
for compiler in compilers:
compilers_dict.append(_to_dict(compiler))
configuration.set("compilers", compilers_dict, scope=scope)
spack.config.set("compilers", compilers_dict, scope=scope)
def compiler_config_files():
@@ -265,7 +233,7 @@ def compiler_config_files():
compiler_config = config.get("compilers", scope=name)
if compiler_config:
config_files.append(config.get_config_filename(name, "compilers"))
compiler_config_from_packages = get_compiler_config_from_packages(config, scope=name)
compiler_config_from_packages = get_compiler_config_from_packages(scope=name)
if compiler_config_from_packages:
config_files.append(config.get_config_filename(name, "packages"))
return config_files
@@ -278,9 +246,7 @@ def add_compilers_to_config(compilers, scope=None):
compilers: a list of Compiler objects.
scope: configuration scope to modify.
"""
compiler_config = get_compiler_config(
configuration=spack.config.CONFIG, scope=scope, init_config=False
)
compiler_config = get_compiler_config(scope, init_config=False)
for compiler in compilers:
if not compiler.cc:
tty.debug(f"{compiler.spec} does not have a C compiler")
@@ -329,9 +295,7 @@ def _remove_compiler_from_scope(compiler_spec, scope):
True if one or more compiler entries were actually removed, False otherwise
"""
assert scope is not None, "a specific scope is needed when calling this function"
compiler_config = get_compiler_config(
configuration=spack.config.CONFIG, scope=scope, init_config=False
)
compiler_config = get_compiler_config(scope, init_config=False)
filtered_compiler_config = [
compiler_entry
for compiler_entry in compiler_config
@@ -346,28 +310,21 @@ def _remove_compiler_from_scope(compiler_spec, scope):
# We need to preserve the YAML type for comments, hence we are copying the
# items in the list that has just been retrieved
compiler_config[:] = filtered_compiler_config
spack.config.CONFIG.set("compilers", compiler_config, scope=scope)
spack.config.set("compilers", compiler_config, scope=scope)
return True
def all_compilers_config(
configuration: "spack.config.Configuration",
*,
scope: Optional[str] = None,
init_config: bool = True,
) -> List["spack.compiler.Compiler"]:
def all_compilers_config(scope=None, init_config=True):
"""Return a set of specs for all the compiler versions currently
available to build with. These are instances of CompilerSpec.
"""
from_packages_yaml = get_compiler_config_from_packages(configuration, scope=scope)
from_packages_yaml = get_compiler_config_from_packages(scope)
if from_packages_yaml:
init_config = False
from_compilers_yaml = get_compiler_config(configuration, scope=scope, init_config=init_config)
from_compilers_yaml = get_compiler_config(scope, init_config)
result = from_compilers_yaml + from_packages_yaml
# Dedupe entries by the compiler they represent
# If the entry is invalid, treat it as unique for deduplication
key = lambda c: _compiler_from_config_entry(c["compiler"] or id(c))
key = lambda c: _compiler_from_config_entry(c["compiler"])
return list(llnl.util.lang.dedupe(result, key=key))
@@ -375,7 +332,7 @@ def all_compiler_specs(scope=None, init_config=True):
# Return compiler specs from the merged config.
return [
spack.spec.parse_with_version_concrete(s["compiler"]["spec"], compiler=True)
for s in all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config)
for s in all_compilers_config(scope, init_config)
]
@@ -535,20 +492,11 @@ def find_specs_by_arch(compiler_spec, arch_spec, scope=None, init_config=True):
def all_compilers(scope=None, init_config=True):
return all_compilers_from(
configuration=spack.config.CONFIG, scope=scope, init_config=init_config
)
def all_compilers_from(configuration, scope=None, init_config=True):
compilers = []
for items in all_compilers_config(
configuration=configuration, scope=scope, init_config=init_config
):
config = all_compilers_config(scope, init_config=init_config)
compilers = list()
for items in config:
items = items["compiler"]
compiler = _compiler_from_config_entry(items) # can be None in error case
if compiler:
compilers.append(compiler)
compilers.append(_compiler_from_config_entry(items))
return compilers
@@ -559,7 +507,7 @@ def compilers_for_spec(
"""This gets all compilers that satisfy the supplied CompilerSpec.
Returns an empty list if none are found.
"""
config = all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config)
config = all_compilers_config(scope, init_config)
matches = set(find(compiler_spec, scope, init_config))
compilers = []
@@ -569,7 +517,7 @@ def compilers_for_spec(
def compilers_for_arch(arch_spec, scope=None):
config = all_compilers_config(spack.config.CONFIG, scope=scope)
config = all_compilers_config(scope)
return list(get_compilers(config, arch_spec=arch_spec))
@@ -655,10 +603,7 @@ def _compiler_from_config_entry(items):
compiler = _compiler_cache.get(config_id, None)
if compiler is None:
try:
compiler = compiler_from_dict(items)
except UnknownCompilerError as e:
warnings.warn(e.message)
compiler = compiler_from_dict(items)
_compiler_cache[config_id] = compiler
return compiler
@@ -711,9 +656,7 @@ def get_compilers(config, cspec=None, arch_spec=None):
raise ValueError(msg)
continue
compiler = _compiler_from_config_entry(items)
if compiler:
compilers.append(compiler)
compilers.append(_compiler_from_config_entry(items))
return compilers
@@ -990,11 +933,10 @@ def _default_make_compilers(cmp_id, paths):
make_mixed_toolchain(flat_compilers)
# Finally, create the compiler list
compilers: List["spack.compiler.Compiler"] = []
compilers = []
for compiler_id, _, compiler in flat_compilers:
make_compilers = getattr(compiler_id.os, "make_compilers", _default_make_compilers)
candidates = make_compilers(compiler_id, compiler)
compilers.extend(x for x in candidates if x.cc is not None)
compilers.extend(make_compilers(compiler_id, compiler))
return compilers
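The dedupe-by-key idiom used for compiler entries above is order-preserving. A generic equivalent of llnl.util.lang.dedupe, shown here for illustration only:

# Order-preserving dedupe: yield each item whose key has not been seen.
def dedupe(iterable, key=None):
    seen = set()
    for item in iterable:
        k = item if key is None else key(item)
        if k not in seen:
            seen.add(k)
            yield item

entries = [{"compiler": "gcc@12"}, {"compiler": "gcc@12"}, {"compiler": "clang@17"}]
assert len(list(dedupe(entries, key=lambda c: c["compiler"]))) == 2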

View File

@@ -38,10 +38,10 @@ class Clang(Compiler):
cxx_names = ["clang++"]
# Subclasses use possible names of Fortran 77 compiler
f77_names = ["flang-new", "flang"]
f77_names = ["flang"]
# Subclasses use possible names of Fortran 90 compiler
fc_names = ["flang-new", "flang"]
fc_names = ["flang"]
version_argument = "--version"
@@ -171,11 +171,10 @@ def extract_version_from_output(cls, output):
match = re.search(
# Normal clang compiler versions are left as-is
r"(?:clang|flang-new) version ([^ )\n]+)-svn[~.\w\d-]*|"
r"clang version ([^ )\n]+)-svn[~.\w\d-]*|"
# Don't include hyphenated patch numbers in the version
# (see https://github.com/spack/spack/pull/14365 for details)
r"(?:clang|flang-new) version ([^ )\n]+?)-[~.\w\d-]*|"
r"(?:clang|flang-new) version ([^ )\n]+)",
r"clang version ([^ )\n]+?)-[~.\w\d-]*|" r"clang version ([^ )\n]+)",
output,
)
if match:
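To see what the widened alternation buys (one side of the hunk matches flang-new banners as well as clang), a quick check against a plausible banner string (the banner text is hypothetical):

import re

pattern = (
    r"(?:clang|flang-new) version ([^ )\n]+)-svn[~.\w\d-]*|"
    r"(?:clang|flang-new) version ([^ )\n]+?)-[~.\w\d-]*|"
    r"(?:clang|flang-new) version ([^ )\n]+)"
)
match = re.search(pattern, "flang-new version 17.0.6")
assert match and match.group(3) == "17.0.6"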

View File

@@ -8,7 +8,7 @@
import subprocess
import sys
import tempfile
from typing import Dict, List
from typing import Dict, List, Set
import archspec.cpu
@@ -20,7 +20,15 @@
from spack.error import SpackError
from spack.version import Version, VersionRange
FC_PATH: Dict[str, str] = dict()
avail_fc_version: Set[str] = set()
fc_path: Dict[str, str] = dict()
fortran_mapping = {
"2021.3.0": "19.29.30133",
"2021.2.1": "19.28.29913",
"2021.2.0": "19.28.29334",
"2021.1.0": "19.28.29333",
}
class CmdCall:
@@ -107,13 +115,15 @@ def command_str(self):
return f"{script} {self.arch} {self.sdk_ver} {self.vcvars_ver}"
def get_valid_fortran_pth():
"""Assign maximum available fortran compiler version"""
# TODO (johnwparent): validate compatibility w/ try compiler
# functionality when added
def get_valid_fortran_pth(comp_ver):
cl_ver = str(comp_ver)
sort_fn = lambda fc_ver: Version(fc_ver)
sort_fc_ver = sorted(list(FC_PATH.keys()), key=sort_fn)
return FC_PATH[sort_fc_ver[-1]] if sort_fc_ver else None
sort_fc_ver = sorted(list(avail_fc_version), key=sort_fn)
for ver in sort_fc_ver:
if ver in fortran_mapping:
if Version(cl_ver) <= Version(fortran_mapping[ver]):
return fc_path[ver]
return None
class Msvc(Compiler):
@@ -157,9 +167,11 @@ def __init__(self, *args, **kwargs):
# This positional argument "paths" is later parsed and process by the base class
# via the call to `super` later in this method
paths = args[3]
latest_fc = get_valid_fortran_pth()
new_pth = [pth if pth else latest_fc for pth in paths[2:]]
paths[2:] = new_pth
# This positional argument "cspec" is also parsed and handled by the base class
# constructor
cspec = args[0]
new_pth = [pth if pth else get_valid_fortran_pth(cspec.version) for pth in paths]
paths[:] = new_pth
# Initialize, deferring to base class but then adding the vcvarsallfile
# file based on compiler executable path.
super().__init__(*args, **kwargs)
@@ -171,7 +183,7 @@ def __init__(self, *args, **kwargs):
# and stores their path, but their respective VCVARS
# file must be invoked before usage.
env_cmds = []
compiler_root = os.path.join(os.path.dirname(self.cc), "../../../../../..")
compiler_root = os.path.join(self.cc, "../../../../../../..")
vcvars_script_path = os.path.join(compiler_root, "Auxiliary", "Build", "vcvars64.bat")
# get current platform architecture and format for vcvars argument
arch = spack.platforms.real_host().default.lower()
@@ -186,34 +198,11 @@ def __init__(self, *args, **kwargs):
# paths[2] refers to the fc path and is a generic check
# for a fortran compiler
if paths[2]:
def get_oneapi_root(pth: str):
"""From within a prefix known to be a oneAPI path
determine the oneAPI root path from arbitrary point
under root
Args:
pth: path prefixed within oneAPI root
"""
if not pth:
return ""
while os.path.basename(pth) and os.path.basename(pth) != "oneAPI":
pth = os.path.dirname(pth)
return pth
# If this found, it sets all the vars
oneapi_root = get_oneapi_root(self.fc)
if not oneapi_root:
raise RuntimeError(f"Non-oneAPI Fortran compiler {self.fc} assigned to MSVC")
oneapi_root = os.path.join(self.cc, "../../..")
oneapi_root_setvars = os.path.join(oneapi_root, "setvars.bat")
# some oneAPI exes return a version more precise than their
# install paths specify, so we determine the version from
# the install path rather than from the fc executable itself
numver = r"\d+\.\d+(?:\.\d+)?"
pattern = f"((?:{numver})|(?:latest))"
version_from_path = re.search(pattern, self.fc).group(1)
oneapi_version_setvars = os.path.join(
oneapi_root, "compiler", version_from_path, "env", "vars.bat"
oneapi_root, "compiler", str(self.ifx_version), "env", "vars.bat"
)
# order matters here, the specific version env must be invoked first,
# otherwise it will be ignored if the root setvars sets up the oneapi
@@ -325,19 +314,23 @@ def setup_custom_environment(self, pkg, env):
@classmethod
def fc_version(cls, fc):
# We're using Intel for the Fortran compilers, which exist if
# ONEAPI_ROOT is a meaningful variable
if not sys.platform == "win32":
return "unknown"
fc_ver = cls.default_version(fc)
FC_PATH[fc_ver] = fc
try:
sps = spack.operating_systems.windows_os.WindowsOs().compiler_search_paths
except AttributeError:
raise SpackError(
"Windows compiler search paths not established, "
"please report this behavior to github.com/spack/spack"
)
clp = spack.util.executable.which_string("cl", path=sps)
return cls.default_version(clp) if clp else fc_ver
avail_fc_version.add(fc_ver)
fc_path[fc_ver] = fc
if os.getenv("ONEAPI_ROOT"):
try:
sps = spack.operating_systems.windows_os.WindowsOs().compiler_search_paths
except AttributeError:
raise SpackError("Windows compiler search paths not established")
clp = spack.util.executable.which_string("cl", path=sps)
ver = cls.default_version(clp)
else:
ver = fc_ver
return ver
@classmethod
def f77_version(cls, f77):

View File

@@ -64,7 +64,7 @@ def verbose_flag(self):
#
# This way, we at least enable the implicit rpath detection, which is
# based on compilation of a C file (see method
# spack.compiler._compile_dummy_c_source): in the case of a mixed
# spack.compiler._get_compiler_link_paths): in the case of a mixed
# NAG/GCC toolchain, the flag will be passed to g++ (e.g.
# 'g++ -Wl,-v ./main.c'), otherwise, the flag will be passed to nagfor
# (e.g. 'nagfor -Wl,-v ./main.c' - note that nagfor recognizes '.c'

View File

@@ -74,10 +74,6 @@ class Concretizer:
#: during concretization. Used for testing and for mirror creation
check_for_compiler_existence = None
#: Packages that the old concretizer cannot deal with correctly, and cannot build anyway.
#: Those will not be considered as providers for virtuals.
non_buildable_packages = {"glibc", "musl"}
def __init__(self, abstract_spec=None):
if Concretizer.check_for_compiler_existence is None:
Concretizer.check_for_compiler_existence = not spack.config.get(
@@ -117,11 +113,7 @@ def _valid_virtuals_and_externals(self, spec):
pref_key = lambda spec: 0 # no-op pref key
if spec.virtual:
candidates = [
s
for s in spack.repo.PATH.providers_for(spec)
if s.name not in self.non_buildable_packages
]
candidates = spack.repo.PATH.providers_for(spec)
if not candidates:
raise spack.error.UnsatisfiableProviderSpecError(candidates[0], spec)

View File

@@ -1562,9 +1562,8 @@ def ensure_latest_format_fn(section: str) -> Callable[[YamlConfigDict], bool]:
def use_configuration(
*scopes_or_paths: Union[ConfigScope, str]
) -> Generator[Configuration, None, None]:
"""Use the configuration scopes passed as arguments within the context manager.
This function invalidates caches, and is therefore very slow.
"""Use the configuration scopes passed as arguments within the
context manager.
Args:
*scopes_or_paths: scope objects or paths to be used

View File

@@ -12,26 +12,26 @@
},
"os_package_manager": "yum_amazon"
},
"fedora:40": {
"fedora:38": {
"bootstrap": {
"template": "container/fedora.dockerfile",
"image": "docker.io/fedora:40"
"template": "container/fedora_38.dockerfile",
"image": "docker.io/fedora:38"
},
"os_package_manager": "dnf",
"build": "spack/fedora40",
"build": "spack/fedora38",
"final": {
"image": "docker.io/fedora:40"
"image": "docker.io/fedora:38"
}
},
"fedora:39": {
"fedora:37": {
"bootstrap": {
"template": "container/fedora.dockerfile",
"image": "docker.io/fedora:39"
"template": "container/fedora_37.dockerfile",
"image": "docker.io/fedora:37"
},
"os_package_manager": "dnf",
"build": "spack/fedora39",
"build": "spack/fedora37",
"final": {
"image": "docker.io/fedora:39"
"image": "docker.io/fedora:37"
}
},
"rockylinux:9": {
@@ -116,13 +116,6 @@
},
"os_package_manager": "apt"
},
"ubuntu:24.04": {
"bootstrap": {
"template": "container/ubuntu_2404.dockerfile"
},
"os_package_manager": "apt",
"build": "spack/ubuntu-noble"
},
"ubuntu:22.04": {
"bootstrap": {
"template": "container/ubuntu_2204.dockerfile"
@@ -136,6 +129,13 @@
},
"build": "spack/ubuntu-focal",
"os_package_manager": "apt"
},
"ubuntu:18.04": {
"bootstrap": {
"template": "container/ubuntu_1804.dockerfile"
},
"os_package_manager": "apt",
"build": "spack/ubuntu-bionic"
}
},
"os_package_managers": {

View File

@@ -25,7 +25,6 @@
import socket
import sys
import time
from json import JSONDecoder
from typing import (
Any,
Callable,
@@ -819,8 +818,7 @@ def _read_from_file(self, filename):
"""
try:
with open(filename, "r") as f:
# In the future we may use a stream of JSON objects, hence `raw_decode` for compat.
fdata, _ = JSONDecoder().raw_decode(f.read())
fdata = sjson.load(f)
except Exception as e:
raise CorruptDatabaseError("error parsing database:", str(e)) from e
@@ -835,24 +833,27 @@ def check(cond, msg):
# High-level file checks
db = fdata["database"]
check("installs" in db, "no 'installs' in JSON DB.")
check("version" in db, "no 'version' in JSON DB.")
installs = db["installs"]
# TODO: better version checking semantics.
version = vn.Version(db["version"])
if version > _DB_VERSION:
raise InvalidDatabaseVersionError(self, _DB_VERSION, version)
elif version < _DB_VERSION and not any(
old == version and new == _DB_VERSION for old, new in _SKIP_REINDEX
):
tty.warn(f"Spack database version changed from {version} to {_DB_VERSION}. Upgrading.")
elif version < _DB_VERSION:
if not any(old == version and new == _DB_VERSION for old, new in _SKIP_REINDEX):
tty.warn(
"Spack database version changed from %s to %s. Upgrading."
% (version, _DB_VERSION)
)
self.reindex(spack.store.STORE.layout)
installs = dict(
(k, v.to_dict(include_fields=self._record_fields)) for k, v in self._data.items()
)
else:
check("installs" in db, "no 'installs' in JSON DB.")
installs = db["installs"]
self.reindex(spack.store.STORE.layout)
installs = dict(
(k, v.to_dict(include_fields=self._record_fields))
for k, v in self._data.items()
)
spec_reader = reader(version)

View File

@@ -83,15 +83,26 @@ def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
return path_to_dict(search_paths)
def get_elf_compat(path):
"""For ELF files, get a triplet (EI_CLASS, EI_DATA, e_machine) and see if
it is host-compatible."""
# On platforms supporting ELF, we try to be a bit smarter when it comes to shared
# libraries, by dropping those that are not host-compatible.
with open(path, "rb") as f:
elf = elf_utils.parse_elf(f, only_header=True)
return (elf.is_64_bit, elf.is_little_endian, elf.elf_hdr.e_machine)
def accept_elf(path, host_compat):
"""Accept an ELF file if the header matches the given compat triplet. In case it's not an ELF
(e.g. static library, or some arbitrary file, fall back to is_readable_file)."""
"""Accept an ELF file if the header matches the given compat triplet,
obtained with :py:func:`get_elf_compat`. In case it's not an ELF (e.g.
static library, or some arbitrary file, fall back to is_readable_file)."""
# Fast path: assume libraries at least have .so in their basename.
# Note: don't replace with splitext, because of libsmth.so.1.2.3 file names.
if ".so" not in os.path.basename(path):
return llnl.util.filesystem.is_readable_file(path)
try:
return host_compat == elf_utils.get_elf_compat(path)
return host_compat == get_elf_compat(path)
except (OSError, elf_utils.ElfParsingError):
return llnl.util.filesystem.is_readable_file(path)
@@ -144,7 +155,7 @@ def libraries_in_ld_and_system_library_path(
search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
try:
host_compat = elf_utils.get_elf_compat(sys.executable)
host_compat = get_elf_compat(sys.executable)
accept = lambda path: accept_elf(path, host_compat)
except (OSError, elf_utils.ElfParsingError):
accept = llnl.util.filesystem.is_readable_file
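The compat triplet (is_64_bit, is_little_endian, e_machine) can be read straight out of the ELF header. A rough sketch using only fixed header offsets (no error handling; assumes an ELF input such as the Python binary on Linux):

import struct
import sys

def elf_compat(path):
    # e_ident magic at 0..3, EI_CLASS at 4, EI_DATA at 5, e_machine at 18..19.
    with open(path, "rb") as f:
        ident = f.read(20)
    assert ident[:4] == b"\x7fELF"
    is_64_bit = ident[4] == 2
    is_little_endian = ident[5] == 1
    fmt = "<H" if is_little_endian else ">H"
    (e_machine,) = struct.unpack(fmt, ident[18:20])
    return (is_64_bit, is_little_endian, e_machine)

print(elf_compat(sys.executable))  # e.g. (True, True, 62) on x86_64 Linux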

View File

@@ -11,7 +11,6 @@
from llnl.util import filesystem
import spack.platforms
import spack.repo
import spack.spec
from spack.util import spack_yaml
@@ -33,8 +32,6 @@ class ExpectedTestResult(NamedTuple):
#: Spec to be detected
spec: str
#: Attributes expected in the external spec
extra_attributes: Dict[str, str]
class DetectionTest(NamedTuple):
@@ -103,10 +100,7 @@ def _create_executable_scripts(self, mock_executables: MockExecutables) -> List[
@property
def expected_specs(self) -> List[spack.spec.Spec]:
return [
spack.spec.Spec.from_detection(item.spec, extra_attributes=item.extra_attributes)
for item in self.test.results
]
return [spack.spec.Spec(r.spec) for r in self.test.results]
def detection_tests(pkg_name: str, repository: spack.repo.RepoPath) -> List[Runner]:
@@ -123,13 +117,9 @@ def detection_tests(pkg_name: str, repository: spack.repo.RepoPath) -> List[Runn
"""
result = []
detection_tests_content = read_detection_tests(pkg_name, repository)
current_platform = str(spack.platforms.host())
tests_by_path = detection_tests_content.get("paths", [])
for single_test_data in tests_by_path:
if current_platform not in single_test_data.get("platforms", [current_platform]):
continue
mock_executables = []
for layout in single_test_data["layout"]:
mock_executables.append(
@@ -137,11 +127,7 @@ def detection_tests(pkg_name: str, repository: spack.repo.RepoPath) -> List[Runn
)
expected_results = []
for assertion in single_test_data["results"]:
expected_results.append(
ExpectedTestResult(
spec=assertion["spec"], extra_attributes=assertion.get("extra_attributes", {})
)
)
expected_results.append(ExpectedTestResult(spec=assertion["spec"]))
current_test = DetectionTest(
pkg_name=pkg_name, layout=mock_executables, results=expected_results

View File

@@ -27,7 +27,6 @@ class OpenMpi(Package):
* ``variant``
* ``version``
* ``requires``
* ``redistribute``
"""
import collections
@@ -64,7 +63,6 @@ class OpenMpi(Package):
__all__ = [
"DirectiveError",
"DirectiveMeta",
"DisableRedistribute",
"version",
"conflicts",
"depends_on",
@@ -77,7 +75,6 @@ class OpenMpi(Package):
"resource",
"build_system",
"requires",
"redistribute",
]
#: These are variant names used by Spack internally; packages can't use them
@@ -97,7 +94,7 @@ class OpenMpi(Package):
PatchesType = Optional[Union[Patcher, str, List[Union[Patcher, str]]]]
SUPPORTED_LANGUAGES = ("fortran", "cxx", "c")
SUPPORTED_LANGUAGES = ("fortran", "cxx")
def _make_when_spec(value: WhenType) -> Optional["spack.spec.Spec"]:
@@ -601,68 +598,9 @@ def _execute_depends_on(pkg: "spack.package_base.PackageBase"):
return _execute_depends_on
#: Store whether a given Spec source/binary should not be redistributed.
class DisableRedistribute:
def __init__(self, source, binary):
self.source = source
self.binary = binary
@directive("disable_redistribute")
def redistribute(source=None, binary=None, when: WhenType = None):
"""Can be used inside a Package definition to declare that
the package source and/or compiled binaries should not be
redistributed.
By default, Packages allow source/binary distribution (i.e. in
mirrors). Because of this, and because overlapping enable/
disable specs are not allowed, this directive only allows users
to explicitly disable redistribution for specs.
"""
return lambda pkg: _execute_redistribute(pkg, source, binary, when)
def _execute_redistribute(
pkg: "spack.package_base.PackageBase", source=None, binary=None, when: WhenType = None
):
if source is None and binary is None:
return
elif (source is True) or (binary is True):
raise DirectiveError(
"Source/binary distribution are true by default, they can only "
"be explicitly disabled."
)
if source is None:
source = True
if binary is None:
binary = True
when_spec = _make_when_spec(when)
if not when_spec:
return
if source is False:
max_constraint = spack.spec.Spec(f"{pkg.name}@{when_spec.versions}")
if not max_constraint.satisfies(when_spec):
raise DirectiveError("Source distribution can only be disabled for versions")
if when_spec in pkg.disable_redistribute:
disable = pkg.disable_redistribute[when_spec]
if not source:
disable.source = True
if not binary:
disable.binary = True
else:
pkg.disable_redistribute[when_spec] = DisableRedistribute(
source=not source, binary=not binary
)
@directive(("extendees", "dependencies"))
def extends(spec, when=None, type=("build", "run"), patches=None):
"""Same as depends_on, but also adds this package to the extendee list.
In case of Python, also adds a dependency on python-venv.
keyword arguments can be passed to extends() so that extension
packages can pass parameters to the extendee's extension
@@ -678,11 +616,6 @@ def _execute_extends(pkg):
_depends_on(pkg, spec, when=when, type=type, patches=patches)
spec_obj = spack.spec.Spec(spec)
# When extending python, also add a dependency on python-venv. This is done so that
# Spack environment views are Python virtual environments.
if spec_obj.name == "python" and not pkg.name == "python-venv":
_depends_on(pkg, "python-venv", when=when, type=("build", "run"))
# TODO: the values of the extendees dictionary are not used. Remove in next refactor.
pkg.extendees[spec_obj.name] = (spec_obj, None)
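For context, the redistribute directive in this hunk is used inside a package definition. A hypothetical package that keeps a particular release off public mirrors (name, URL, and checksum are placeholders):

from spack.package import *

class Examplepkg(Package):
    """Hypothetical package demonstrating the redistribute directive."""

    homepage = "https://example.com"
    url = "https://example.com/examplepkg-1.0.tar.gz"

    version("1.0", sha256="0" * 64)  # placeholder checksum

    # Keep both source and binaries of 1.0 off public mirrors.
    redistribute(source=False, binary=False, when="@1.0")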

View File

@@ -15,7 +15,6 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.symlink import readlink
import spack.config
import spack.hash_types as ht
@@ -182,7 +181,7 @@ def deprecated_file_path(self, deprecated_spec, deprecator_spec=None):
base_dir = (
self.path_for_spec(deprecator_spec)
if deprecator_spec
else readlink(deprecated_spec.prefix)
else os.readlink(deprecated_spec.prefix)
)
yaml_path = os.path.join(

View File

@@ -34,9 +34,6 @@
* ``spec``: a string representation of the abstract spec that was concretized
4. ``concrete_specs``: a dictionary containing the specs in the environment.
5. ``include_concrete`` (dictionary): an optional dictionary that includes the roots
and concrete specs from the included environments, keyed by the path to that
environment
Compatibility
-------------
@@ -53,37 +50,26 @@
- ``v2``
- ``v3``
- ``v4``
- ``v5``
* - ``v0.12:0.14``
  - ✅
  -
  -
  -
  -
* - ``v0.15:0.16``
  - ✅
  - ✅
  -
  -
  -
* - ``v0.17``
  - ✅
  - ✅
  - ✅
  -
  -
* - ``v0.18:``
  - ✅
  - ✅
  - ✅
  - ✅
  -
* - ``v0.22:``
  - ✅
  - ✅
  - ✅
  - ✅
  - ✅
Version 1
---------
@@ -348,118 +334,6 @@
}
}
}
Version 5
---------
Version 5 doesn't change the top-level lockfile format, but adds an optional
dictionary that holds the ``roots`` and ``concrete_specs`` of the included
environments, keyed by the path to each environment. Since it is optional,
``include_concrete`` is absent from the lockfile when the environment does
not include any other environments.
.. code-block:: json
{
"_meta": {
"file-type": "spack-lockfile",
"lockfile-version": 5,
"specfile-version": 3
},
"roots": [
{
"hash": "<dag_hash 1>",
"spec": "<abstract spec 1>"
},
{
"hash": "<dag_hash 2>",
"spec": "<abstract spec 2>"
}
],
"concrete_specs": {
"<dag_hash 1>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_1",
"hash": "<dag_hash for depname_1>",
"type": ["build", "link"]
},
{
"name": "depname_2",
"hash": "<dag_hash for depname_2>",
"type": ["build", "link"]
}
],
"hash": "<dag_hash 1>",
},
"<daghash 2>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_3",
"hash": "<dag_hash for depname_3>",
"type": ["build", "link"]
},
{
"name": "depname_4",
"hash": "<dag_hash for depname_4>",
"type": ["build", "link"]
}
],
"hash": "<dag_hash 2>"
}
},
"include_concrete": {
"<path to environment>": {
"roots": [
{
"hash": "<dag_hash 1>",
"spec": "<abstract spec 1>"
},
{
"hash": "<dag_hash 2>",
"spec": "<abstract spec 2>"
}
],
"concrete_specs": {
"<dag_hash 1>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_1",
"hash": "<dag_hash for depname_1>",
"type": ["build", "link"]
},
{
"name": "depname_2",
"hash": "<dag_hash for depname_2>",
"type": ["build", "link"]
}
],
"hash": "<dag_hash 1>",
},
"<daghash 2>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_3",
"hash": "<dag_hash for depname_3>",
"type": ["build", "link"]
},
{
"name": "depname_4",
"hash": "<dag_hash for depname_4>",
"type": ["build", "link"]
}
],
"hash": "<dag_hash 2>"
}
}
}
}
}
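As an illustration, a short stdlib-only sketch (the ``spack.lock`` path is an assumption) that walks the optional ``include_concrete`` section of a v5 lockfile:

import json

# list the roots of each included environment recorded in the lockfile
with open("spack.lock") as f:
    lock = json.load(f)

for env_path, data in lock.get("include_concrete", {}).items():
    for root in data["roots"]:
        print(env_path, root["hash"], root["spec"])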
"""
from .environment import (

View File

@@ -16,13 +16,13 @@
import urllib.parse
import urllib.request
import warnings
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from typing import Dict, Iterable, List, Optional, Set, Tuple, Union
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import llnl.util.tty.color as clr
from llnl.util.link_tree import ConflictingSpecsError
from llnl.util.symlink import readlink, symlink
from llnl.util.symlink import symlink
import spack.compilers
import spack.concretize
@@ -30,7 +30,6 @@
import spack.deptypes as dt
import spack.error
import spack.fetch_strategy
import spack.filesystem_view as fsv
import spack.hash_types as ht
import spack.hooks
import spack.main
@@ -53,6 +52,7 @@
import spack.util.url
import spack.version
from spack import traverse
from spack.filesystem_view import SimpleFilesystemView, inverse_view_func_parser, view_func_parser
from spack.installer import PackageInstaller
from spack.schema.env import TOP_LEVEL_KEY
from spack.spec import Spec
@@ -106,16 +106,17 @@ def environment_name(path: Union[str, pathlib.Path]) -> str:
return path_str
def ensure_no_disallowed_env_config_mods(scopes: List[spack.config.ConfigScope]) -> None:
def check_disallowed_env_config_mods(scopes):
for scope in scopes:
config = scope.get_section("config")
if config and "environments_root" in config["config"]:
raise SpackEnvironmentError(
"Spack environments are prohibited from modifying 'config:environments_root' "
"because it can make the definition of the environment ill-posed. Please "
"remove from your environment and place it in a permanent scope such as "
"defaults, system, site, etc."
)
with spack.config.use_configuration(scope):
if spack.config.get("config:environments_root"):
raise SpackEnvironmentError(
"Spack environments are prohibited from modifying 'config:environments_root' "
"because it can make the definition of the environment ill-posed. Please "
"remove from your environment and place it in a permanent scope such as "
"defaults, system, site, etc."
)
return scopes
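A sketch of the check in action, assuming ``InternalConfigScope`` accepts an in-memory dict (function and scope names per the hunk above):

import spack.config
from spack.environment.environment import ensure_no_disallowed_env_config_mods

# an in-memory scope carrying the forbidden key; expected to raise
# SpackEnvironmentError per the check above
scope = spack.config.InternalConfigScope(
    "bad-env", {"config": {"environments_root": "/tmp/envs"}}
)
ensure_no_disallowed_env_config_mods([scope])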
def default_manifest_yaml():
@@ -159,8 +160,6 @@ def default_manifest_yaml():
default_view_name = "default"
# Default behavior to link all packages into views (vs. only root packages)
default_view_link = "all"
# The name for any included concrete specs
included_concrete_name = "include_concrete"
def installed_specs():
@@ -295,7 +294,6 @@ def create(
init_file: Optional[Union[str, pathlib.Path]] = None,
with_view: Optional[Union[str, pathlib.Path, bool]] = None,
keep_relative: bool = False,
include_concrete: Optional[List[str]] = None,
) -> "Environment":
"""Create a managed environment in Spack and returns it.
@@ -312,15 +310,10 @@ def create(
string, it specifies the path to the view
keep_relative: if True, develop paths are copied verbatim into the new environment file,
otherwise they are made absolute
include_concrete: list of concrete environment names/paths to be included
"""
environment_dir = environment_dir_from_name(name, exists_ok=False)
return create_in_dir(
environment_dir,
init_file=init_file,
with_view=with_view,
keep_relative=keep_relative,
include_concrete=include_concrete,
environment_dir, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
@@ -329,7 +322,6 @@ def create_in_dir(
init_file: Optional[Union[str, pathlib.Path]] = None,
with_view: Optional[Union[str, pathlib.Path, bool]] = None,
keep_relative: bool = False,
include_concrete: Optional[List[str]] = None,
) -> "Environment":
"""Create an environment in the directory passed as input and returns it.
@@ -343,7 +335,6 @@ def create_in_dir(
string, it specifies the path to the view
keep_relative: if True, develop paths are copied verbatim into the new environment file,
otherwise they are made absolute
include_concrete: concrete environment names/paths to be included
"""
initialize_environment_dir(root, envfile=init_file)
@@ -356,12 +347,6 @@ def create_in_dir(
if with_view is not None:
manifest.set_default_view(with_view)
if include_concrete is not None:
set_included_envs_to_env_paths(include_concrete)
validate_included_envs_exists(include_concrete)
validate_included_envs_concrete(include_concrete)
manifest.set_include_concrete(include_concrete)
manifest.flush()
except (spack.config.ConfigFormatError, SpackEnvironmentConfigError) as e:
@@ -435,67 +420,6 @@ def ensure_env_root_path_exists():
fs.mkdirp(env_root_path())
def set_included_envs_to_env_paths(include_concrete: List[str]) -> None:
"""If the included environment(s) is the environment name
it is replaced by the path to the environment
Args:
include_concrete: list of env name or path to env"""
for i, env_name in enumerate(include_concrete):
if is_env_dir(env_name):
include_concrete[i] = env_name
elif exists(env_name):
include_concrete[i] = root(env_name)
def validate_included_envs_exists(include_concrete: List[str]) -> None:
"""Checks that all of the included environments exist
Args:
include_concrete: list of already existing concrete environments to include
Raises:
SpackEnvironmentError: if any of the included environments do not exist
"""
missing_envs = set()
for i, env_name in enumerate(include_concrete):
if not is_env_dir(env_name):
missing_envs.add(env_name)
if missing_envs:
msg = "The following environment(s) are missing: {0}".format(", ".join(missing_envs))
raise SpackEnvironmentError(msg)
def validate_included_envs_concrete(include_concrete: List[str]) -> None:
"""Checks that all of the included environments are concrete
Args:
include_concrete: list of already existing concrete environments to include
Raises:
SpackEnvironmentError: if any of the included environments are not concrete
"""
non_concrete_envs = set()
for env_path in include_concrete:
if not os.path.exists(Environment(env_path).lock_path):
non_concrete_envs.add(Environment(env_path).name)
if non_concrete_envs:
msg = "The following environment(s) are not concrete: {0}\n" "Please run:".format(
", ".join(non_concrete_envs)
)
for env in non_concrete_envs:
msg += f"\n\t`spack -e {env} concretize`"
raise SpackEnvironmentError(msg)
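Putting the new keyword together, a sketch (hypothetical path) of creating a combined environment; the helpers above validate that the included environment exists and has a lockfile:

import spack.environment as ev

combined = ev.create("combined", include_concrete=["/path/to/concrete/env"])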
def all_environment_names():
"""List the names of environments that currently exist."""
# just return empty if the env path does not exist. A read-only
@@ -606,7 +530,7 @@ def __init__(
self.projections = projections
self.select = select
self.exclude = exclude
self.link_type = fsv.canonicalize_link_type(link_type)
self.link_type = view_func_parser(link_type)
self.link = link
def select_fn(self, spec):
@@ -640,7 +564,7 @@ def to_dict(self):
if self.exclude:
ret["exclude"] = self.exclude
if self.link_type:
ret["link_type"] = self.link_type
ret["link_type"] = inverse_view_func_parser(self.link_type)
if self.link != default_view_link:
ret["link"] = self.link
return ret
@@ -662,7 +586,7 @@ def _current_root(self):
if not os.path.islink(self.root):
return None
root = readlink(self.root)
root = os.readlink(self.root)
if os.path.isabs(root):
return root
@@ -690,7 +614,7 @@ def get_projection_for_spec(self, spec):
to exist on the filesystem."""
return self._view(self.root).get_projection_for_spec(spec)
def view(self, new: Optional[str] = None) -> fsv.SimpleFilesystemView:
def view(self, new: Optional[str] = None) -> SimpleFilesystemView:
"""
Returns a view object for the *underlying* view directory. This means that the
self.root symlink is followed, and that the view has to exist on the filesystem
@@ -710,14 +634,14 @@ def view(self, new: Optional[str] = None) -> fsv.SimpleFilesystemView:
)
return self._view(path)
def _view(self, root: str) -> fsv.SimpleFilesystemView:
def _view(self, root: str) -> SimpleFilesystemView:
"""Returns a view object for a given root dir."""
return fsv.SimpleFilesystemView(
return SimpleFilesystemView(
root,
spack.store.STORE.layout,
ignore_conflicts=True,
projections=self.projections,
link_type=self.link_type,
link=self.link_type,
)
def __contains__(self, spec):
@@ -898,18 +822,6 @@ def __init__(self, manifest_dir: Union[str, pathlib.Path]) -> None:
self.specs_by_hash: Dict[str, Spec] = {}
#: Repository for this environment (memoized)
self._repo = None
#: Environment paths for concrete (lockfile) included environments
self.included_concrete_envs: List[str] = []
#: First-level included concretized spec data from/to the lockfile.
self.included_concrete_spec_data: Dict[str, Dict[str, List[str]]] = {}
#: User specs from included environments from the last concretization
self.included_concretized_user_specs: Dict[str, List[Spec]] = {}
#: Roots from included environments with the last concretization, in order
self.included_concretized_order: Dict[str, List[str]] = {}
#: Concretized specs by hash from the included environments
self.included_specs_by_hash: Dict[str, Dict[str, Spec]] = {}
#: Previously active environment
self._previous_active = None
self._dev_specs = None
@@ -947,7 +859,7 @@ def _read(self):
if os.path.exists(self.lock_path):
with open(self.lock_path) as f:
read_lock_version = self._read_lockfile(f)["_meta"]["lockfile-version"]
read_lock_version = self._read_lockfile(f)
if read_lock_version == 1:
tty.debug(f"Storing backup of {self.lock_path} at {self._lock_backup_v1_path}")
@@ -1015,20 +927,6 @@ def add_view(name, values):
if self.views == dict():
self.views[default_view_name] = ViewDescriptor(self.path, self.view_path_default)
def _process_concrete_includes(self):
"""Extract and load into memory included concrete spec data."""
self.included_concrete_envs = self.manifest[TOP_LEVEL_KEY].get(included_concrete_name, [])
if self.included_concrete_envs:
if os.path.exists(self.lock_path):
with open(self.lock_path) as f:
data = self._read_lockfile(f)
if included_concrete_name in data:
self.included_concrete_spec_data = data[included_concrete_name]
else:
self.include_concrete_envs()
def _construct_state_from_manifest(self):
"""Set up user specs and views from the manifest file."""
self.spec_lists = collections.OrderedDict()
@@ -1045,31 +943,6 @@ def _construct_state_from_manifest(self):
self.spec_lists[user_speclist_name] = user_specs
self._process_view(spack.config.get("view", True))
self._process_concrete_includes()
def all_concretized_user_specs(self) -> List[Spec]:
"""Returns all of the concretized user specs of the environment and
its included environment(s)."""
concretized_user_specs = self.concretized_user_specs[:]
for included_specs in self.included_concretized_user_specs.values():
for included in included_specs:
# Don't duplicate included spec(s)
if included not in concretized_user_specs:
concretized_user_specs.append(included)
return concretized_user_specs
def all_concretized_orders(self) -> List[str]:
"""Returns all of the concretized order of the environment and
its included environment(s)."""
concretized_order = self.concretized_order[:]
for included_concretized_order in self.included_concretized_order.values():
for included in included_concretized_order:
# Don't duplicate included spec(s)
if included not in concretized_order:
concretized_order.append(included)
return concretized_order
@property
def user_specs(self):
@@ -1094,26 +967,6 @@ def _read_dev_specs(self):
dev_specs[name] = local_entry
return dev_specs
@property
def included_user_specs(self) -> SpecList:
"""Included concrete user (or root) specs from last concretization."""
spec_list = SpecList()
if not self.included_concrete_envs:
return spec_list
def add_root_specs(included_concrete_specs):
# add specs from the include *and* any nested includes it may have
for env, info in included_concrete_specs.items():
for root_list in info["roots"]:
spec_list.add(root_list["spec"])
if "include_concrete" in info:
add_root_specs(info["include_concrete"])
add_root_specs(self.included_concrete_spec_data)
return spec_list
def clear(self, re_read=False):
"""Clear the contents of the environment
@@ -1125,15 +978,9 @@ def clear(self, re_read=False):
self.spec_lists[user_speclist_name] = SpecList()
self._dev_specs = {}
self.concretized_order = [] # roots of last concretize, in order
self.concretized_user_specs = [] # user specs from last concretize
self.concretized_order = [] # roots of last concretize, in order
self.specs_by_hash = {} # concretized specs by hash
self.included_concrete_spec_data = {} # concretized specs from lockfile of included envs
self.included_concretized_order = {} # root specs of the included envs, keyed by env path
self.included_concretized_user_specs = {} # user specs from last concretize's included env
self.included_specs_by_hash = {} # concretized specs by hash from the included envs
self.invalidate_repository_cache()
self._previous_active = None # previously active environment
if not re_read:
@@ -1187,43 +1034,6 @@ def scope_name(self):
"""Name of the config scope of this environment's manifest file."""
return self.manifest.scope_name
def include_concrete_envs(self):
"""Copy and save the included envs' specs internally"""
root_hash_seen = set()
concrete_hash_seen = set()
self.included_concrete_spec_data = {}
for env_path in self.included_concrete_envs:
# Check that environment exists
if not is_env_dir(env_path):
raise SpackEnvironmentError(f"Unable to find env at {env_path}")
env = Environment(env_path)
self.included_concrete_spec_data[env_path] = {"roots": [], "concrete_specs": {}}
# Copy unique root specs from env
for root_dict in env._concrete_roots_dict():
if root_dict["hash"] not in root_hash_seen:
self.included_concrete_spec_data[env_path]["roots"].append(root_dict)
root_hash_seen.add(root_dict["hash"])
# Copy unique concrete specs from env
for dag_hash, spec_details in env._concrete_specs_dict().items():
if dag_hash not in concrete_hash_seen:
self.included_concrete_spec_data[env_path]["concrete_specs"].update(
{dag_hash: spec_details}
)
concrete_hash_seen.add(dag_hash)
# Copy transitive include data
transitive = env.included_concrete_spec_data
if transitive:
self.included_concrete_spec_data[env_path]["include_concrete"] = transitive
self._read_lockfile_dict(self._to_lockfile_dict())
self.write()
def destroy(self):
"""Remove this environment from Spack entirely."""
shutil.rmtree(self.path)
@@ -1423,10 +1233,6 @@ def concretize(self, force=False, tests=False):
for spec in set(self.concretized_user_specs) - set(self.user_specs):
self.deconcretize(spec, concrete=False)
# If a combined env, check updated spec is in the linked envs
if self.included_concrete_envs:
self.include_concrete_envs()
# Pick the right concretization strategy
if self.unify == "when_possible":
return self._concretize_together_where_possible(tests=tests)
@@ -1610,7 +1416,7 @@ def _concretize_separately(self, tests=False):
# Ensure we don't try to bootstrap clingo in parallel
if spack.config.get("config:concretizer", "clingo") == "clingo":
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
spack.bootstrap.ensure_core_dependencies()
# Ensure all the indexes have been built or updated, since
# otherwise the processes in the pool may timeout on waiting
@@ -1621,7 +1427,7 @@ def _concretize_separately(self, tests=False):
# Ensure we have compilers in compilers.yaml to avoid that
# processes try to write the config file in parallel
_ = spack.compilers.get_compiler_config(spack.config.CONFIG, init_config=True)
_ = spack.compilers.get_compiler_config(init_config=True)
# Early return if there is nothing to do
if len(args) == 0:
@@ -1899,14 +1705,8 @@ def _partition_roots_by_install_status(self):
of per spec."""
installed, uninstalled = [], []
with spack.store.STORE.db.read_transaction():
for concretized_hash in self.all_concretized_orders():
if concretized_hash in self.specs_by_hash:
spec = self.specs_by_hash[concretized_hash]
else:
for env_path in self.included_specs_by_hash.keys():
if concretized_hash in self.included_specs_by_hash[env_path]:
spec = self.included_specs_by_hash[env_path][concretized_hash]
break
for concretized_hash in self.concretized_order:
spec = self.specs_by_hash[concretized_hash]
if not spec.installed or (
spec.satisfies("dev_path=*") or spec.satisfies("^dev_path=*")
):
@@ -1986,14 +1786,8 @@ def added_specs(self):
def concretized_specs(self):
"""Tuples of (user spec, concrete spec) for all concrete specs."""
for s, h in zip(self.all_concretized_user_specs(), self.all_concretized_orders()):
if h in self.specs_by_hash:
yield (s, self.specs_by_hash[h])
else:
for env_path in self.included_specs_by_hash.keys():
if h in self.included_specs_by_hash[env_path]:
yield (s, self.included_specs_by_hash[env_path][h])
break
for s, h in zip(self.concretized_user_specs, self.concretized_order):
yield (s, self.specs_by_hash[h])
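Usage sketch, assuming ``env`` is an ``Environment`` instance; the pairs now also cover roots that come from included environments:

for user_spec, concrete in env.concretized_specs():
    print(f"{user_spec} -> {concrete.dag_hash()}")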
def concrete_roots(self):
"""Same as concretized_specs, except it returns the list of concrete
@@ -2122,7 +1916,8 @@ def _get_environment_specs(self, recurse_dependencies=True):
If these specs appear under different user_specs, only one copy
is added to the list returned.
"""
specs = [self.specs_by_hash[h] for h in self.all_concretized_orders()]
specs = [self.specs_by_hash[h] for h in self.concretized_order]
if recurse_dependencies:
specs.extend(
traverse.traverse_nodes(
@@ -2132,23 +1927,16 @@ def _get_environment_specs(self, recurse_dependencies=True):
return specs
def _concrete_specs_dict(self):
def _to_lockfile_dict(self):
"""Create a dictionary to store a lockfile for this environment."""
concrete_specs = {}
for s in traverse.traverse_nodes(self.specs_by_hash.values(), key=traverse.by_dag_hash):
spec_dict = s.node_dict_with_hashes(hash=ht.dag_hash)
# Assumes no legacy formats, since this was just created.
spec_dict[ht.dag_hash.name] = s.dag_hash()
concrete_specs[s.dag_hash()] = spec_dict
return concrete_specs
def _concrete_roots_dict(self):
hash_spec_list = zip(self.concretized_order, self.concretized_user_specs)
return [{"hash": h, "spec": str(s)} for h, s in hash_spec_list]
def _to_lockfile_dict(self):
"""Create a dictionary to store a lockfile for this environment."""
concrete_specs = self._concrete_specs_dict()
root_specs = self._concrete_roots_dict()
spack_dict = {"version": spack.spack_version}
spack_commit = spack.main.get_spack_commit()
@@ -2169,81 +1957,36 @@ def _to_lockfile_dict(self):
# spack version information
"spack": spack_dict,
# users specs + hashes are the 'roots' of the environment
"roots": root_specs,
"roots": [{"hash": h, "spec": str(s)} for h, s in hash_spec_list],
# Concrete specs by hash, including dependencies
"concrete_specs": concrete_specs,
}
if self.included_concrete_envs:
data[included_concrete_name] = self.included_concrete_spec_data
return data
def _read_lockfile(self, file_or_json):
"""Read a lockfile from a file or from a raw string."""
lockfile_dict = sjson.load(file_or_json)
self._read_lockfile_dict(lockfile_dict)
return lockfile_dict
def set_included_concretized_user_specs(
self,
env_name: str,
env_info: Dict[str, Dict[str, Any]],
included_json_specs_by_hash: Dict[str, Dict[str, Any]],
) -> Dict[str, Dict[str, Any]]:
"""Sets all of the concretized user specs from included environments
to include those from nested included environments.
Args:
env_name: the name (technically the path) of the included environment
env_info: included concrete environment data
included_json_specs_by_hash: concrete spec data keyed by hash
Returns: updated specs_by_hash
"""
self.included_concretized_order[env_name] = []
self.included_concretized_user_specs[env_name] = []
def add_specs(name, info, specs_by_hash):
# Add specs from the environment as well as any of its nested
# environments.
for root_info in info["roots"]:
self.included_concretized_order[name].append(root_info["hash"])
self.included_concretized_user_specs[name].append(Spec(root_info["spec"]))
if "concrete_specs" in info:
specs_by_hash.update(info["concrete_specs"])
if included_concrete_name in info:
for included_name, included_info in info[included_concrete_name].items():
if included_name not in self.included_concretized_order:
self.included_concretized_order[included_name] = []
self.included_concretized_user_specs[included_name] = []
add_specs(included_name, included_info, specs_by_hash)
add_specs(env_name, env_info, included_json_specs_by_hash)
return included_json_specs_by_hash
return lockfile_dict["_meta"]["lockfile-version"]
def _read_lockfile_dict(self, d):
"""Read a lockfile dictionary into this environment."""
self.specs_by_hash = {}
self.included_specs_by_hash = {}
self.included_concretized_user_specs = {}
self.included_concretized_order = {}
roots = d["roots"]
self.concretized_user_specs = [Spec(r["spec"]) for r in roots]
self.concretized_order = [r["hash"] for r in roots]
json_specs_by_hash = d["concrete_specs"]
included_json_specs_by_hash = {}
if included_concrete_name in d:
for env_name, env_info in d[included_concrete_name].items():
included_json_specs_by_hash.update(
self.set_included_concretized_user_specs(
env_name, env_info, included_json_specs_by_hash
)
)
# Track specs by their lockfile key. Currently spack uses the finest
# grained hash as the lockfile key, while older formats used the build
# hash or a previous incarnation of the DAG hash (one that did not
# include build deps or package hash).
specs_by_hash = {}
# Track specs by their DAG hash, allows handling DAG hash collisions
first_seen = {}
current_lockfile_format = d["_meta"]["lockfile-version"]
try:
reader = READER_CLS[current_lockfile_format]
@@ -2256,39 +1999,6 @@ def _read_lockfile_dict(self, d):
msg += " You need to use a newer Spack version."
raise SpackEnvironmentError(msg)
first_seen, self.concretized_order = self.filter_specs(
reader, json_specs_by_hash, self.concretized_order
)
for spec_dag_hash in self.concretized_order:
self.specs_by_hash[spec_dag_hash] = first_seen[spec_dag_hash]
if any(self.included_concretized_order.values()):
first_seen = {}
for env_name, concretized_order in self.included_concretized_order.items():
filtered_spec, self.included_concretized_order[env_name] = self.filter_specs(
reader, included_json_specs_by_hash, concretized_order
)
first_seen.update(filtered_spec)
for env_path, spec_hashes in self.included_concretized_order.items():
self.included_specs_by_hash[env_path] = {}
for spec_dag_hash in spec_hashes:
self.included_specs_by_hash[env_path].update(
{spec_dag_hash: first_seen[spec_dag_hash]}
)
def filter_specs(self, reader, json_specs_by_hash, order_concretized):
# Track specs by their lockfile key. Currently spack uses the finest
# grained hash as the lockfile key, while older formats used the build
# hash or a previous incarnation of the DAG hash (one that did not
# include build deps or package hash).
specs_by_hash = {}
# Track specs by their DAG hash, allows handling DAG hash collisions
first_seen = {}
# First pass: Put each spec in the map ignoring dependencies
for lockfile_key, node_dict in json_specs_by_hash.items():
spec = reader.from_node_dict(node_dict)
@@ -2311,8 +2021,7 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
# keep. This is only required as long as we support older lockfile
# formats where the mapping from DAG hash to lockfile key is possibly
# one-to-many.
for lockfile_key in order_concretized:
for lockfile_key in self.concretized_order:
for s in specs_by_hash[lockfile_key].traverse():
if s.dag_hash() not in first_seen:
first_seen[s.dag_hash()] = s
@@ -2320,10 +2029,12 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
# Now make sure concretized_order and our internal specs dict
# contains the keys used by modern spack (i.e. the dag_hash
# that includes build deps and package hash).
self.concretized_order = [
specs_by_hash[h_key].dag_hash() for h_key in self.concretized_order
]
order_concretized = [specs_by_hash[h_key].dag_hash() for h_key in order_concretized]
return first_seen, order_concretized
for spec_dag_hash in self.concretized_order:
self.specs_by_hash[spec_dag_hash] = first_seen[spec_dag_hash]
def write(self, regenerate: bool = True) -> None:
"""Writes an in-memory environment to its location on disk.
@@ -2336,7 +2047,7 @@ def write(self, regenerate: bool = True) -> None:
regenerate: regenerate views and run post-write hooks as well as writing if True.
"""
self.manifest_uptodate_or_warn()
if self.specs_by_hash or self.included_concrete_envs:
if self.specs_by_hash:
self.ensure_env_directory_exists(dot_env=True)
self.update_environment_repository()
self.manifest.flush()
@@ -2752,10 +2463,6 @@ def __init__(self, manifest_dir: Union[pathlib.Path, str]) -> None:
self.scope_name = f"env:{environment_name(self.manifest_dir)}"
self.config_stage_dir = os.path.join(env_subdir_path(manifest_dir), "config")
#: Configuration scopes associated with this environment. Note that these are not
#: invalidated by a re-read of the manifest file.
self._config_scopes: Optional[List[spack.config.ConfigScope]] = None
if not self.manifest_file.exists():
msg = f"cannot find '{manifest_name}' in {self.manifest_dir}"
raise SpackEnvironmentError(msg)
@@ -2835,19 +2542,6 @@ def override_user_spec(self, user_spec: str, idx: int) -> None:
raise SpackEnvironmentError(msg) from e
self.changed = True
def set_include_concrete(self, include_concrete: List[str]) -> None:
"""Sets the included concrete environments in the manifest to the value(s) passed as input.
Args:
include_concrete: list of already existing concrete environments to include
"""
self.pristine_configuration[included_concrete_name] = []
for env_path in include_concrete:
self.pristine_configuration[included_concrete_name].append(env_path)
self.changed = True
def add_definition(self, user_spec: str, list_name: str) -> None:
"""Appends a user spec to the first active definition matching the name passed as argument.
@@ -3031,56 +2725,54 @@ def included_config_scopes(self) -> List[spack.config.ConfigScope]:
for i, config_path in enumerate(reversed(includes)):
# allow paths to contain spack config/environment variables, etc.
config_path = substitute_path_variables(config_path)
include_url = urllib.parse.urlparse(config_path)
# If scheme is not valid, config_path is not a url
# of a type Spack is generally aware
if spack.util.url.validate_scheme(include_url.scheme):
# Transform file:// URLs to direct includes.
if include_url.scheme == "file":
config_path = urllib.request.url2pathname(include_url.path)
# Transform file:// URLs to direct includes.
if include_url.scheme == "file":
config_path = urllib.request.url2pathname(include_url.path)
# Any other URL should be fetched.
elif include_url.scheme in ("http", "https", "ftp"):
# Stage any remote configuration file(s)
staged_configs = (
os.listdir(self.config_stage_dir)
if os.path.exists(self.config_stage_dir)
else []
# Any other URL should be fetched.
elif include_url.scheme in ("http", "https", "ftp"):
# Stage any remote configuration file(s)
staged_configs = (
os.listdir(self.config_stage_dir)
if os.path.exists(self.config_stage_dir)
else []
)
remote_path = urllib.request.url2pathname(include_url.path)
basename = os.path.basename(remote_path)
if basename in staged_configs:
# Do NOT re-stage configuration files over existing
# ones with the same name since there is a risk of
# losing changes (e.g., from 'spack config update').
tty.warn(
"Will not re-stage configuration from {0} to avoid "
"losing changes to the already staged file of the "
"same name.".format(remote_path)
)
remote_path = urllib.request.url2pathname(include_url.path)
basename = os.path.basename(remote_path)
if basename in staged_configs:
# Do NOT re-stage configuration files over existing
# ones with the same name since there is a risk of
# losing changes (e.g., from 'spack config update').
tty.warn(
"Will not re-stage configuration from {0} to avoid "
"losing changes to the already staged file of the "
"same name.".format(remote_path)
)
# Recognize the configuration stage directory
# is flattened to ensure a single copy of each
# configuration file.
config_path = self.config_stage_dir
if basename.endswith(".yaml"):
config_path = os.path.join(config_path, basename)
else:
staged_path = spack.config.fetch_remote_configs(
config_path, str(self.config_stage_dir), skip_existing=True
)
if not staged_path:
raise SpackEnvironmentError(
"Unable to fetch remote configuration {0}".format(config_path)
)
config_path = staged_path
elif include_url.scheme:
raise ValueError(
f"Unsupported URL scheme ({include_url.scheme}) for "
f"environment include: {config_path}"
# Recognize the configuration stage directory
# is flattened to ensure a single copy of each
# configuration file.
config_path = self.config_stage_dir
if basename.endswith(".yaml"):
config_path = os.path.join(config_path, basename)
else:
staged_path = spack.config.fetch_remote_configs(
config_path, str(self.config_stage_dir), skip_existing=True
)
if not staged_path:
raise SpackEnvironmentError(
"Unable to fetch remote configuration {0}".format(config_path)
)
config_path = staged_path
elif include_url.scheme:
raise ValueError(
f"Unsupported URL scheme ({include_url.scheme}) for "
f"environment include: {config_path}"
)
# treat relative paths as relative to the environment
if not os.path.isabs(config_path):
@@ -3116,19 +2808,16 @@ def included_config_scopes(self) -> List[spack.config.ConfigScope]:
@property
def env_config_scopes(self) -> List[spack.config.ConfigScope]:
"""A list of all configuration scopes for the environment manifest. On the first call this
instantiates all the scopes, on subsequent calls it returns the cached list."""
if self._config_scopes is not None:
return self._config_scopes
scopes: List[spack.config.ConfigScope] = [
*self.included_config_scopes,
spack.config.SingleFileScope(
self.scope_name, str(self.manifest_file), spack.schema.env.schema, [TOP_LEVEL_KEY]
),
]
ensure_no_disallowed_env_config_mods(scopes)
self._config_scopes = scopes
return scopes
"""A list of all configuration scopes for the environment manifest.
Returns: All configuration scopes associated with the environment
"""
config_name = self.scope_name
env_scope = spack.config.SingleFileScope(
config_name, str(self.manifest_file), spack.schema.env.schema, [TOP_LEVEL_KEY]
)
return check_disallowed_env_config_mods(self.included_config_scopes + [env_scope])
def prepare_config_scope(self) -> None:
"""Add the manifest's scopes to the global configuration search path."""

View File

@@ -554,7 +554,7 @@ def fetch(self):
try:
response = self._urlopen(self.url)
except (TimeoutError, urllib.error.URLError) as e:
except urllib.error.URLError as e:
# clean up archive on failure.
if self.archive_file:
os.remove(self.archive_file)

View File

@@ -10,9 +10,8 @@
import shutil
import stat
import sys
from typing import Callable, Dict, Optional
from typing import Optional
from llnl.string import comma_or
from llnl.util import tty
from llnl.util.filesystem import (
mkdirp,
@@ -50,20 +49,19 @@
_projections_path = ".spack/projections.yaml"
LinkCallbackType = Callable[[str, str, "FilesystemView", Optional["spack.spec.Spec"]], None]
def view_symlink(src: str, dst: str, *args, **kwargs) -> None:
def view_symlink(src, dst, **kwargs):
# keyword arguments are irrelevant
# here to fit required call signature
symlink(src, dst)
def view_hardlink(src: str, dst: str, *args, **kwargs) -> None:
def view_hardlink(src, dst, **kwargs):
# keyword arguments are irrelevant
# here to fit required call signature
os.link(src, dst)
def view_copy(
src: str, dst: str, view: "FilesystemView", spec: Optional["spack.spec.Spec"] = None
) -> None:
def view_copy(src: str, dst: str, view, spec: Optional[spack.spec.Spec] = None):
"""
Copy a file from src to dst.
@@ -106,40 +104,27 @@ def view_copy(
tty.debug(f"Can't change the permissions for {dst}")
#: supported string values for `link_type` in an env, mapped to canonical values
_LINK_TYPES = {
"hardlink": "hardlink",
"hard": "hardlink",
"copy": "copy",
"relocate": "copy",
"add": "symlink",
"symlink": "symlink",
"soft": "symlink",
}
_VALID_LINK_TYPES = sorted(set(_LINK_TYPES.values()))
def canonicalize_link_type(link_type: str) -> str:
"""Return canonical"""
canonical = _LINK_TYPES.get(link_type)
if not canonical:
raise ValueError(
f"Invalid link type: '{link_type}'. Must be one of {comma_or(_VALID_LINK_TYPES)}"
)
return canonical
def function_for_link_type(link_type: str) -> LinkCallbackType:
link_type = canonicalize_link_type(link_type)
if link_type == "hardlink":
def view_func_parser(parsed_name):
# What method are we using for this view
if parsed_name in ("hardlink", "hard"):
return view_hardlink
elif link_type == "symlink":
return view_symlink
elif link_type == "copy":
elif parsed_name in ("copy", "relocate"):
return view_copy
elif parsed_name in ("add", "symlink", "soft"):
return view_symlink
else:
raise ValueError(f"invalid link type for view: '{parsed_name}'")
assert False, "invalid link type" # need mypy Literal values
def inverse_view_func_parser(view_type):
# get string based on view type
if view_type is view_hardlink:
link_name = "hardlink"
elif view_type is view_copy:
link_name = "copy"
else:
link_name = "symlink"
return link_name
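A quick sketch of the new helpers defined above:

from spack.filesystem_view import canonicalize_link_type, function_for_link_type

assert canonicalize_link_type("hard") == "hardlink"
assert canonicalize_link_type("relocate") == "copy"
link_fn = function_for_link_type("soft")  # resolves to view_symlink
# canonicalize_link_type("junction") would raise ValueError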
class FilesystemView:
@@ -155,16 +140,7 @@ class FilesystemView:
directory structure.
"""
def __init__(
self,
root: str,
layout: "spack.directory_layout.DirectoryLayout",
*,
projections: Optional[Dict] = None,
ignore_conflicts: bool = False,
verbose: bool = False,
link_type: str = "symlink",
):
def __init__(self, root, layout, **kwargs):
"""
Initialize a filesystem view under the given `root` directory with
corresponding directory `layout`.
@@ -173,14 +149,15 @@ def __init__(
"""
self._root = root
self.layout = layout
self.projections = {} if projections is None else projections
self.ignore_conflicts = ignore_conflicts
self.verbose = verbose
self.projections = kwargs.get("projections", {})
self.ignore_conflicts = kwargs.get("ignore_conflicts", False)
self.verbose = kwargs.get("verbose", False)
# Setup link function to include view
self.link_type = link_type
self.link = ft.partial(function_for_link_type(link_type), view=self)
link_func = kwargs.get("link", view_symlink)
self.link = ft.partial(link_func, view=self)
def add_specs(self, *specs, **kwargs):
"""
@@ -278,24 +255,8 @@ class YamlFilesystemView(FilesystemView):
Filesystem view to work with a yaml based directory layout.
"""
def __init__(
self,
root: str,
layout: "spack.directory_layout.DirectoryLayout",
*,
projections: Optional[Dict] = None,
ignore_conflicts: bool = False,
verbose: bool = False,
link_type: str = "symlink",
):
super().__init__(
root,
layout,
projections=projections,
ignore_conflicts=ignore_conflicts,
verbose=verbose,
link_type=link_type,
)
def __init__(self, root, layout, **kwargs):
super().__init__(root, layout, **kwargs)
# Super class gets projections from the kwargs
# YAML specific to get projections from YAML file
@@ -677,6 +638,9 @@ class SimpleFilesystemView(FilesystemView):
"""A simple and partial implementation of FilesystemView focused on performance and immutable
views, where specs cannot be removed after they were added."""
def __init__(self, root, layout, **kwargs):
super().__init__(root, layout, **kwargs)
def _sanity_check_view_projection(self, specs):
"""A very common issue is that we end up with two specs of the same package, that project
to the same prefix. We want to catch that as early as possible and give a sensible error to
@@ -698,6 +662,9 @@ def add_specs(self, *specs: spack.spec.Spec) -> None:
return
# Drop externals
for s in specs:
if s.external:
tty.warn("Skipping external package: " + s.short_spec)
specs = [s for s in specs if not s.external]
self._sanity_check_view_projection(specs)

View File

@@ -1,31 +0,0 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import spack.binary_distribution as bindist
import spack.mirror
def post_install(spec, explicit):
# Push package to all buildcaches with autopush==True
# Do nothing if spec is an external package
if spec.external:
return
# Do nothing if package was not installed from source
pkg = spec.package
if pkg.installed_from_binary_cache:
return
# Push the package to all autopush mirrors
for mirror in spack.mirror.MirrorCollection(binary=True, autopush=True).values():
bindist.push_or_raise(
spec,
mirror.push_url,
bindist.PushOptions(force=True, regenerate_index=False, unsigned=not mirror.signed),
)
tty.msg(f"{spec.name}: Pushed to build cache: '{mirror.name}'")
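For context, a sketch (hypothetical mirror name/URL) of the ``autopush`` flag this hook keys on; the corresponding ``Mirror`` property appears in the mirror.py hunk further down:

import spack.mirror

cache = spack.mirror.Mirror(
    data={"url": "s3://example-cache", "autopush": True}, name="example-cache"
)
assert cache.autopush  # this mirror would receive the post-install push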

View File

@@ -1,8 +0,0 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
def post_install(spec, explicit=None):
spec.package.windows_establish_runtime_linkage()

View File

@@ -488,10 +488,6 @@ def _process_binary_cache_tarball(
with timer.measure("install"), spack.util.path.filter_padding():
binary_distribution.extract_tarball(pkg.spec, download_result, force=False, timer=timer)
pkg.windows_establish_runtime_linkage()
if hasattr(pkg, "_post_buildcache_install_hook"):
pkg._post_buildcache_install_hook()
pkg.installed_from_binary_cache = True
spack.store.STORE.db.add(pkg.spec, spack.store.STORE.layout, explicit=explicit)
@@ -980,11 +976,7 @@ def __init__(
# a dependency of the build task. Here we add it to self.dependencies
compiler_spec = self.pkg.spec.compiler
arch_spec = self.pkg.spec.architecture
strict = spack.concretize.Concretizer().check_for_compiler_existence
if (
not spack.compilers.compilers_for_spec(compiler_spec, arch_spec=arch_spec)
and not strict
):
if not spack.compilers.compilers_for_spec(compiler_spec, arch_spec=arch_spec):
# The compiler is in the queue, identify it as dependency
dep = spack.compilers.pkg_spec_for_compiler(compiler_spec)
dep.constrain(f"platform={str(arch_spec.platform)}")
@@ -1699,6 +1691,10 @@ def _install_task(self, task: BuildTask, install_status: InstallStatus) -> None:
spack.package_base.PackageBase._verbose = spack.build_environment.start_build_process(
pkg, build_process, install_args
)
# Currently this is how RPATH-like behavior is achieved on Windows, after install
# establish runtime linkage via Windows Runtime link object
# Note: this is a no-op on non Windows platforms
pkg.windows_establish_runtime_linkage()
# Note: PARENT of the build process adds the new package to
# the database, so that we don't need to re-read from file.
spack.store.STORE.db.add(pkg.spec, spack.store.STORE.layout, explicit=explicit)

View File

@@ -427,7 +427,7 @@ def make_argument_parser(**kwargs):
parser.add_argument(
"--color",
action="store",
default=None,
default=os.environ.get("SPACK_COLOR", "auto"),
choices=("always", "never", "auto"),
help="when to colorize output (default: auto)",
)
@@ -622,8 +622,7 @@ def setup_main_options(args):
# with color
color.try_enable_terminal_color_on_windows()
# when to use color (takes always, auto, or never)
if args.color is not None:
color.set_color_when(args.color)
color.set_color_when(args.color)
def allows_unknown_args(command):

View File

@@ -137,12 +137,6 @@ def source(self):
def signed(self) -> bool:
return isinstance(self._data, str) or self._data.get("signed", True)
@property
def autopush(self) -> bool:
if isinstance(self._data, str):
return False
return self._data.get("autopush", False)
@property
def fetch_url(self):
"""Get the valid, canonicalized fetch URL"""
@@ -156,7 +150,7 @@ def push_url(self):
def _update_connection_dict(self, current_data: dict, new_data: dict, top_level: bool):
keys = ["url", "access_pair", "access_token", "profile", "endpoint_url"]
if top_level:
keys += ["binary", "source", "signed", "autopush"]
keys += ["binary", "source", "signed"]
changed = False
for key in keys:
if key in new_data and current_data.get(key) != new_data[key]:
@@ -292,7 +286,6 @@ def __init__(
scope=None,
binary: Optional[bool] = None,
source: Optional[bool] = None,
autopush: Optional[bool] = None,
):
"""Initialize a mirror collection.
@@ -304,27 +297,21 @@ def __init__(
If None, do not filter on binary mirrors.
source: If True, only include source mirrors.
If False, omit source mirrors.
If None, do not filter on source mirrors.
autopush: If True, only include mirrors that have autopush enabled.
If False, omit mirrors that have autopush enabled.
If None, do not filter on autopush."""
mirrors_data = (
mirrors.items()
if mirrors is not None
else spack.config.get("mirrors", scope=scope).items()
)
mirrors = (Mirror(data=mirror, name=name) for name, mirror in mirrors_data)
If None, do not filter on source mirrors."""
self._mirrors = {
name: Mirror(data=mirror, name=name)
for name, mirror in (
mirrors.items()
if mirrors is not None
else spack.config.get("mirrors", scope=scope).items()
)
}
def _filter(m: Mirror):
if source is not None and m.source != source:
return False
if binary is not None and m.binary != binary:
return False
if autopush is not None and m.autopush != autopush:
return False
return True
if source is not None:
self._mirrors = {k: v for k, v in self._mirrors.items() if v.source == source}
self._mirrors = {m.name: m for m in mirrors if _filter(m)}
if binary is not None:
self._mirrors = {k: v for k, v in self._mirrors.items() if v.binary == binary}
def __eq__(self, other):
return self._mirrors == other._mirrors
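A filtering sketch against an in-memory mirror config (hypothetical names/URLs), exercising the new ``_filter`` path above:

import spack.mirror

mirrors = {
    "cache": {"url": "s3://example-cache", "binary": True, "autopush": True},
    "sources": {"url": "https://example.com/mirror", "binary": False},
}
selected = spack.mirror.MirrorCollection(mirrors=mirrors, binary=True, autopush=True)
print(list(selected))  # expected: ['cache']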

View File

@@ -83,17 +83,6 @@ def configuration(module_set_name):
)
_FORMAT_STRING_RE = re.compile(r"({[^}]*})")
def _format_env_var_name(spec, var_name_fmt):
"""Format the variable name, but uppercase any formatted fields."""
fmt_parts = _FORMAT_STRING_RE.split(var_name_fmt)
return "".join(
spec.format(part).upper() if _FORMAT_STRING_RE.match(part) else part for part in fmt_parts
)
def _check_tokens_are_valid(format_string, message):
"""Checks that the tokens used in the format string are valid in
the context of module file and environment variable naming.
@@ -748,12 +737,20 @@ def environment_modifications(self):
exclude = self.conf.exclude_env_vars
# We may have tokens to substitute in environment commands
# Prepare a suitable transformation dictionary for the names
# of the environment variables. This means turn the valid
# tokens uppercase.
transform = {}
for token in _valid_tokens:
transform[token] = lambda s, string: str.upper(string)
for x in env:
# Ensure all the tokens are valid in this context
msg = "some tokens cannot be expanded in an environment variable name"
_check_tokens_are_valid(x.name, message=msg)
x.name = _format_env_var_name(self.spec, x.name)
# Transform them
x.name = self.spec.format(x.name, transform=transform)
if self.modification_needs_formatting(x):
try:
# Not every command has a value
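A toy sketch of the uppercasing behavior described above, with a stand-in for ``spec.format``:

import re

_FORMAT_STRING_RE = re.compile(r"({[^}]*})")

def fake_format(part: str) -> str:
    # stand-in for spec.format(); only substitutes the package name
    return part.replace("{name}", "openmpi")

parts = _FORMAT_STRING_RE.split("{name}_ROOT")
print("".join(
    fake_format(p).upper() if _FORMAT_STRING_RE.match(p) else p for p in parts
))  # -> OPENMPI_ROOT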

Some files were not shown because too many files have changed in this diff.