Compare commits


1 Commit

Author           SHA1         Message                                    Date
Gregory Becker   54e5439dd6   Spec.format: conditional format strings    2023-08-22 11:22:36 -07:00
1193 changed files with 10475 additions and 34434 deletions

View File

@@ -10,8 +10,3 @@ updates:
directory: "/lib/spack/docs"
schedule:
interval: "daily"
# Requirements to run style checks
- package-ecosystem: "pip"
directory: "/.github/workflows/style"
schedule:
interval: "daily"

View File

@@ -22,8 +22,8 @@ jobs:
matrix:
operating_system: ["ubuntu-latest", "macos-latest"]
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
@@ -34,7 +34,6 @@ jobs:
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage run $(which spack) audit externals
coverage combine
coverage xml
- name: Package audits (without coverage)
@@ -42,7 +41,6 @@ jobs:
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
$(which spack) audit externals
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
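
The audit job above wraps ``spack audit`` in ``coverage`` before combining and exporting the results. A minimal Python sketch of the same sequence (it assumes ``spack`` and ``coverage`` are on ``PATH``, as the workflow does):

.. code-block:: python

   import shutil
   import subprocess

   # The workflow resolves the spack executable with $(which spack).
   spack = shutil.which("spack")

   subprocess.run(["coverage", "run", spack, "audit", "packages"], check=True)
   subprocess.run(["coverage", "combine"], check=True)  # merge the data files
   subprocess.run(["coverage", "xml"], check=True)      # emit coverage.xml for upload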

View File

@@ -24,7 +24,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -42,8 +42,8 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -62,7 +62,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -80,8 +80,8 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -99,7 +99,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup repo
@@ -145,8 +145,8 @@ jobs:
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -158,13 +158,13 @@ jobs:
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
export PATH=/usr/local/opt/bison@2.7/bin:$PATH
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack external find --not-buildable cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -179,11 +179,11 @@ jobs:
run: |
brew install tree
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
@@ -204,7 +204,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup repo
@@ -214,7 +214,7 @@ jobs:
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
@@ -247,7 +247,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -265,7 +265,6 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
@@ -284,7 +283,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -303,8 +302,8 @@ jobs:
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack -d gpg list
tree ~/.spack/bootstrap/store/
@@ -317,11 +316,10 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
@@ -335,13 +333,13 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack -d gpg list
tree ~/.spack/bootstrap/store/
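
Each bootstrap job above disables the binary mirrors before solving, forcing Spack to bootstrap clingo (or GnuPG) from sources. A hedged sketch of the same commands driven from Python (``spack`` assumed on ``PATH``):

.. code-block:: python

   import subprocess

   # Disable the binary bootstrap mirrors so the solver is bootstrapped
   # from sources, exactly as the workflow steps above do.
   for source in ("github-actions-v0.5", "github-actions-v0.4"):
       subprocess.run(["spack", "bootstrap", "disable", source], check=True)

   subprocess.run(["spack", "external", "find", "cmake", "bison"], check=True)
   subprocess.run(["spack", "-d", "solve", "zlib"], check=True)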

View File

@@ -56,7 +56,7 @@ jobs:
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -86,19 +86,19 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # @v1
uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # @v1
uses: docker/setup-buildx-action@4c0219f9ac95b02789c1075625400b2acbff50b1 # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # @v1
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # @v1
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -106,13 +106,13 @@ jobs:
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # @v1
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # @v2
uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}

View File

@@ -35,7 +35,7 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0

View File

@@ -14,10 +14,10 @@ jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
with:
python-version: 3.9
- name: Install Python packages

View File

@@ -1,7 +0,0 @@
black==23.9.1
clingo==5.6.2
flake8==6.1.0
isort==5.12.0
mypy==1.6.1
types-six==1.16.21.9
vermin==1.5.2

View File

@@ -15,7 +15,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
concretizer: ['clingo']
on_develop:
- ${{ github.ref == 'refs/heads/develop' }}
@@ -45,16 +45,12 @@ jobs:
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.11'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -98,10 +94,10 @@ jobs:
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -137,7 +133,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -156,10 +152,10 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -189,12 +185,12 @@ jobs:
runs-on: macos-latest
strategy:
matrix:
python-version: ["3.11"]
python-version: ["3.10"]
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages

View File

@@ -18,15 +18,15 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python Packages
run: |
pip install --upgrade pip setuptools
pip install -r .github/workflows/style/requirements.txt
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
@@ -35,17 +35,16 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python packages
run: |
pip install --upgrade pip setuptools
pip install -r .github/workflows/style/requirements.txt
python3 -m pip install --upgrade pip setuptools types-six black==23.1.0 mypy isort clingo flake8
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
@@ -69,7 +68,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- name: Setup repo and non-root user
run: |
git --version

View File

@@ -15,10 +15,10 @@ jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
with:
python-version: 3.9
- name: Install Python packages
@@ -39,10 +39,10 @@ jobs:
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
with:
python-version: 3.9
- name: Install Python packages
@@ -63,10 +63,10 @@ jobs:
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
with:
python-version: 3.9
- name: Install Python packages
@@ -75,5 +75,6 @@ jobs:
- name: Build Test
run: |
spack compiler find
spack -d external find cmake ninja
spack external find cmake
spack external find ninja
spack -d install abseil-cpp

View File

@@ -27,53 +27,12 @@
# And here's the CITATION.cff format:
#
cff-version: 1.2.0
type: software
message: "If you are referencing Spack in a publication, please cite the paper below."
title: "The Spack Package Manager: Bringing Order to HPC Software Chaos"
abstract: >-
Large HPC centers spend considerable time supporting software for thousands of users, but the complexity of HPC software is quickly outpacing the capabilities of existing software management tools.
Scientific applications require specific versions of compilers, MPI, and other dependency libraries, so using a single, standard software stack is infeasible.
However, managing many configurations is difficult because the configuration space is combinatorial in size.
We introduce Spack, a tool used at Lawrence Livermore National Laboratory to manage this complexity.
Spack provides a novel, recursive specification syntax to invoke parametric builds of packages and dependencies.
It allows any number of builds to coexist on the same system, and it ensures that installed packages can find their dependencies, regardless of the environment.
We show through real-world use cases that Spack supports diverse and demanding applications, bringing order to HPC software chaos.
preferred-citation:
title: "The Spack Package Manager: Bringing Order to HPC Software Chaos"
type: conference-paper
url: "https://tgamblin.github.io/pubs/spack-sc15.pdf"
doi: "10.1145/2807591.2807623"
url: "https://github.com/spack/spack"
authors:
- family-names: "Gamblin"
given-names: "Todd"
- family-names: "LeGendre"
given-names: "Matthew"
- family-names: "Collette"
given-names: "Michael R."
- family-names: "Lee"
given-names: "Gregory L."
- family-names: "Moody"
given-names: "Adam"
- family-names: "de Supinski"
given-names: "Bronis R."
- family-names: "Futral"
given-names: "Scott"
conference:
name: "Supercomputing 2015 (SC15)"
city: "Austin"
region: "Texas"
country: "US"
date-start: 2015-11-15
date-end: 2015-11-20
month: 11
year: 2015
identifiers:
- description: "The concept DOI of the work."
type: doi
value: 10.1145/2807591.2807623
- description: "The DOE Document Release Number of the work"
type: other
value: "LLNL-CONF-669890"
authors:
- family-names: "Gamblin"
given-names: "Todd"
- family-names: "LeGendre"
@@ -88,3 +47,12 @@ authors:
given-names: "Bronis R."
- family-names: "Futral"
given-names: "Scott"
title: "The Spack Package Manager: Bringing Order to HPC Software Chaos"
conference:
name: "Supercomputing 2015 (SC15)"
city: "Austin"
region: "Texas"
country: "USA"
month: November 15-20
year: 2015
notes: LLNL-CONF-669890

View File

@@ -7,7 +7,6 @@
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Slack](https://slack.spack.io/badge.svg)](https://slack.spack.io)
[![Matrix](https://img.shields.io/matrix/spack-space%3Amatrix.org?label=Matrix)](https://matrix.to/#/#spack-space:matrix.org)
Spack is a multi-platform package manager that builds and installs
multiple versions and configurations of software. It works on Linux,
@@ -63,10 +62,7 @@ Resources:
* **Slack workspace**: [spackpm.slack.com](https://spackpm.slack.com).
To get an invitation, visit [slack.spack.io](https://slack.spack.io).
* **Matrix space**: [#spack-space:matrix.org](https://matrix.to/#/#spack-space:matrix.org):
[bridged](https://github.com/matrix-org/matrix-appservice-slack#matrix-appservice-slack) to Slack.
* [**Github Discussions**](https://github.com/spack/spack/discussions):
not just for discussions, also Q&A.
* [**Github Discussions**](https://github.com/spack/spack/discussions): not just for discussions, also Q&A.
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
`@mention` us!

View File

@@ -2,26 +2,24 @@
## Supported Versions
We provide security updates for `develop` and for the last two
stable (`0.x`) release series of Spack. Security updates will be
made available as patch (`0.x.1`, `0.x.2`, etc.) releases.
We provide security updates for the following releases.
For more on Spack's release structure, see
[`README.md`](https://github.com/spack/spack#releases).
| Version | Supported |
| ------- | ------------------ |
| develop | :white_check_mark: |
| 0.19.x | :white_check_mark: |
| 0.18.x | :white_check_mark: |
## Reporting a Vulnerability
You can report a vulnerability using GitHub's private reporting
feature:
To report a vulnerability or other security
issue, email maintainers@spack.io.
1. Go to [github.com/spack/spack/security](https://github.com/spack/spack/security).
2. Click "Report a vulnerability" in the upper right corner of that page.
3. Fill out the form and submit your draft security advisory.
More details are available in
[GitHub's docs](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability).
You can expect to hear back about security issues within two days.
If your security issue is accepted, we will do our best to release
a fix within a week. If fixing the issue will take longer than
this, we will discuss timeline options with you.
You can expect to hear back within two days.
If your security issue is accepted, we will do
our best to release a fix within a week. If
fixing the issue will take longer than this,
we will discuss timeline options with you.

View File

@@ -14,7 +14,7 @@
::
@echo off
set spack="%SPACK_ROOT%"\bin\spack
set spack=%SPACK_ROOT%\bin\spack
::#######################################################################
:: This is a wrapper around the spack command that forwards calls to

View File

@@ -39,26 +39,12 @@ function Read-SpackArgs {
return $SpackCMD_params, $SpackSubCommand, $SpackSubCommandArgs
}
function Set-SpackEnv {
# This function processes the return value of $(spack <command>),
# which comes back as a System.Object[] containing a list of
# environment-modifying commands.
# Invoke-Expression can only handle one command at a time,
# so we iterate over the list and invoke each env-modification
# expression individually
foreach($envop in $args[0]){
Invoke-Expression $envop
}
}
function Invoke-SpackCD {
if (Compare-CommonArgs $SpackSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" cd -h
python $Env:SPACK_ROOT/bin/spack cd -h
}
else {
$LOC = $(python "$Env:SPACK_ROOT/bin/spack" location $SpackSubCommandArgs)
$LOC = $(python $Env:SPACK_ROOT/bin/spack location $SpackSubCommandArgs)
if (($NULL -ne $LOC)){
if ( Test-Path -Path $LOC){
Set-Location $LOC
@@ -75,7 +61,7 @@ function Invoke-SpackCD {
function Invoke-SpackEnv {
if (Compare-CommonArgs $SpackSubCommandArgs[0]) {
python "$Env:SPACK_ROOT/bin/spack" env -h
python $Env:SPACK_ROOT/bin/spack env -h
}
else {
$SubCommandSubCommand = $SpackSubCommandArgs[0]
@@ -83,46 +69,46 @@ function Invoke-SpackEnv {
switch ($SubCommandSubCommand) {
"activate" {
if (Compare-CommonArgs $SubCommandSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" env activate $SubCommandSubCommandArgs
python $Env:SPACK_ROOT/bin/spack env activate $SubCommandSubCommandArgs
}
elseif ([bool]($SubCommandSubCommandArgs.Where({$_ -eq "--pwsh"}))) {
python "$Env:SPACK_ROOT/bin/spack" env activate $SubCommandSubCommandArgs
python $Env:SPACK_ROOT/bin/spack env activate $SubCommandSubCommandArgs
}
elseif (!$SubCommandSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" env activate $SubCommandSubCommandArgs
python $Env:SPACK_ROOT/bin/spack env activate $SubCommandSubCommandArgs
}
else {
$SpackEnv = $(python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params env activate "--pwsh" $SubCommandSubCommandArgs)
Set-SpackEnv $SpackEnv
$SpackEnv = $(python $Env:SPACK_ROOT/bin/spack $SpackCMD_params env activate "--pwsh" $SubCommandSubCommandArgs)
$ExecutionContext.InvokeCommand($SpackEnv)
}
}
"deactivate" {
if ([bool]($SubCommandSubCommandArgs.Where({$_ -eq "--pwsh"}))) {
python"$Env:SPACK_ROOT/bin/spack" env deactivate $SubCommandSubCommandArgs
python $Env:SPACK_ROOT/bin/spack env deactivate $SubCommandSubCommandArgs
}
elseif($SubCommandSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" env deactivate -h
python $Env:SPACK_ROOT/bin/spack env deactivate -h
}
else {
$SpackEnv = $(python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params env deactivate "--pwsh")
Set-SpackEnv $SpackEnv
$SpackEnv = $(python $Env:SPACK_ROOT/bin/spack $SpackCMD_params env deactivate --pwsh)
$ExecutionContext.InvokeCommand($SpackEnv)
}
}
default {python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
default {python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
}
}
}
function Invoke-SpackLoad {
if (Compare-CommonArgs $SpackSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
}
elseif ([bool]($SpackSubCommandArgs.Where({($_ -eq "--pwsh") -or ($_ -eq "--list")}))) {
python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
}
else {
$SpackEnv = $(python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand "--pwsh" $SpackSubCommandArgs)
Set-SpackEnv $SpackEnv
$SpackEnv = $(python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand "--pwsh" $SpackSubCommandArgs)
$ExecutionContext.InvokeCommand($SpackEnv)
}
}
@@ -130,7 +116,7 @@ function Invoke-SpackLoad {
$SpackCMD_params, $SpackSubCommand, $SpackSubCommandArgs = Read-SpackArgs $args
if (Compare-CommonArgs $SpackCMD_params) {
python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
exit $LASTEXITCODE
}
@@ -142,5 +128,5 @@ switch($SpackSubCommand)
"env" {Invoke-SpackEnv}
"load" {Invoke-SpackLoad}
"unload" {Invoke-SpackLoad}
default {python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
default {python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
}

View File

@@ -9,15 +9,15 @@ bootstrap:
# may not be able to bootstrap all the software that Spack needs,
# depending on its type.
sources:
- name: 'github-actions-v0.5'
metadata: $spack/share/spack/bootstrap/github-actions-v0.5
- name: 'github-actions-v0.4'
metadata: $spack/share/spack/bootstrap/github-actions-v0.4
- name: 'github-actions-v0.3'
metadata: $spack/share/spack/bootstrap/github-actions-v0.3
- name: 'spack-install'
metadata: $spack/share/spack/bootstrap/spack-install
trusted:
# By default we trust bootstrapping from sources and from binaries
# produced on Github via the workflow
github-actions-v0.5: true
github-actions-v0.4: true
github-actions-v0.3: true
spack-install: true
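
The ``trusted`` map above determines which of the listed sources Spack may actually use. A small sketch (PyYAML and the file location are assumptions of this example) that reads the file and prints the trust status of each source:

.. code-block:: python

   import yaml  # PyYAML, an assumption of this sketch

   # Assumed location of the defaults file within a Spack checkout.
   with open("etc/spack/defaults/bootstrap.yaml") as f:
       config = yaml.safe_load(f)

   for name, trusted in config["bootstrap"]["trusted"].items():
       print(f"{name}: {'trusted' if trusted else 'untrusted'}")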

View File

@@ -41,4 +41,4 @@ concretizer:
# "none": allows a single node for any package in the DAG.
# "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
# "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
strategy: minimal
strategy: none

View File

@@ -1,3 +1,4 @@
package_list.html
command_index.rst
spack*.rst
llnl*.rst

View File

@@ -45,8 +45,7 @@ Listing available packages
To install software with Spack, you need to know what software is
available. You can see a list of available package names at the
`packages.spack.io <https://packages.spack.io>`_ website, or
using the ``spack list`` command.
:ref:`package-list` webpage, or using the ``spack list`` command.
.. _cmd-spack-list:
@@ -61,7 +60,7 @@ can install:
:ellipsis: 10
There are thousands of them, so we've truncated the output above, but you
can find a `full list here <https://packages.spack.io>`_.
can find a :ref:`full list here <package-list>`.
Packages are listed by name in alphabetical order.
A pattern to match with no wildcards, ``*`` or ``?``,
will be treated as though it started and ended with
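
As a quick sketch, the same query can be scripted (``spack`` assumed installed; ``mpi`` is just an example pattern):

.. code-block:: python

   import subprocess

   # List every package whose name matches the pattern; with no wildcards
   # the pattern matches anywhere in the name, per the text above.
   result = subprocess.run(
       ["spack", "list", "mpi"], capture_output=True, text=True, check=True
   )
   print("\n".join(result.stdout.splitlines()[:10]))  # show the first ten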

View File

@@ -3,103 +3,6 @@
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _concretizer-options:
==========================================
Concretization Settings (concretizer.yaml)
==========================================
The ``concretizer.yaml`` configuration file allows you to customize aspects of the
algorithm used to select the dependencies you install. The default configuration
is the following:
.. literalinclude:: _spack_root/etc/spack/defaults/concretizer.yaml
:language: yaml
--------------------------------
Reuse already installed packages
--------------------------------
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
whether it will do a "fresh" installation and prefer the latest settings from
``package.py`` files and ``packages.yaml`` (``false``).
You can use:
.. code-block:: console
% spack install --reuse <spec>
to enable reuse for a single installation, and you can use:
.. code-block:: console
spack install --fresh <spec>
to do a fresh install if ``reuse`` is enabled by default.
``reuse: true`` is the default.
------------------------------------------
Selection of the target microarchitectures
------------------------------------------
The options under the ``targets`` attribute control which targets are considered during a solve.
Currently the options in this section are only configurable from the ``concretizer.yaml`` file
and there are no corresponding command line arguments to enable them for a single solve.
The ``granularity`` option can take two possible values: ``microarchitectures`` and ``generic``.
If set to:
.. code-block:: yaml
concretizer:
targets:
granularity: microarchitectures
Spack will consider all the microarchitectures known to ``archspec`` to label nodes for
compatibility. If instead the option is set to:
.. code-block:: yaml
concretizer:
targets:
granularity: generic
Spack will consider only generic microarchitectures. For instance, when running on a
Haswell node, Spack will consider ``haswell`` as the best target in the former case and
``x86_64_v3`` as the best target in the latter case.
The ``host_compatible`` option is a Boolean option that determines whether or not the
microarchitectures considered during the solve are constrained to be compatible with the
host Spack is currently running on. For instance, if this option is set to ``true``, a
user cannot concretize for ``target=icelake`` while running on a Haswell node.
---------------
Duplicate nodes
---------------
The ``duplicates`` attribute controls whether the DAG can contain multiple configurations of
the same package. This is mainly relevant for build dependencies, which may have their version
pinned by some nodes, and thus be required at different versions by different nodes in the same
DAG.
The ``strategy`` option controls how the solver deals with duplicates. If the value is ``none``,
then a single configuration per package is allowed in the DAG. This means, for instance, that only
a single ``cmake`` or a single ``py-setuptools`` version is allowed. The result would be a slightly
faster concretization, at the expense of making a few specs unsolvable.
If the value is ``minimal`` Spack will allow packages tagged as ``build-tools`` to have duplicates.
This makes it possible, for instance, to concretize specs whose nodes require different, incompatible ranges
of some build tool. In the figure below, the latest ``py-shapely`` requires a newer ``py-setuptools``,
while ``py-numpy`` still needs an older version:
.. figure:: images/shapely_duplicates.svg
:scale: 70 %
:align: center
Up to Spack v0.20 ``duplicates:strategy:none`` was the default (and only) behavior. From Spack v0.21 the
default behavior is ``duplicates:strategy:minimal``.
.. _build-settings:
================================
@@ -329,6 +232,76 @@ Specific limitations include:
then Spack will not add a new external entry (``spack config blame packages``
can help locate all external entries).
.. _concretizer-options:
----------------------
Concretizer options
----------------------
``packages.yaml`` gives the concretizer preferences for specific packages,
but you can also use ``concretizer.yaml`` to customize aspects of the
algorithm it uses to select the dependencies you install:
.. literalinclude:: _spack_root/etc/spack/defaults/concretizer.yaml
:language: yaml
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Reuse already installed packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
whether it will do a "fresh" installation and prefer the latest settings from
``package.py`` files and ``packages.yaml`` (``false``).
You can use:
.. code-block:: console
% spack install --reuse <spec>
to enable reuse for a single installation, and you can use:
.. code-block:: console
spack install --fresh <spec>
to do a fresh install if ``reuse`` is enabled by default.
``reuse: true`` is the default.
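
As a sketch, the two modes described above can also be driven per-install from a script (``spack`` assumed on ``PATH``; ``zlib`` is just an example spec):

.. code-block:: python

   import subprocess

   # Prefer already-installed packages for this one installation.
   subprocess.run(["spack", "install", "--reuse", "zlib"], check=True)

   # Force a fresh concretization, preferring the latest package settings.
   subprocess.run(["spack", "install", "--fresh", "zlib"], check=True)
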
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Selection of the target microarchitectures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The options under the ``targets`` attribute control which targets are considered during a solve.
Currently the options in this section are only configurable from the ``concretizer.yaml`` file
and there are no corresponding command line arguments to enable them for a single solve.
The ``granularity`` option can take two possible values: ``microarchitectures`` and ``generic``.
If set to:
.. code-block:: yaml
concretizer:
targets:
granularity: microarchitectures
Spack will consider all the microarchitectures known to ``archspec`` to label nodes for
compatibility. If instead the option is set to:
.. code-block:: yaml
concretizer:
targets:
granularity: generic
Spack will consider only generic microarchitectures. For instance, when running on a
Haswell node, Spack will consider ``haswell`` as the best target in the former case and
``x86_64_v3`` as the best target in the latter case.
The ``host_compatible`` option is a Boolean option that determines whether or not the
microarchitectures considered during the solve are constrained to be compatible with the
host Spack is currently running on. For instance, if this option is set to ``true``, a
user cannot concretize for ``target=icelake`` while running on a Haswell node.
.. _package-requirements:
--------------------

View File

@@ -9,32 +9,9 @@
Bundle
------
``BundlePackage`` represents a set of packages that are expected to work
well together, such as a collection of commonly used software libraries.
The associated software is specified as dependencies.
If it makes sense, variants, conflicts, and requirements can be added to
the package. :ref:`Variants <variants>` ensure that common build options
are consistent across the packages supporting them. :ref:`Conflicts
and requirements <packaging_conflicts>` prevent attempts to build with known
bugs or limitations.
For example, if ``MyBundlePackage`` is known to only build on ``linux``,
it could use the ``require`` directive as follows:
.. code-block:: python
require("platform=linux", msg="MyBundlePackage only builds on linux")
Spack has a number of built-in bundle packages, such as:
* `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/amd-aocl/package.py>`_
* `EcpProxyApps <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-proxy-apps/package.py>`_
* `Libc <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/libc/package.py>`_
* `Xsdk <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/xsdk/package.py>`_
where ``Xsdk`` also inherits from ``CudaPackage`` and ``RocmPackage`` and
``Libc`` is a virtual bundle package for the C standard library.
``BundlePackage`` represents a set of packages that are expected to work well
together, such as a collection of commonly used software libraries. The
associated software is specified as bundle dependencies.
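
A minimal, hypothetical bundle recipe illustrating the points above (the class name, version, and dependency are invented for the example; the directives are the ones used in the text):

.. code-block:: python

   from spack.package import *


   class MyBundlePackage(BundlePackage):
       """Hypothetical bundle of libraries expected to work well together."""

       homepage = "https://example.com"

       # Bundles have no source code, so a version is just a label.
       version("202309")

       # The bundled software is expressed as dependencies.
       depends_on("zlib")

       # Known limitation expressed as a requirement, as described above.
       require("platform=linux", msg="MyBundlePackage only builds on linux")
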
^^^^^^^^

View File

@@ -25,8 +25,8 @@ use Spack to build packages with the tools.
The Spack Python class ``IntelOneapiPackage`` is a base class that is
used by ``IntelOneapiCompilers``, ``IntelOneapiMkl``,
``IntelOneapiTbb`` and other classes to implement the oneAPI
packages. Search for ``oneAPI`` at `<packages.spack.io>`_ for the full
list of available oneAPI packages, or use::
packages. See the :ref:`package-list` for the full list of available
oneAPI packages or use::
spack list -d oneAPI

View File

@@ -48,6 +48,9 @@
os.environ["COLIFY_SIZE"] = "25x120"
os.environ["COLUMNS"] = "120"
# Generate full package list if needed
subprocess.call(["spack", "list", "--format=html", "--update=package_list.html"])
# Generate a command index if an update is needed
subprocess.call(
[
@@ -211,7 +214,6 @@ def setup(sphinx):
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.StandardVersion"),
("py:class", "spack.spec.DependencySpec"),
("py:class", "spack.spec.InstallStatus"),
("py:class", "spack.spec.SpecfileReaderBase"),
("py:class", "spack.install_test.Pb"),
]

View File

@@ -212,12 +212,18 @@ under the ``container`` attribute of environments:
final:
- libgomp
# Extra instructions
extra_instructions:
final: |
RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ "' >> ~/.bashrc
# Labels for the image
labels:
app: "gromacs"
mpi: "mpich"
A detailed description of the options available can be found in the :ref:`container_config_options` section.
A detailed description of the options available can be found in the
:ref:`container_config_options` section.
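
For reference, the recipe for such an environment is generated with ``spack containerize``; a hedged sketch capturing its output from Python (the environment path is the illustrative one used later on this page, and ``spack`` is assumed on ``PATH``):

.. code-block:: python

   import subprocess

   # Generate the container recipe for an environment and print it.
   dockerfile = subprocess.run(
       ["spack", "-e", "/opt/environment", "containerize"],
       capture_output=True, text=True, check=True,
   ).stdout
   print(dockerfile)
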
-------------------
Setting Base Images
@@ -519,13 +525,6 @@ the example below:
COPY data /share/myapp/data
{% endblock %}
The Dockerfile is generated by running:
.. code-block:: console
$ spack -e /opt/environment containerize
Note that the environment must be active for spack to read the template.
The recipe that gets generated contains the two extra instructions that we added in our template extension:
.. code-block:: Dockerfile

View File

@@ -310,11 +310,53 @@ Once all of the dependencies are installed, you can try building the documentati
$ make clean
$ make
If you see any warning or error messages, you will have to correct those before your PR
is accepted. If you are editing the documentation, you should be running the
documentation tests to make sure there are no errors. Documentation changes can result
in some obfuscated warning messages. If you don't understand what they mean, feel free
to ask when you submit your PR.
If you see any warning or error messages, you will have to correct those before
your PR is accepted.
If you are editing the documentation, you should obviously be running the
documentation tests. But even if you are simply adding a new package, your
changes could cause the documentation tests to fail:
.. code-block:: console
package_list.rst:8745: WARNING: Block quote ends without a blank line; unexpected unindent.
At first, this error message will mean nothing to you, since you didn't edit
that file. Until you look at line 8745 of the file in question:
.. code-block:: rst
Description:
NetCDF is a set of software libraries and self-describing, machine-
independent data formats that support the creation, access, and sharing
of array-oriented scientific data.
Our documentation includes :ref:`a list of all Spack packages <package-list>`.
If you add a new package, its docstring is added to this page. The problem in
this case was that the docstring looked like:
.. code-block:: python
class Netcdf(Package):
"""
NetCDF is a set of software libraries and self-describing,
machine-independent data formats that support the creation,
access, and sharing of array-oriented scientific data.
"""
Docstrings cannot start with a newline character, or else Sphinx will complain.
Instead, they should look like:
.. code-block:: python
class Netcdf(Package):
"""NetCDF is a set of software libraries and self-describing,
machine-independent data formats that support the creation,
access, and sharing of array-oriented scientific data."""
Documentation changes can result in much more obfuscated warning messages.
If you don't understand what they mean, feel free to ask when you submit
your PR.
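
A tiny sketch of the rule above, usable as a sanity check (a hypothetical helper, not part of Spack):

.. code-block:: python

   def docstring_ok(cls) -> bool:
       """Return True if the class docstring will not trip Sphinx."""
       doc = cls.__doc__ or ""
       # Sphinx complains when a docstring starts with a newline.
       return not doc.startswith("\n")
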
--------
Coverage

View File

@@ -1,113 +0,0 @@
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
==========================
Using External GPU Support
==========================
Many packages come with a ``+cuda`` or ``+rocm`` variant. With no added
configuration Spack will download and install the needed components.
It may be preferable to use existing system support: the following sections
help with using a system installation of GPU libraries.
-----------------------------------
Using an External ROCm Installation
-----------------------------------
Spack breaks down ROCm into many separate component packages. The following
is an example ``packages.yaml`` that organizes a consistent set of ROCm
components for use by dependent packages:
.. code-block:: yaml
packages:
all:
compiler: [rocmcc@=5.3.0]
variants: amdgpu_target=gfx90a
hip:
buildable: false
externals:
- spec: hip@5.3.0
prefix: /opt/rocm-5.3.0/hip
hsa-rocr-dev:
buildable: false
externals:
- spec: hsa-rocr-dev@5.3.0
prefix: /opt/rocm-5.3.0/
llvm-amdgpu:
buildable: false
externals:
- spec: llvm-amdgpu@5.3.0
prefix: /opt/rocm-5.3.0/llvm/
comgr:
buildable: false
externals:
- spec: comgr@5.3.0
prefix: /opt/rocm-5.3.0/
hipsparse:
buildable: false
externals:
- spec: hipsparse@5.3.0
prefix: /opt/rocm-5.3.0/
hipblas:
buildable: false
externals:
- spec: hipblas@5.3.0
prefix: /opt/rocm-5.3.0/
rocblas:
buildable: false
externals:
- spec: rocblas@5.3.0
prefix: /opt/rocm-5.3.0/
rocprim:
buildable: false
externals:
- spec: rocprim@5.3.0
prefix: /opt/rocm-5.3.0/rocprim/
This is in combination with the following compiler definition:
.. code-block:: yaml
compilers:
- compiler:
spec: rocmcc@=5.3.0
paths:
cc: /opt/rocm-5.3.0/bin/amdclang
cxx: /opt/rocm-5.3.0/bin/amdclang++
f77: null
fc: /opt/rocm-5.3.0/bin/amdflang
operating_system: rhel8
target: x86_64
This includes the following considerations:
- Each of the listed externals specifies ``buildable: false`` to force Spack
to use only the externals we defined.
- ``spack external find`` can automatically locate some of the ``hip``/``rocm``
packages, but not all of them, and furthermore not in a manner that
guarantees a complementary set if multiple ROCm installations are available.
- The ``prefix`` is the same for several components, but note that others
require listing one of the subdirectories as a prefix.
-----------------------------------
Using an External CUDA Installation
-----------------------------------
CUDA is split into fewer components and is simpler to specify:
.. code-block:: yaml
packages:
all:
variants:
- cuda_arch=70
cuda:
buildable: false
externals:
- spec: cuda@11.0.2
prefix: /opt/cuda/cuda-11.0.2/
where ``/opt/cuda/cuda-11.0.2/lib/`` contains ``libcudart.so``.
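
A sketch that emits the CUDA entry above programmatically (PyYAML is an assumption; the paths and versions are the ones from the example):

.. code-block:: python

   import yaml  # PyYAML, an assumption of this sketch

   packages = {
       "packages": {
           "all": {"variants": ["cuda_arch=70"]},
           "cuda": {
               "buildable": False,  # force use of the external entry
               "externals": [
                   {"spec": "cuda@11.0.2", "prefix": "/opt/cuda/cuda-11.0.2/"}
               ],
           },
       }
   }

   with open("packages.yaml", "w") as f:
       yaml.safe_dump(packages, f, default_flow_style=False, sort_keys=False)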

File diff suppressed because it is too large

(Image removed; size: 108 KiB)

View File

@@ -54,16 +54,9 @@ or refer to the full manual below.
features
getting_started
basic_usage
Tutorial: Spack 101 <https://spack-tutorial.readthedocs.io>
replace_conda_homebrew
.. toctree::
:maxdepth: 2
:caption: Links
Tutorial (spack-tutorial.rtfd.io) <https://spack-tutorial.readthedocs.io>
Packages (packages.spack.io) <https://packages.spack.io>
Binaries (binaries.spack.io) <https://cache.spack.io>
.. toctree::
:maxdepth: 2
:caption: Reference
@@ -79,11 +72,11 @@ or refer to the full manual below.
repositories
binary_caches
command_index
package_list
chain
extensions
pipelines
signing
gpu_configuration
.. toctree::
:maxdepth: 2

View File

@@ -0,0 +1,17 @@
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _package-list:
============
Package List
============
This is a list of things you can install using Spack. It is
automatically generated based on the packages in this Spack
version.
.. raw:: html
:file: package_list.html
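
The included HTML file is the one regenerated during the docs build (see the ``conf.py`` change above); a sketch of the same command (``spack`` assumed on ``PATH``):

.. code-block:: python

   import subprocess

   # Regenerate the HTML package list that this page includes.
   subprocess.run(
       ["spack", "list", "--format=html", "--update=package_list.html"],
       check=True,
   )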

View File

@@ -363,42 +363,6 @@ one of these::
If Spack finds none of these variables set, it will look for ``vim``, ``vi``, ``emacs``,
``nano``, and ``notepad``, in that order.
^^^^^^^^^^^^^^^^^
Bundling software
^^^^^^^^^^^^^^^^^
If you have a collection of software expected to work well together with
no source code of its own, you can create a :ref:`BundlePackage <bundlepackage>`.
Examples where bundle packages can be useful include defining suites of
applications (e.g, `EcpProxyApps
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-proxy-apps/package.py>`_), commonly used libraries
(e.g., `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/amd-aocl/package.py>`_),
and software development kits (e.g., `EcpDataVisSdk <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-data-vis-sdk/package.py>`_).
These versioned packages primarily consist of dependencies on the associated
software packages. They can include :ref:`variants <variants>` to ensure
common build options are consistently applied to dependencies. Known build
failures, such as not building on a platform or when certain compilers or
variants are used, can be flagged with :ref:`conflicts <packaging_conflicts>`.
Build requirements, such as only building with specific compilers, can similarly
be flagged with :ref:`requires <packaging_conflicts>`.
The ``spack create --template bundle`` command will create a skeleton
``BundlePackage`` ``package.py`` for you:
.. code-block:: console
$ spack create --template bundle --name coolsdk
Now you can fill in the basic package documentation, version(s), and software
package dependencies along with any other relevant customizations.
.. note::
Remember that bundle packages have no software of their own so there
is nothing to download.
^^^^^^^^^^^^^^^^^^^^^^^^^
Non-downloadable software
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -646,16 +610,7 @@ add a line like this in the package class:
version("8.2.0", md5="1c9f62f0778697a09d36121ead88e08e")
version("8.1.2", md5="d47dd09ed7ae6e7fd6f9a816d7f5fdf6")
.. note::
By convention, we list versions in descending order, from newest to oldest.
.. note::
:ref:`Bundle packages <bundlepackage>` do not have source code so
there is nothing to fetch. Consequently, their version directives
consist solely of the version name (e.g., ``version("202309")``).
Versions should be listed in descending order, from newest to oldest.
^^^^^^^^^^^^^
Date Versions
@@ -2723,7 +2678,7 @@ Conflicts and requirements
--------------------------
Sometimes packages have known bugs, or limitations, that would prevent them
from building e.g. against other dependencies or with certain compilers. Spack
to build e.g. against other dependencies or with certain compilers. Spack
makes it possible to express such constraints with the ``conflicts`` directive.
Adding the following to a package:
@@ -3635,8 +3590,7 @@ regardless of the build system. The arguments for the phase are:
The arguments ``spec`` and ``prefix`` are passed only for convenience, as they always
correspond to ``self.spec`` and ``self.spec.prefix`` respectively.
If the ``package.py`` has build instructions in a separate
:ref:`builder class <multiple_build_systems>`, the signature for a phase changes slightly:
If the ``package.py`` encodes builders explicitly, the signature for a phase changes slightly:
.. code-block:: python
@@ -3646,6 +3600,56 @@ If the ``package.py`` has build instructions in a separate
In this case the package is passed as the second argument, and ``self`` is the builder instance.
.. _multiple_build_systems:
^^^^^^^^^^^^^^^^^^^^^^
Multiple build systems
^^^^^^^^^^^^^^^^^^^^^^
There are cases where software actively supports two build systems, or changes build systems
as it evolves, or needs different build systems on different platforms. Spack allows dealing with
these cases natively, if a recipe is written using builders explicitly.
For instance, software that supports two build systems unconditionally should derive from
both ``*Package`` base classes, and declare the possible use of multiple build systems using
a directive:
.. code-block:: python
class ArpackNg(CMakePackage, AutotoolsPackage):
build_system("cmake", "autotools", default="cmake")
In this case the software can be built with both ``autotools`` and ``cmake``. Since the package
supports multiple build systems, it is necessary to declare which one is the default. The ``package.py``
will likely contain some overriding of default builder methods:
.. code-block:: python
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def cmake_args(self):
pass
class AutotoolsBuilder(spack.build_systems.autotools.AutotoolsBuilder):
def configure_args(self):
pass
In more complex cases it might happen that the build system changes according to certain conditions,
for instance across versions. That can be expressed with conditional variant values:
.. code-block:: python
class ArpackNg(CMakePackage, AutotoolsPackage):
build_system(
conditional("cmake", when="@0.64:"),
conditional("autotools", when="@:0.63"),
default="cmake",
)
In the example the directive imposes a change from ``Autotools`` to ``CMake`` going
from ``v0.63`` to ``v0.64``.
^^^^^^^^^^^^^^^^^^
Mixin base classes
^^^^^^^^^^^^^^^^^^
@@ -3692,106 +3696,6 @@ for instance:
In the example above ``Cp2k`` inherits all the conflicts and variants that ``CudaPackage`` defines.
.. _multiple_build_systems:
----------------------
Multiple build systems
----------------------
There are cases where a package actively supports two build systems, or changes build systems
as it evolves, or needs different build systems on different platforms. Spack allows dealing with
these cases by splitting the build instructions into separate builder classes.
For instance, software that supports two build systems unconditionally should derive from
both ``*Package`` base classes, and declare the possible use of multiple build systems using
a directive:
.. code-block:: python
class Example(CMakePackage, AutotoolsPackage):
variant("my_feature", default=True)
build_system("cmake", "autotools", default="cmake")
In this case the software can be built with both ``autotools`` and ``cmake``. Since the package
supports multiple build systems, it is necessary to declare which one is the default.
Additional build instructions are split into separate builder classes:
.. code-block:: python
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def cmake_args(self):
return [
self.define_from_variant("MY_FEATURE", "my_feature")
]
class AutotoolsBuilder(spack.build_systems.autotools.AutotoolsBuilder):
def configure_args(self):
return self.with_or_without("my-feature", variant="my_feature")
In this example, ``spack install example +my_feature build_system=cmake`` will
pick the ``CMakeBuilder`` and invoke ``cmake -DMY_FEATURE:BOOL=ON``.
Similarly, ``spack install example +my_feature build_system=autotools`` will pick
the ``AutotoolsBuilder`` and invoke ``./configure --with-my-feature``.
Dependencies are always specified in the package class. When some dependencies
depend on the choice of the build system, it is possible to use when conditions as
usual:
.. code-block:: python
class Example(CMakePackage, AutotoolsPackage):
build_system("cmake", "autotools", default="cmake")
# Runtime dependencies
depends_on("ncurses")
depends_on("libxml2")
# Lowerbounds for cmake only apply when using cmake as the build system
with when("build_system=cmake"):
depends_on("cmake@3.18:", when="@2.0:", type="build")
depends_on("cmake@3:", type="build")
# Specify extra build dependencies used only in the configure script
with when("build_system=autotools"):
depends_on("perl", type="build")
depends_on("pkgconfig", type="build")
Very often projects switch from one build system to another, or add support
for a new build system from a certain version, which means that the choice
of the build system typically depends on a version range. Those situations can
be handled by using conditional values in the ``build_system`` directive:
.. code-block:: python
class Example(CMakePackage, AutotoolsPackage):
build_system(
conditional("cmake", when="@0.64:"),
conditional("autotools", when="@:0.63"),
default="cmake",
)
In the example the directive imposes a change from ``Autotools`` to ``CMake`` going
from ``v0.63`` to ``v0.64``.
The ``build_system`` can be used as an ordinary variant, which also means that it can
be used in ``depends_on`` statements. This can be useful when a package *requires* that
its dependency has a CMake config file, meaning that the dependent can only build when the
dependency is built with CMake, and not Autotools. In that case, you can force the choice
of the build system in the dependent:
.. code-block:: python
class Dependent(CMakePackage):
depends_on("example build_system=cmake")
.. _install-environment:
-----------------------
@@ -4869,17 +4773,17 @@ For example, running:
results in spack checking that the installation created the following **file**:
* ``self.prefix.bin.reframe``
* ``self.prefix/bin/reframe``
and the following **directories**:
* ``self.prefix.bin``
* ``self.prefix.config``
* ``self.prefix.docs``
* ``self.prefix.reframe``
* ``self.prefix.tutorials``
* ``self.prefix.unittests``
* ``self.prefix.cscs-checks``
* ``self.prefix/bin``
* ``self.prefix/config``
* ``self.prefix/docs``
* ``self.prefix/reframe``
* ``self.prefix/tutorials``
* ``self.prefix/unittests``
* ``self.prefix/cscs-checks``
If **any** of these paths are missing, then Spack considers the installation
to have failed.
@@ -5023,7 +4927,7 @@ installed executable. The check is implemented as follows:
@on_package_attributes(run_tests=True)
def check_list(self):
with working_dir(self.stage.source_path):
reframe = Executable(self.prefix.bin.reframe)
reframe = Executable(join_path(self.prefix, "bin", "reframe"))
reframe("-l")
.. warning::
@@ -5243,8 +5147,8 @@ embedded test parts.
for example in ["ex1", "ex2"]:
with test_part(
self,
f"test_example_{example}",
purpose=f"run installed {example}",
"test_example_{0}".format(example),
purpose="run installed {0}".format(example),
):
exe = which(join_path(self.prefix.bin, example))
exe()
@@ -5322,10 +5226,11 @@ Below illustrates using this feature to compile an example.
...
cxx = which(os.environ["CXX"])
cxx(
f"-L{self.prefix.lib}",
f"-I{self.prefix.include}",
f"{exe}.cpp",
"-o", exe
"-L{0}".format(self.prefix.lib),
"-I{0}".format(self.prefix.include),
"{0}.cpp".format(exe),
"-o",
exe
)
cxx_example = which(exe)
cxx_example()
@@ -5342,14 +5247,14 @@ Saving build-time files
We highly recommend re-using build-time test sources and pared down
input files for testing installed software. These files are easier
to keep synchronized with software capabilities since they reside
within the software's repository.
If that is not possible, you can add test-related files to the package
repository (see :ref:`adding custom files <cache_custom_files>`). It
will be important to maintain them so they work across listed or supported
versions of the package.
You can use the ``cache_extra_test_sources`` helper to copy directories
You can use the ``cache_extra_test_sources`` method to copy directories
and or files from the source build stage directory to the package's
installation directory.
@@ -5357,15 +5262,10 @@ The signature for ``cache_extra_test_sources`` is:
.. code-block:: python
def cache_extra_test_sources(pkg, srcs):
where each argument has the following meaning:
* ``pkg`` is an instance of the package for the spec under test.
* ``srcs`` is a string *or* a list of strings corresponding to the
paths of subdirectories and or files needed for stand-alone testing.
def cache_extra_test_sources(self, srcs):
where ``srcs`` is a string *or* a list of strings corresponding to the
paths of subdirectories and or files needed for stand-alone testing.
The paths must be relative to the staged source directory. Contents of
subdirectories and files are copied to a special test cache subdirectory
of the installation prefix. They are automatically copied to the appropriate
@@ -5386,18 +5286,21 @@ and using ``foo.c`` in a test method is illustrated below.
srcs = ["tests",
join_path("examples", "foo.c"),
join_path("examples", "bar.c")]
cache_extra_test_sources(self, srcs)
self.cache_extra_test_sources(srcs)
def test_foo(self):
exe = "foo"
src_dir = self.test_suite.current_test_cache_dir.examples
src_dir = join_path(
self.test_suite.current_test_cache_dir, "examples"
)
with working_dir(src_dir):
cc = which(os.environ["CC"])
cc(
f"-L{self.prefix.lib}",
f"-I{self.prefix.include}",
f"{exe}.c",
"-o", exe
"-L{0}".format(self.prefix.lib),
"-I{0}".format(self.prefix.include),
"{0}.c".format(exe),
"-o",
exe
)
foo = which(exe)
foo()
@@ -5423,9 +5326,9 @@ the files using the ``self.test_suite.current_test_cache_dir`` property.
In our example above, test methods can use the following paths to reference
the copy of each entry listed in ``srcs``, respectively:
* ``self.test_suite.current_test_cache_dir.tests``
* ``join_path(self.test_suite.current_test_cache_dir.examples, "foo.c")``
* ``join_path(self.test_suite.current_test_cache_dir.examples, "bar.c")``
* ``join_path(self.test_suite.current_test_cache_dir, "tests")``
* ``join_path(self.test_suite.current_test_cache_dir, "examples", "foo.c")``
* ``join_path(self.test_suite.current_test_cache_dir, "examples", "bar.c")``
.. admonition:: Library packages should build stand-alone tests
@@ -5444,7 +5347,7 @@ the copy of each entry listed in ``srcs``, respectively:
If one or more of the copied files needs to be modified to reference
the installed software, it is recommended that those changes be made
to the cached files **once** in the ``copy_test_sources`` method and
**after** the call to ``cache_extra_test_sources()``. This will
**after** the call to ``self.cache_extra_test_sources()``. This will
reduce the amount of unnecessary work in the test method **and** avoid
problems testing in shared instances and facility deployments.
@@ -5491,7 +5394,7 @@ property as shown below.
"""build and run custom-example"""
data_dir = self.test_suite.current_test_data_dir
exe = "custom-example"
src = data_dir.join(f"{exe}.cpp")
src = data_dir.join("{0}.cpp".format(exe))
...
# TODO: Build custom-example using src and exe
...
@@ -5507,7 +5410,7 @@ Reading expected output from a file
The helper function ``get_escaped_text_output`` is available for packages
to retrieve and properly format the text from a file that contains the
expected output from running an executable that may contain special
characters.
The signature for ``get_escaped_text_output`` is:
@@ -5541,7 +5444,7 @@ added to the package's ``test`` subdirectory.
db_filename, ".dump", output=str.split, error=str.split
)
for exp in expected:
assert re.search(exp, out), f"Expected '{exp}' in output"
assert re.search(exp, out), "Expected '{0}' in output".format(exp)
If the file was instead copied from the ``tests`` subdirectory of the staged
source code, the path would be obtained as shown below.
@@ -5554,7 +5457,7 @@ source code, the path would be obtained as shown below.
db_filename = test_cache_dir.join("packages.db")
Alternatively, if the file was copied to the ``share/tests`` subdirectory
as part of the installation process, the test could access the path as
follows:
.. code-block:: python
@@ -5591,12 +5494,9 @@ Invoking the method is the equivalent of:
.. code-block:: python
errors = []
for check in expected:
if not re.search(check, actual):
errors.append(f"Expected '{check}' in output '{actual}'")
if errors:
raise RuntimeError("\n ".join(errors))
raise RuntimeError("Expected '{0}' in output '{1}'".format(check, actual))
.. _accessing-files:
@@ -5636,7 +5536,7 @@ repository, and installation.
- ``self.test_suite.test_dir_for_spec(self.spec)``
* - Current Spec's Build-time Files
- ``self.test_suite.current_test_cache_dir``
- ``join_path(self.test_suite.current_test_cache_dir.examples, "foo.c")``
- ``join_path(self.test_suite.current_test_cache_dir, "examples", "foo.c")``
* - Current Spec's Custom Test Files
- ``self.test_suite.current_test_data_dir``
- ``join_path(self.test_suite.current_test_data_dir, "hello.f90")``
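For orientation, the sketch below combines these properties in a single test
method; the ``hello.f90`` source, the output name, and the use of the ``FC``
environment variable are illustrative assumptions, not required conventions.

.. code-block:: python

   def test_hello(self):
       """build and run the custom hello example (illustrative sketch)"""
       # hello.f90 is assumed to live in the package's custom test data
       src = join_path(self.test_suite.current_test_data_dir, "hello.f90")
       with working_dir(self.test_suite.test_dir_for_spec(self.spec)):
           fc = which(os.environ["FC"])
           fc(src, "-o", "hello")
           hello = which("hello")
           hello()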
@@ -5651,7 +5551,7 @@ Inheriting stand-alone tests
Stand-alone tests defined in parent (e.g., :ref:`build-systems`) and
virtual (e.g., :ref:`virtual-dependencies`) packages are executed by
packages that inherit from or provide interface implementations for those
packages, respectively.
The table below summarizes the stand-alone tests that will be executed along
with those implemented in the package itself.
@@ -5721,7 +5621,7 @@ for ``openmpi``:
SKIPPED: test_version_oshcc: oshcc is not installed
...
==> [2023-03-10-16:04:02.215227] Completed testing
==> [2023-03-10-16:04:02.215597]
======================== SUMMARY: openmpi-4.1.4-ubmrigj ========================
Openmpi::test_bin_mpirun .. PASSED
Openmpi::test_bin_ompi_info .. PASSED
@@ -6171,7 +6071,7 @@ in the extra attributes can implement this method like this:
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
"""Check that "compilers" is in the extra attributes."""
msg = ("the extra attribute 'compilers' must be set for "
msg = ("the extra attribute "compilers" must be set for "
"the detected spec '{0}'".format(spec))
assert "compilers" in extra_attributes, msg
@@ -6247,100 +6147,7 @@ follows:
"foo-package@{0}".format(version_str)
)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Add detection tests to packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To ensure that software is detected correctly for multiple configurations
and on different systems, users can write a ``detection_test.yaml`` file and
put it in the package directory alongside the ``package.py`` file.
This YAML file contains enough information for Spack to mock an environment
and check whether the detection logic yields the expected results.
As a general rule, attributes at the top level of ``detection_test.yaml``
represent search mechanisms, and each maps to a list of tests that should
confirm the validity of the package's detection logic.
The detection tests can be run with the following command:
.. code-block:: console
$ spack audit externals
Errors that have been detected are reported to screen.
""""""""""""""""""""""""""
Tests for PATH inspections
""""""""""""""""""""""""""
Detection tests that rely on ``PATH`` inspections are listed under
the ``paths`` attribute:
.. code-block:: yaml
paths:
- layout:
- executables:
- "bin/clang-3.9"
- "bin/clang++-3.9"
script: |
echo "clang version 3.9.1-19ubuntu1 (tags/RELEASE_391/rc2)"
echo "Target: x86_64-pc-linux-gnu"
echo "Thread model: posix"
echo "InstalledDir: /usr/bin"
results:
- spec: 'llvm@3.9.1 +clang~lld~lldb'
Each test is performed by first creating a temporary directory structure as
specified in the corresponding ``layout``, then running
package detection and checking that the outcome matches the expected
``results``. The exact details on how to specify both the ``layout`` and the
``results`` are reported in the table below:
.. list-table:: Test based on PATH inspections
:header-rows: 1
* - Option Name
- Description
- Allowed Values
- Required Field
* - ``layout``
- Specifies the filesystem tree used for the test
- List of objects
- Yes
* - ``layout:[0]:executables``
- Relative paths for the mock executables to be created
- List of strings
- Yes
* - ``layout:[0]:script``
- Mock logic for the executable
- Any valid shell script
- Yes
* - ``results``
- List of expected results
- List of objects (empty if no result is expected)
- Yes
* - ``results:[0]:spec``
- A spec that is expected from detection
- Any valid spec
- Yes
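Because ``results`` may be an empty list, a layout can also encode a
*negative* test, i.e., one asserting that nothing is detected. A minimal
sketch, with a made-up executable name and script:

.. code-block:: yaml

   paths:
   - layout:
     - executables:
       - "bin/clang-bogus"
       script: |
         echo "this is not a real compiler"
     results: []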
"""""""""""""""""""""""""""""""
Reuse tests from other packages
"""""""""""""""""""""""""""""""
When using a custom repository, it is possible to customize a package that already exists in ``builtin``
and reuse its external tests. To do so, just write a ``detection_test.yaml`` alongside the customized
``package.py`` with an ``includes`` attribute. For instance, the ``detection_test.yaml`` for
``myrepo.llvm`` might look like:
.. code-block:: yaml
includes:
- "builtin.llvm"
This YAML file instructs Spack to run the detection tests defined in ``builtin.llvm`` in addition to
those locally defined in the file.
.. _package-lifecycle:
-----------------------------
Style guidelines for packages
@@ -6799,30 +6606,3 @@ To achieve backward compatibility with the single-class format Spack creates in
Overall the role of the adapter is to route access to attributes or methods first through the ``*Package``
hierarchy, and then back to the base class builder. This is schematically shown in the diagram above, where
the adapter role is to "emulate" a method resolution order like the one represented by the red arrows.
------------------------------
Specifying License Information
------------------------------
A significant portion of software that Spack packages is open source. Most open
source software is released under one or more common open source licenses.
Specifying the license that a package is released under in a project's
``package.py`` is good practice. To specify a license, find the SPDX identifier for
a project and then add it using the ``license`` directive:
.. code-block:: python
license("<SPDX Identifier HERE>")
Note that specifying a license without a ``when`` clause makes it apply to all
versions and variants of the package, which might not actually be the case.
For example, a project might have switched licenses at some point or have
certain build configurations that include files that are licensed differently.
To account for this, you can specify when licenses should be applied. For
example, to specify that a specific license identifier should only apply
to versions up to and including 1.5, you could write the following directive:
.. code-block:: python
license("...", when="@:1.5")

View File

@@ -213,16 +213,6 @@ pipeline jobs.
``spack ci generate``
^^^^^^^^^^^^^^^^^^^^^
Throughout this documentation, references to the "mirror" mean the target
mirror which is checked for the presence of up-to-date specs, and where
any scheduled jobs should push built binary packages. In the past, this
defaulted to the mirror at index 0 in the mirror configs, and could be
overridden using the ``--buildcache-destination`` argument. Starting with
Spack 0.23, ``spack ci generate`` will require you to identify this mirror
by the name "buildcache-destination". While you can configure any number
of mirrors as sources for your pipelines, you will need to identify the
destination mirror by name.
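Under that convention, the mirror configuration used by a pipeline might look
like the following sketch, where the URL is only a placeholder:

.. code-block:: yaml

   mirrors:
     buildcache-destination: s3://example-bucket/spack-cache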
Concretizes the specs in the active environment, stages them (as described in
:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk.
During concretization of the environment, ``spack ci generate`` also writes a

View File

@@ -4,7 +4,7 @@
SPDX-License-Identifier: (Apache-2.0 OR MIT)
=====================================
Spack for Homebrew/Conda Users
Using Spack to Replace Homebrew/Conda
=====================================
Spack is an incredibly powerful package manager, designed for supercomputers
@@ -191,18 +191,18 @@ The ``--fresh`` flag tells Spack to use the latest version of every package
where possible instead of trying to optimize for reuse of existing installed
packages.
The ``--force`` flag in addition tells Spack to overwrite its previous
concretization decisions, allowing you to choose a new version of Python.
If any of the new packages like Bash are already installed, ``spack install``
won't re-install them, it will keep the symlinks in place.
-----------------------------------
Updating & Cleaning Up Old Packages
-----------------------------------
If you're looking to mimic the behavior of Homebrew, you may also want to
clean up out-of-date packages from your environment after an upgrade. To
upgrade your entire software stack within an environment and clean up old
package versions, simply run the following commands:
.. code-block:: console
@@ -212,9 +212,9 @@ package versions, simply run the following commands:
$ spack concretize --fresh --force
$ spack install
$ spack gc
Running ``spack mark -i --all`` tells Spack to mark all of the existing
packages within an environment as "implicitly" installed. This tells
spack's garbage collection system that these packages should be cleaned up.
Don't worry however, this will not remove your entire environment.
@@ -223,8 +223,8 @@ a fresh concretization and will re-mark any packages that should remain
installed as "explicitly" installed.
**Note:** if you use multiple spack environments you should re-run ``spack install``
in each of your environments prior to running ``spack gc`` to prevent spack
from uninstalling any shared packages that are no longer required by the
environment you just upgraded.
--------------

View File

@@ -1,13 +1,13 @@
sphinx==7.2.6
sphinx==7.2.2
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==1.3.0
python-levenshtein==0.23.0
python-levenshtein==0.21.1
docutils==0.18.1
pygments==2.16.1
urllib3==2.0.7
pytest==7.4.2
urllib3==2.0.4
pytest==7.4.0
isort==5.12.0
black==23.9.1
black==23.7.0
flake8==6.1.0
mypy==1.6.1
mypy==1.5.1

View File

@@ -1,7 +1,9 @@
Name, Supported Versions, Notes, Requirement Reason
Python, 3.6--3.12, , Interpreter for Spack
Python, 3.6--3.11, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
bash, , , Compiler wrappers
tar, , , Extract/create archives
gzip, , , Compress/Decompress archives
unzip, , , Compress/Decompress archives

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.1 (commit df43a1834460bf94516136951c4729a3100603ec)
* Version: 0.2.1 (commit 9e1117bd8a2f0581bced161f2a2e8d6294d0300b)
astunparse
----------------

View File

@@ -1,2 +1,2 @@
"""Init file to avoid namespace packages"""
__version__ = "0.2.1"
__version__ = "0.2.0"

View File

@@ -79,18 +79,14 @@ def __init__(self, name, parents, vendor, features, compilers, generation=0):
self.features = features
self.compilers = compilers
self.generation = generation
# Cache the ancestor computation
self._ancestors = None
@property
def ancestors(self):
"""All the ancestors of this microarchitecture."""
if self._ancestors is None:
value = self.parents[:]
for parent in self.parents:
value.extend(a for a in parent.ancestors if a not in value)
self._ancestors = value
return self._ancestors
value = self.parents[:]
for parent in self.parents:
value.extend(a for a in parent.ancestors if a not in value)
return value
def _to_set(self):
"""Returns a set of the nodes in this microarchitecture DAG."""

View File

@@ -145,13 +145,6 @@
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
}
],
"intel": [
{
"versions": "16.0:",
"name": "corei7",
"flags": "-march={name} -mtune=generic -mpopcnt"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
@@ -224,13 +217,6 @@
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
}
],
"intel": [
{
"versions": "16.0:",
"name": "core-avx2",
"flags": "-march={name} -mtune={name} -fma -mf16c"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
@@ -314,13 +300,6 @@
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
}
],
"intel": [
{
"versions": "16.0:",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
@@ -1433,92 +1412,6 @@
]
}
},
"sapphirerapids": {
"from": [
"icelake"
],
"vendor": "GenuineIntel",
"features": [
"mmx",
"sse",
"sse2",
"ssse3",
"sse4_1",
"sse4_2",
"popcnt",
"aes",
"pclmulqdq",
"avx",
"rdrand",
"f16c",
"movbe",
"fma",
"avx2",
"bmi1",
"bmi2",
"rdseed",
"adx",
"clflushopt",
"xsavec",
"xsaveopt",
"avx512f",
"avx512vl",
"avx512bw",
"avx512dq",
"avx512cd",
"avx512vbmi",
"avx512ifma",
"sha_ni",
"clwb",
"rdpid",
"gfni",
"avx512_vbmi2",
"avx512_vpopcntdq",
"avx512_bitalg",
"avx512_vnni",
"vpclmulqdq",
"vaes",
"avx512_bf16",
"cldemote",
"movdir64b",
"movdiri",
"pdcm",
"serialize",
"waitpkg"
],
"compilers": {
"gcc": [
{
"versions": "11.0:",
"flags": "-march={name} -mtune={name}"
}
],
"clang": [
{
"versions": "12.0:",
"flags": "-march={name} -mtune={name}"
}
],
"intel": [
{
"versions": "2021.2:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": "2021.2:",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": "2021.2:",
"flags": "-march={name} -mtune={name}"
}
]
}
},
"k10": {
"from": ["x86_64"],
"vendor": "AuthenticAMD",
@@ -2172,6 +2065,8 @@
"pku",
"gfni",
"flush_l1d",
"erms",
"avic",
"avx512f",
"avx512dq",
"avx512ifma",
@@ -2188,12 +2083,12 @@
"compilers": {
"gcc": [
{
"versions": "10.3:12.2",
"versions": "10.3:13.0",
"name": "znver3",
"flags": "-march={name} -mtune={name} -mavx512f -mavx512dq -mavx512ifma -mavx512cd -mavx512bw -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512vnni -mavx512bitalg"
},
{
"versions": "12.3:",
"versions": "13.1:",
"name": "znver4",
"flags": "-march={name} -mtune={name}"
}

View File

@@ -1,105 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Path primitives that just require Python standard library."""
import functools
import sys
from typing import List, Optional
from urllib.parse import urlparse
class Path:
"""Enum to identify the path-style."""
unix: int = 0
windows: int = 1
platform_path: int = windows if sys.platform == "win32" else unix
def format_os_path(path: str, mode: int = Path.unix) -> str:
"""Formats the input path to use consistent, platform specific separators.
Absolute paths are converted between drive letters and a prepended '/' as per platform
requirement.
Parameters:
path: the path to be normalized, must be a string or expose the replace method.
mode: the path file separator style to normalize the passed path to.
Default is unix style, i.e. '/'
"""
if not path:
return path
if mode == Path.windows:
path = path.replace("/", "\\")
else:
path = path.replace("\\", "/")
return path
def convert_to_posix_path(path: str) -> str:
"""Converts the input path to POSIX style."""
return format_os_path(path, mode=Path.unix)
def convert_to_windows_path(path: str) -> str:
"""Converts the input path to Windows style."""
return format_os_path(path, mode=Path.windows)
def convert_to_platform_path(path: str) -> str:
"""Converts the input path to the current platform's native style."""
return format_os_path(path, mode=Path.platform_path)
def path_to_os_path(*parameters: str) -> List[str]:
"""Takes an arbitrary number of positional parameters, converts each argument of type
string to use a normalized filepath separator, and returns a list of all values.
"""
def _is_url(path_or_url: str) -> bool:
if "\\" in path_or_url:
return False
url_tuple = urlparse(path_or_url)
return bool(url_tuple.scheme) and len(url_tuple.scheme) > 1
result = []
for item in parameters:
if isinstance(item, str) and not _is_url(item):
item = convert_to_platform_path(item)
result.append(item)
return result
def system_path_filter(_func=None, arg_slice: Optional[slice] = None):
"""Filters function arguments to account for platform path separators.
Optional slicing range can be specified to select specific arguments
This decorator takes all (or a slice) of a method's positional arguments
and normalizes usage of filepath separators on a per platform basis.
Note: `**kwargs`, urls, and any type that is not a string are ignored
so in such cases where path normalization is required, that should be
handled by calling path_to_os_path directly as needed.
Parameters:
arg_slice: a slice object specifying the slice of arguments
in the decorated method over which filepath separators are
normalized
"""
def holder_func(func):
@functools.wraps(func)
def path_filter_caller(*args, **kwargs):
args = list(args)
if arg_slice:
args[arg_slice] = path_to_os_path(*args[arg_slice])
else:
args = path_to_os_path(*args)
return func(*args, **kwargs)
return path_filter_caller
if _func:
return holder_func(_func)
return holder_func
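# For context, a minimal sketch of how these primitives compose; the
# decorated function below is hypothetical and not part of this module.
@system_path_filter(arg_slice=slice(0, 1))
def read_first(path: str, mode: str = "r") -> str:
    # Only the first positional argument is normalized to the current
    # platform's separator style before the function body runs.
    with open(path, mode) as f:
        return f.read()

# convert_to_posix_path("C:\\spack\\bin") returns "C:/spack/bin"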

View File

@@ -1,67 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""String manipulation functions that do not have other dependencies than Python
standard library
"""
from typing import List, Optional
def comma_list(sequence: List[str], article: str = "") -> str:
if type(sequence) is not list:
sequence = list(sequence)
if not sequence:
return ""
if len(sequence) == 1:
return sequence[0]
out = ", ".join(str(s) for s in sequence[:-1])
if len(sequence) != 2:
out += "," # oxford comma
out += " "
if article:
out += article + " "
out += str(sequence[-1])
return out
def comma_or(sequence: List[str]) -> str:
"""Return a string with all the elements of the input joined by comma, but the last
one (which is joined by 'or').
"""
return comma_list(sequence, "or")
def comma_and(sequence: List[str]) -> str:
"""Return a string with all the elements of the input joined by comma, but the last
one (which is joined by 'and').
"""
return comma_list(sequence, "and")
def quote(sequence: List[str], q: str = "'") -> List[str]:
"""Quotes each item in the input list with the quote character passed as second argument."""
return [f"{q}{e}{q}" for e in sequence]
def plural(n: int, singular: str, plural: Optional[str] = None, show_n: bool = True) -> str:
"""Pluralize <singular> word by adding an s if n != 1.
Arguments:
n: number of things there are
singular: singular form of word
plural: optional plural form, for when it's not just singular + 's'
show_n: whether to include n in the result string (default True)
Returns:
"1 thing" if n == 1 or "n things" if n != 1
"""
number = f"{n} " if show_n else ""
if n == 1:
return f"{number}{singular}"
elif plural is not None:
return f"{number}{plural}"
else:
return f"{number}{singular}s"

View File

@@ -1,459 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""URL primitives that just require Python standard library."""
import itertools
import os.path
import re
from typing import Optional, Set, Tuple
from urllib.parse import urlsplit, urlunsplit
# Archive extensions allowed in Spack
PREFIX_EXTENSIONS = ("tar", "TAR")
EXTENSIONS = ("gz", "bz2", "xz", "Z")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz")
# Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = (
tuple(".".join(ext) for ext in itertools.product(PREFIX_EXTENSIONS, EXTENSIONS))
+ PREFIX_EXTENSIONS
+ EXTENSIONS
+ NO_TAR_EXTENSIONS
)
CONTRACTION_MAP = {"tgz": "tar.gz", "txz": "tar.xz", "tbz": "tar.bz2", "tbz2": "tar.bz2"}
def find_list_urls(url: str) -> Set[str]:
r"""Find good list URLs for the supplied URL.
By default, returns the dirname of the archive path.
Provides special treatment for the following websites, which have a
unique list URL different from the dirname of the download URL:
========= =======================================================
GitHub https://github.com/<repo>/<name>/releases
GitLab https://gitlab.\*/<repo>/<name>/tags
BitBucket https://bitbucket.org/<repo>/<name>/downloads/?tab=tags
CRAN https://\*.r-project.org/src/contrib/Archive/<name>
PyPI https://pypi.org/simple/<name>/
LuaRocks https://luarocks.org/modules/<repo>/<name>
========= =======================================================
Note: this function is called by `spack versions`, `spack checksum`,
and `spack create`, but not by `spack fetch` or `spack install`.
Parameters:
url (str): The download URL for the package
Returns:
set: One or more list URLs for the package
"""
url_types = [
# GitHub
# e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
(r"(.*github\.com/[^/]+/[^/]+)", lambda m: m.group(1) + "/releases"),
# GitLab API endpoint
# e.g. https://gitlab.dkrz.de/api/v4/projects/k202009%2Flibaec/repository/archive.tar.gz?sha=v1.0.2
(
r"(.*gitlab[^/]+)/api/v4/projects/([^/]+)%2F([^/]+)",
lambda m: m.group(1) + "/" + m.group(2) + "/" + m.group(3) + "/tags",
),
# GitLab non-API endpoint
# e.g. https://gitlab.dkrz.de/k202009/libaec/uploads/631e85bcf877c2dcaca9b2e6d6526339/libaec-1.0.0.tar.gz
(r"(.*gitlab[^/]+/(?!api/v4/projects)[^/]+/[^/]+)", lambda m: m.group(1) + "/tags"),
# BitBucket
# e.g. https://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2
(r"(.*bitbucket.org/[^/]+/[^/]+)", lambda m: m.group(1) + "/downloads/?tab=tags"),
# CRAN
# e.g. https://cran.r-project.org/src/contrib/Rcpp_0.12.9.tar.gz
# e.g. https://cloud.r-project.org/src/contrib/rgl_0.98.1.tar.gz
(
r"(.*\.r-project\.org/src/contrib)/([^_]+)",
lambda m: m.group(1) + "/Archive/" + m.group(2),
),
# PyPI
# e.g. https://pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://www.pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.python.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.io/packages/py2.py3/o/opencensus-context/opencensus_context-0.1.1-py2.py3-none-any.whl
(
r"(?:pypi|pythonhosted)[^/]+/packages/[^/]+/./([^/]+)",
lambda m: "https://pypi.org/simple/" + m.group(1) + "/",
),
# LuaRocks
# e.g. https://luarocks.org/manifests/gvvaughan/lpeg-1.0.2-1.src.rock
# e.g. https://luarocks.org/manifests/openresty/lua-cjson-2.1.0-1.src.rock
(
r"luarocks[^/]+/(?:modules|manifests)/(?P<org>[^/]+)/"
+ r"(?P<name>.+?)-[0-9.-]*\.src\.rock",
lambda m: "https://luarocks.org/modules/"
+ m.group("org")
+ "/"
+ m.group("name")
+ "/",
),
]
list_urls = {os.path.dirname(url)}
for pattern, fun in url_types:
match = re.search(pattern, url)
if match:
list_urls.add(fun(match))
return list_urls
def strip_query_and_fragment(url: str) -> Tuple[str, str]:
"""Strips query and fragment from a url, then returns the base url and the suffix.
Args:
url: URL to be stripped
Raises:
ValueError: when there is any error parsing the URL
"""
components = urlsplit(url)
stripped = components[:3] + (None, None)
query, frag = components[3:5]
suffix = ""
if query:
suffix += "?" + query
if frag:
suffix += "#" + frag
return urlunsplit(stripped), suffix
SOURCEFORGE_RE = re.compile(r"(.*(?:sourceforge\.net|sf\.net)/.*)(/download)$")
def split_url_on_sourceforge_suffix(url: str) -> Tuple[str, ...]:
"""If the input is a sourceforge URL, returns base URL and "/download" suffix. Otherwise,
returns the input URL and an empty string.
"""
match = SOURCEFORGE_RE.search(url)
if match is not None:
return match.groups()
return url, ""
def has_extension(path_or_url: str, ext: str) -> bool:
"""Returns true if the extension in input is present in path, false otherwise."""
prefix, _ = split_url_on_sourceforge_suffix(path_or_url)
if not ext.startswith(r"\."):
ext = rf"\.{ext}$"
if re.search(ext, prefix):
return True
return False
def extension_from_path(path_or_url: Optional[str]) -> Optional[str]:
"""Tries to match an allowed archive extension to the input. Returns the first match,
or None if no match was found.
Raises:
ValueError: if the input is None
"""
if path_or_url is None:
raise ValueError("Can't call extension() on None")
for t in ALLOWED_ARCHIVE_TYPES:
if has_extension(path_or_url, t):
return t
return None
def remove_extension(path_or_url: str, *, extension: str) -> str:
"""Returns the input with the extension removed"""
suffix = rf"\.{extension}$"
return re.sub(suffix, "", path_or_url)
def check_and_remove_ext(path: str, *, extension: str) -> str:
"""Returns the input path with the extension removed, if the extension is present in path.
Otherwise, returns the input unchanged.
"""
if not has_extension(path, extension):
return path
path, _ = split_url_on_sourceforge_suffix(path)
return remove_extension(path, extension=extension)
def strip_extension(path_or_url: str, *, extension: Optional[str] = None) -> str:
"""If a path contains the extension in input, returns the path stripped of the extension.
Otherwise, returns the input path.
If extension is None, attempts to strip any allowed extension from path.
"""
if extension is None:
for t in ALLOWED_ARCHIVE_TYPES:
if has_extension(path_or_url, ext=t):
extension = t
break
else:
return path_or_url
return check_and_remove_ext(path_or_url, extension=extension)
def split_url_extension(url: str) -> Tuple[str, ...]:
"""Some URLs have a query string, e.g.:
1. https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true
2. http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz
3. https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0
In (1), the query string needs to be stripped to get at the
extension, but in (2) & (3), the filename is IN a single final query
argument.
This strips the URL into three pieces: ``prefix``, ``ext``, and ``suffix``.
The suffix contains anything that was stripped off the URL to
get at the file extension. In (1), it will be ``'?raw=true'``, but
in (2), it will be empty. In (3) the suffix is a parameter that follows
after the file extension, e.g.:
1. ``('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7', '.tgz', '?raw=true')``
2. ``('http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin', '.tar.gz', None)``
3. ``('https://gitlab.kitware.com/vtk/vtk/repository/archive', '.tar.bz2', '?ref=v7.0.0')``
"""
# Strip off sourceforge download suffix.
# e.g. https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download
prefix, suffix = split_url_on_sourceforge_suffix(url)
ext = extension_from_path(prefix)
if ext is not None:
prefix = strip_extension(prefix)
return prefix, ext, suffix
try:
prefix, suf = strip_query_and_fragment(prefix)
except ValueError:
# FIXME: tty.debug("Got error parsing path %s" % path)
# Ignore URL parse errors here
return url, ""
ext = extension_from_path(prefix)
prefix = strip_extension(prefix)
suffix = suf + suffix
if ext is None:
ext = ""
return prefix, ext, suffix
def strip_version_suffixes(path_or_url: str) -> str:
"""Some tarballs contain extraneous information after the version:
* ``bowtie2-2.2.5-source``
* ``libevent-2.0.21-stable``
* ``cuda_8.0.44_linux.run``
These strings are not part of the version number and should be ignored.
This function strips those suffixes off and returns the remaining string.
The goal is that the version is always the last thing in ``path``:
* ``bowtie2-2.2.5``
* ``libevent-2.0.21``
* ``cuda_8.0.44``
Args:
path_or_url: The filename or URL for the package
Returns:
The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_version_offset
# NOTE: The problem is that we would have to add these regexes to the end
# NOTE: of every single version regex. Easier to just strip them off
# NOTE: permanently
suffix_regexes = [
# Download type
r"[Ii]nstall",
r"all",
r"code",
r"[Ss]ources?",
r"file",
r"full",
r"single",
r"with[a-zA-Z_-]+",
r"rock",
r"src(_0)?",
r"public",
r"bin",
r"binary",
r"run",
r"[Uu]niversal",
r"jar",
r"complete",
r"dynamic",
r"oss",
r"gem",
r"tar",
r"sh",
# Download version
r"release",
r"bin",
r"stable",
r"[Ff]inal",
r"rel",
r"orig",
r"dist",
r"\+",
# License
r"gpl",
# Arch
# Needs to come before and after OS, appears in both orders
r"ia32",
r"intel",
r"amd64",
r"linux64",
r"x64",
r"64bit",
r"x86[_-]64",
r"i586_64",
r"x86",
r"i[36]86",
r"ppc64(le)?",
r"armv?(7l|6l|64)",
# Other
r"cpp",
r"gtk",
r"incubating",
# OS
r"[Ll]inux(_64)?",
r"LINUX",
r"[Uu]ni?x",
r"[Ss]un[Oo][Ss]",
r"[Mm]ac[Oo][Ss][Xx]?",
r"[Oo][Ss][Xx]",
r"[Dd]arwin(64)?",
r"[Aa]pple",
r"[Ww]indows",
r"[Ww]in(64|32)?",
r"[Cc]ygwin(64|32)?",
r"[Mm]ingw",
r"centos",
# Arch
# Needs to come before and after OS, appears in both orders
r"ia32",
r"intel",
r"amd64",
r"linux64",
r"x64",
r"64bit",
r"x86[_-]64",
r"i586_64",
r"x86",
r"i[36]86",
r"ppc64(le)?",
r"armv?(7l|6l|64)?",
# PyPI
r"[._-]py[23].*\.whl",
r"[._-]cp[23].*\.whl",
r"[._-]win.*\.exe",
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path_or_url = re.sub(r"[._-]?" + regex + "$", "", path_or_url)
return path_or_url
def expand_contracted_extension(extension: str) -> str:
"""Returns the expanded version of a known contracted extension.
This function maps extensions like ".tgz" to ".tar.gz". On unknown extensions,
return the input unmodified.
"""
extension = extension.strip(".")
return CONTRACTION_MAP.get(extension, extension)
def expand_contracted_extension_in_path(
path_or_url: str, *, extension: Optional[str] = None
) -> str:
"""Returns the input path or URL with any contraction extension expanded.
Args:
path_or_url: path or URL to be expanded
extension: if specified, only attempt to expand that extension
"""
extension = extension or extension_from_path(path_or_url)
if extension is None:
return path_or_url
expanded = expand_contracted_extension(extension)
if expanded != extension:
return re.sub(rf"{extension}", rf"{expanded}", path_or_url)
return path_or_url
def compression_ext_from_compressed_archive(extension: str) -> Optional[str]:
"""Returns compression extension for a compressed archive"""
extension = expand_contracted_extension(extension)
for ext in [*EXTENSIONS]:
if ext in extension:
return ext
return None
def strip_compression_extension(path_or_url: str, ext: Optional[str] = None) -> str:
"""Strips the compression extension from the input, and returns it. For instance,
"foo.tgz" becomes "foo.tar".
If no extension is given, try a default list of extensions.
Args:
path_or_url: input to be stripped
ext: if given, extension to be stripped
"""
if not extension_from_path(path_or_url):
return path_or_url
expanded_path = expand_contracted_extension_in_path(path_or_url)
candidates = [ext] if ext is not None else EXTENSIONS
for current_extension in candidates:
modified_path = check_and_remove_ext(expanded_path, extension=current_extension)
if modified_path != expanded_path:
return modified_path
return expanded_path
def allowed_archive(path_or_url: str) -> bool:
"""Returns true if the input is a valid archive, False otherwise."""
return (
False if not path_or_url else any(path_or_url.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)
)
def determine_url_file_extension(path: str) -> str:
"""This returns the type of archive a URL refers to. This is
sometimes confusing because of URLs like:
(1) https://github.com/petdance/ack/tarball/1.93_02
Where the URL doesn't actually contain the filename. We need
to know what type it is so that we can appropriately name files
in mirrors.
"""
match = re.search(r"github.com/.+/(zip|tar)ball/", path)
if match:
if match.group(1) == "zip":
return "zip"
elif match.group(1) == "tar":
return "tar.gz"
prefix, ext, suffix = split_url_extension(path)
return ext
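# Illustrative behavior, per the docstrings above (not part of the module):
#
# split_url_extension(
#     "https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true"
# ) splits into the ".../PowerParser_v2.0.7" prefix, the "tgz" extension,
# and the "?raw=true" suffix.
assert strip_version_suffixes("libevent-2.0.21-stable") == "libevent-2.0.21"
assert expand_contracted_extension("tgz") == "tar.gz"
assert determine_url_file_extension(
    "https://github.com/petdance/ack/tarball/1.93_02"
) == "tar.gz"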

View File

@@ -11,7 +11,6 @@
import itertools
import numbers
import os
import pathlib
import posixpath
import re
import shutil
@@ -19,17 +18,14 @@
import sys
import tempfile
from contextlib import contextmanager
from itertools import accumulate
from typing import Callable, Iterable, List, Match, Optional, Tuple, Union
import llnl.util.symlink
from llnl.util import tty
from llnl.util.lang import dedupe, memoized
from llnl.util.symlink import islink, readlink, resolve_link_target_relative_to_the_link, symlink
from llnl.util.symlink import islink, symlink
from spack.util.executable import Executable, which
from ..path import path_to_os_path, system_path_filter
from spack.util.path import path_to_os_path, system_path_filter
if sys.platform != "win32":
import grp
@@ -105,7 +101,7 @@ def _nop(args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (islink(src) and islink(dst))
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
@@ -156,37 +152,6 @@ def lookup(name):
shutil.copystat = copystat
def polite_path(components: Iterable[str]):
"""
Given a list of strings which are intended to be path components,
generate a path, and format each component to avoid generating extra
path entries.
For example all "/", "\", and ":" characters will be replaced with
"_". Other characters like "=" will also be replaced.
"""
return os.path.join(*[polite_filename(x) for x in components])
@memoized
def _polite_antipattern():
# A regex of all the characters we don't want in a filename
return re.compile(r"[^A-Za-z0-9_.-]")
def polite_filename(filename: str) -> str:
"""
Replace generally problematic filename characters with underscores.
This differs from sanitize_filename in that it is more aggressive in
changing characters in the name. For example it removes "=" which can
confuse path parsing in external tools.
"""
# This character set applies for both Windows and Linux. It does not
# account for reserved filenames in Windows.
return _polite_antipattern().sub("_", filename)
def getuid():
if sys.platform == "win32":
import ctypes
@@ -204,7 +169,7 @@ def rename(src, dst):
if sys.platform == "win32":
# Windows path existence checks will sometimes fail on junctions/links/symlinks
# so check for that case
if os.path.exists(dst) or islink(dst):
if os.path.exists(dst) or os.path.islink(dst):
os.remove(dst)
os.rename(src, dst)
@@ -368,7 +333,8 @@ def groupid_to_group(x):
if string:
regex = re.escape(regex)
for filename in path_to_os_path(*filenames):
filenames = path_to_os_path(*filenames)
for filename in filenames:
msg = 'FILTER FILE: {0} [replacing "{1}"]'
tty.debug(msg.format(filename, regex))
@@ -600,7 +566,7 @@ def set_install_permissions(path):
# If this points to a file maintained in a Spack prefix, it is assumed that
# this function will be invoked on the target. If the file is outside a
# Spack-maintained prefix, the permissions should not be modified.
if islink(path):
if os.path.islink(path):
return
if os.path.isdir(path):
os.chmod(path, 0o755)
@@ -669,7 +635,7 @@ def chmod_x(entry, perms):
@system_path_filter
def copy_mode(src, dest):
"""Set the mode of dest to that of src unless it is a link."""
if islink(dest):
if os.path.islink(dest):
return
src_mode = os.stat(src).st_mode
dest_mode = os.stat(dest).st_mode
@@ -755,12 +721,26 @@ def install(src, dest):
copy(src, dest, _permissions=True)
@system_path_filter
def resolve_link_target_relative_to_the_link(link):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
target = os.readlink(link)
if os.path.isabs(target):
return target
link_dir = os.path.dirname(os.path.abspath(link))
return os.path.join(link_dir, target)
@system_path_filter
def copy_tree(
src: str,
dest: str,
symlinks: bool = True,
allow_broken_symlinks: bool = sys.platform != "win32",
ignore: Optional[Callable[[str], bool]] = None,
_permissions: bool = False,
):
@@ -783,8 +763,6 @@ def copy_tree(
src (str): the directory to copy
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
allow_broken_symlinks (bool): whether or not to allow broken (dangling) symlinks,
On Windows, setting this to True will raise an exception. Defaults to true on unix.
ignore (typing.Callable): function indicating which files to ignore
_permissions (bool): for internal use only
@@ -792,8 +770,6 @@ def copy_tree(
IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest*
"""
if allow_broken_symlinks and sys.platform == "win32":
raise llnl.util.symlink.SymlinkError("Cannot allow broken symlinks on Windows!")
if _permissions:
tty.debug("Installing {0} to {1}".format(src, dest))
else:
@@ -807,11 +783,6 @@ def copy_tree(
if not files:
raise IOError("No such file or directory: '{0}'".format(src))
# For Windows hard-links and junctions, the source path must exist to make a symlink. Add
# all symlinks to this list while traversing the tree, then when finished, make all
# symlinks at the end.
links = []
for src in files:
abs_src = os.path.abspath(src)
if not abs_src.endswith(os.path.sep):
@@ -834,7 +805,7 @@ def copy_tree(
ignore=ignore,
follow_nonexisting=True,
):
if islink(s):
if os.path.islink(s):
link_target = resolve_link_target_relative_to_the_link(s)
if symlinks:
target = os.readlink(s)
@@ -848,9 +819,7 @@ def escaped_path(path):
tty.debug("Redirecting link {0} to {1}".format(target, new_target))
target = new_target
links.append((target, d, s))
continue
symlink(target, d)
elif os.path.isdir(link_target):
mkdirp(d)
else:
@@ -865,17 +834,9 @@ def escaped_path(path):
set_install_permissions(d)
copy_mode(s, d)
for target, d, s in links:
symlink(target, d, allow_broken_symlinks=allow_broken_symlinks)
if _permissions:
set_install_permissions(d)
copy_mode(s, d)
@system_path_filter
def install_tree(
src, dest, symlinks=True, ignore=None, allow_broken_symlinks=sys.platform != "win32"
):
def install_tree(src, dest, symlinks=True, ignore=None):
"""Recursively install an entire directory tree rooted at *src*.
Same as :py:func:`copy_tree` with the addition of setting proper
@@ -886,21 +847,12 @@ def install_tree(
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
ignore (typing.Callable): function indicating which files to ignore
allow_broken_symlinks (bool): whether or not to allow broken (dangling) symlinks,
On Windows, setting this to True will raise an exception.
Raises:
IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest*
"""
copy_tree(
src,
dest,
symlinks=symlinks,
allow_broken_symlinks=allow_broken_symlinks,
ignore=ignore,
_permissions=True,
)
copy_tree(src, dest, symlinks=symlinks, ignore=ignore, _permissions=True)
@system_path_filter
@@ -1304,12 +1256,7 @@ def traverse_tree(
Keyword Arguments:
order (str): Whether to do pre- or post-order traversal. Accepted
values are 'pre' and 'post'
ignore (typing.Callable): function indicating which files to ignore. This will also
ignore symlinks if they point to an ignored file (regardless of whether the symlink
is explicitly ignored); note this only supports one layer of indirection (i.e. if
you have x -> y -> z, and z is ignored but x/y are not, then y would be ignored
but not x). To avoid this, make sure the ignore function also ignores the symlink
paths too.
ignore (typing.Callable): function indicating which files to ignore
follow_nonexisting (bool): Whether to descend into directories in
``src`` that do not exit in ``dest``. Default is True
follow_links (bool): Whether to descend into symlinks in ``src``
@@ -1336,24 +1283,11 @@ def traverse_tree(
dest_child = os.path.join(dest_path, f)
rel_child = os.path.join(rel_path, f)
# If the source path is a link and the link's source is ignored, then ignore the link too,
# but only do this if the ignore is defined.
if ignore is not None:
if islink(source_child) and not follow_links:
target = readlink(source_child)
all_parents = accumulate(target.split(os.sep), lambda x, y: os.path.join(x, y))
if any(map(ignore, all_parents)):
tty.warn(
f"Skipping {source_path} because the source or a part of the source's "
f"path is included in the ignores."
)
continue
# Treat as a directory
# TODO: for symlinks, os.path.isdir looks for the link target. If the
# target is relative to the link, then that may not resolve properly
# relative to our cwd - see resolve_link_target_relative_to_the_link
if os.path.isdir(source_child) and (follow_links or not islink(source_child)):
if os.path.isdir(source_child) and (follow_links or not os.path.islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
@@ -1379,11 +1313,7 @@ def traverse_tree(
def lexists_islink_isdir(path):
"""Computes the tuple (lexists(path), islink(path), isdir(path)) in a minimal
number of stat calls on unix. Use os.path and symlink.islink methods for windows."""
if sys.platform == "win32":
if not os.path.lexists(path):
return False, False, False
return os.path.lexists(path), islink(path), os.path.isdir(path)
number of stat calls."""
# First try to lstat, so we know if it's a link or not.
try:
lst = os.lstat(path)
@@ -1598,7 +1528,7 @@ def remove_if_dead_link(path):
Parameters:
path (str): The potential dead link
"""
if islink(path) and not os.path.exists(path):
if os.path.islink(path) and not os.path.exists(path):
os.unlink(path)
@@ -1657,7 +1587,7 @@ def remove_linked_tree(path):
kwargs["onerror"] = readonly_file_handler(ignore_errors=True)
if os.path.exists(path):
if islink(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), **kwargs)
os.unlink(path)
else:
@@ -2458,7 +2388,7 @@ def library_dependents(self):
"""
Set of directories where package binaries/libraries are located.
"""
return set([pathlib.Path(self.pkg.prefix.bin)]) | self._additional_library_dependents
return set([self.pkg.prefix.bin]) | self._additional_library_dependents
def add_library_dependent(self, *dest):
"""
@@ -2471,9 +2401,9 @@ def add_library_dependent(self, *dest):
"""
for pth in dest:
if os.path.isfile(pth):
self._additional_library_dependents.add(pathlib.Path(pth).parent)
self._additional_library_dependents.add(os.path.dirname)
else:
self._additional_library_dependents.add(pathlib.Path(pth))
self._additional_library_dependents.add(pth)
@property
def rpaths(self):
@@ -2486,7 +2416,7 @@ def rpaths(self):
dependent_libs.extend(list(find_all_shared_libraries(path, recursive=True)))
for extra_path in self._addl_rpaths:
dependent_libs.extend(list(find_all_shared_libraries(extra_path, recursive=True)))
return set([pathlib.Path(x) for x in dependent_libs])
return set(dependent_libs)
def add_rpath(self, *paths):
"""
@@ -2502,7 +2432,7 @@ def add_rpath(self, *paths):
"""
self._addl_rpaths = self._addl_rpaths | set(paths)
def _link(self, path: pathlib.Path, dest_dir: pathlib.Path):
def _link(self, path, dest_dir):
"""Perform link step of simulated rpathing, installing
simlinks of file in path to the dest_dir
location. This method deliberately prevents
@@ -2510,35 +2440,27 @@ def _link(self, path: pathlib.Path, dest_dir: pathlib.Path):
This is because it is both meaningless from an rpath
perspective, and will cause an error when Developer
mode is not enabled"""
def report_already_linked():
# We have either already symlinked or we are encountering a naming clash
# either way, we don't want to overwrite existing libraries
already_linked = islink(str(dest_file))
tty.debug(
"Linking library %s to %s failed, " % (str(path), str(dest_file))
+ "already linked."
if already_linked
else "library with name %s already exists at location %s."
% (str(file_name), str(dest_dir))
)
file_name = path.name
dest_file = dest_dir / file_name
if not dest_file.exists() and dest_dir.exists() and not dest_file == path:
file_name = os.path.basename(path)
dest_file = os.path.join(dest_dir, file_name)
if os.path.exists(dest_dir) and not dest_file == path:
try:
symlink(str(path), str(dest_file))
symlink(path, dest_file)
# For py2 compatibility, we have to catch the specific Windows error code
# associated with trying to create a file that already exists (winerror 183)
# Catch OSErrors missed by the SymlinkError checks
except OSError as e:
if sys.platform == "win32" and (e.winerror == 183 or e.errno == errno.EEXIST):
report_already_linked()
# We have either already symlinked or we are encountering a naming clash
# either way, we don't want to overwrite existing libraries
already_linked = islink(dest_file)
tty.debug(
"Linking library %s to %s failed, " % (path, dest_file) + "already linked."
if already_linked
else "library with name %s already exists at location %s."
% (file_name, dest_dir)
)
pass
else:
raise e
# catch errors we raise ourselves from Spack
except llnl.util.symlink.AlreadyExistsError:
report_already_linked()
def establish_link(self):
"""
@@ -2771,7 +2693,7 @@ def remove_directory_contents(dir):
"""Remove all contents of a directory."""
if os.path.exists(dir):
for entry in [os.path.join(dir, entry) for entry in os.listdir(dir)]:
if os.path.isfile(entry) or islink(entry):
if os.path.isfile(entry) or os.path.islink(entry):
os.unlink(entry)
else:
shutil.rmtree(entry)

View File

@@ -14,7 +14,7 @@
from llnl.util import lang, tty
from ..string import plural
import spack.util.string
if sys.platform != "win32":
import fcntl
@@ -169,7 +169,7 @@ def _attempts_str(wait_time, nattempts):
if nattempts <= 1:
return ""
attempts = plural(nattempts, "attempt")
attempts = spack.util.string.plural(nattempts, "attempt")
return " after {} and {}".format(lang.pretty_seconds(wait_time), attempts)

View File

@@ -2,189 +2,77 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import errno
import os
import re
import shutil
import subprocess
import sys
import tempfile
from os.path import exists, join
from llnl.util import lang, tty
from ..path import system_path_filter
from llnl.util import lang
if sys.platform == "win32":
from win32file import CreateHardLink
is_windows = sys.platform == "win32"
def symlink(source_path: str, link_path: str, allow_broken_symlinks: bool = not is_windows):
def symlink(real_path, link_path):
"""
Create a link.
Create a symbolic link.
On non-Windows and Windows with System Administrator
privileges this will be a normal symbolic link via
os.symlink.
On Windows without privileges the link will be a
junction for a directory and a hardlink for a file.
On Windows the various link types are:
Symbolic Link: A link to a file or directory on the
same or different volume (drive letter) or even to
a remote file or directory (using UNC in its path).
Need System Administrator privileges to make these.
Hard Link: A link to a file on the same volume (drive
letter) only. Every file (file's data) has at least 1
hard link (file's name). But when this method creates
a new hard link there will be 2. Deleting all hard
links effectively deletes the file. Don't need System
Administrator privileges.
Junction: A link to a directory on the same or different
volume (drive letter) but not to a remote directory. Don't
need System Administrator privileges.
Parameters:
source_path (str): The real file or directory that the link points to.
Must be absolute OR relative to the link.
link_path (str): The path where the link will exist.
allow_broken_symlinks (bool): On Linux or Mac, don't raise an exception if the source_path
doesn't exist. This will still raise an exception on Windows.
On Windows, use junctions if os.symlink fails.
"""
source_path = os.path.normpath(source_path)
win_source_path = source_path
link_path = os.path.normpath(link_path)
# Never allow broken links on Windows.
if sys.platform == "win32" and allow_broken_symlinks:
raise ValueError("allow_broken_symlinks parameter cannot be True on Windows.")
if not allow_broken_symlinks:
# Perform basic checks to make sure symlinking will succeed
if os.path.lexists(link_path):
raise AlreadyExistsError(
f"Link path ({link_path}) already exists. Cannot create link."
)
if not os.path.exists(source_path):
if os.path.isabs(source_path) and not allow_broken_symlinks:
# An absolute source path that does not exist will result in a broken link.
raise SymlinkError(
f"Source path ({source_path}) is absolute but does not exist. Resulting "
f"link would be broken so not making link."
)
else:
# os.symlink can create a link when the given source path is relative to
# the link path. Emulate this behavior and check to see if the source exists
# relative to the link path ahead of link creation to prevent broken
# links from being made.
link_parent_dir = os.path.dirname(link_path)
relative_path = os.path.join(link_parent_dir, source_path)
if os.path.exists(relative_path):
# In order to work on windows, the source path needs to be modified to be
# relative because hardlink/junction don't resolve relative paths the same
# way as os.symlink. This is ignored on other operating systems.
win_source_path = relative_path
elif not allow_broken_symlinks:
raise SymlinkError(
f"The source path ({source_path}) is not relative to the link path "
f"({link_path}). Resulting link would be broken so not making link."
)
# Create the symlink
if sys.platform == "win32" and not _windows_can_symlink():
_windows_create_link(win_source_path, link_path)
if sys.platform != "win32":
os.symlink(real_path, link_path)
elif _win32_can_symlink():
# Windows requires target_is_directory=True when the target is a dir.
os.symlink(real_path, link_path, target_is_directory=os.path.isdir(real_path))
else:
os.symlink(source_path, link_path, target_is_directory=os.path.isdir(source_path))
try:
# Try to use junctions
_win32_junction(real_path, link_path)
except OSError as e:
if e.errno == errno.EEXIST:
# EEXIST error indicates that file we're trying to "link"
# is already present, don't bother trying to copy which will also fail
# just raise
raise
else:
# If all else fails, fall back to copying files
shutil.copyfile(real_path, link_path)
def islink(path: str) -> bool:
"""Override os.islink to give correct answer for spack logic.
For Non-Windows: a link can be determined with the os.path.islink method.
Windows-only methods will return false for other operating systems.
For Windows: spack considers symlinks, hard links, and junctions to
all be links, so if any of those are True, return True.
Args:
path (str): path to check if it is a link.
Returns:
bool - whether the path is any kind of link or not.
"""
return any([os.path.islink(path), _windows_is_junction(path), _windows_is_hardlink(path)])
def islink(path):
return os.path.islink(path) or _win32_is_junction(path)
def _windows_is_hardlink(path: str) -> bool:
"""Determines if a path is a windows hard link. This is accomplished
by looking at the number of links using os.stat. A non-hard-linked file
will have a st_nlink value of 1, whereas a hard link will have a value
larger than 1. Note that both the original and hard-linked file will
return True because they share the same inode.
# '_win32' functions based on
# https://github.com/Erotemic/ubelt/blob/master/ubelt/util_links.py
def _win32_junction(path, link):
# junctions require absolute paths
if not os.path.isabs(link):
link = os.path.abspath(link)
Args:
path (str): Windows path to check for a hard link
# os.symlink will fail if link exists, emulate the behavior here
if exists(link):
raise OSError(errno.EEXIST, "File exists: %s -> %s" % (link, path))
Returns:
bool - Whether the path is a hard link or not.
"""
if sys.platform != "win32" or os.path.islink(path) or not os.path.exists(path):
return False
if not os.path.isabs(path):
parent = os.path.join(link, os.pardir)
path = os.path.join(parent, path)
path = os.path.abspath(path)
return os.stat(path).st_nlink > 1
def _windows_is_junction(path: str) -> bool:
"""Determines if a path is a windows junction. A junction can be
determined using a bitwise AND operation between the file's
attribute bitmask and the known junction bitmask (0x400).
Args:
path (str): A non-file path
Returns:
bool - whether the path is a junction or not.
"""
if sys.platform != "win32" or os.path.islink(path) or os.path.isfile(path):
return False
import ctypes.wintypes
get_file_attributes = ctypes.windll.kernel32.GetFileAttributesW # type: ignore[attr-defined]
get_file_attributes.argtypes = (ctypes.wintypes.LPWSTR,)
get_file_attributes.restype = ctypes.wintypes.DWORD
invalid_file_attributes = 0xFFFFFFFF
reparse_point = 0x400
file_attr = get_file_attributes(str(path))
if file_attr == invalid_file_attributes:
return False
return file_attr & reparse_point > 0
CreateHardLink(link, path)
@lang.memoized
def _windows_can_symlink() -> bool:
"""
Determines if windows is able to make a symlink depending on
the system configuration and the level of the user's permissions.
"""
if sys.platform != "win32":
tty.warn("windows_can_symlink method can't be used on non-Windows OS.")
return False
def _win32_can_symlink():
tempdir = tempfile.mkdtemp()
dpath = os.path.join(tempdir, "dpath")
fpath = os.path.join(tempdir, "fpath.txt")
dpath = join(tempdir, "dpath")
fpath = join(tempdir, "fpath.txt")
dlink = os.path.join(tempdir, "dlink")
flink = os.path.join(tempdir, "flink.txt")
dlink = join(tempdir, "dlink")
flink = join(tempdir, "flink.txt")
import llnl.util.filesystem as fs
@@ -208,140 +96,24 @@ def _windows_can_symlink() -> bool:
return can_symlink_directories and can_symlink_files
def _windows_create_link(source: str, link: str):
    """
    Attempts to create a Hard Link or Junction as an alternative
    to a symbolic link. This is called when symbolic links cannot
    be created.
    """
    if sys.platform != "win32":
        raise SymlinkError("windows_create_link method can't be used on non-Windows OS.")
    elif os.path.isdir(source):
        _windows_create_junction(source=source, link=link)
    elif os.path.isfile(source):
        _windows_create_hard_link(path=source, link=link)
    else:
        raise SymlinkError(
            f"Cannot create link from {source}. It is neither a file nor a directory."
        )


def _windows_create_junction(source: str, link: str):
    """Duly verify that the path and link are eligible to create a junction,
    then create the junction.
    """
    if sys.platform != "win32":
        raise SymlinkError("windows_create_junction method can't be used on non-Windows OS.")
    elif not os.path.exists(source):
        raise SymlinkError("Source path does not exist, cannot create a junction.")
    elif os.path.lexists(link):
        raise AlreadyExistsError("Link path already exists, cannot create a junction.")
    elif not os.path.isdir(source):
        raise SymlinkError("Source path is not a directory, cannot create a junction.")

    import subprocess

    cmd = ["cmd", "/C", "mklink", "/J", link, source]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    tty.debug(out.decode())
    if proc.returncode != 0:
        err = err.decode()
        tty.error(err)
        raise SymlinkError("Make junction command returned a non-zero return code.", err)


def _win32_is_junction(path):
    """
    Determines if a path is a win32 junction
    """
    if os.path.islink(path):
        return False

    if sys.platform == "win32":
        import ctypes.wintypes

        GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
        GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
        GetFileAttributes.restype = ctypes.wintypes.DWORD

        INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
        FILE_ATTRIBUTE_REPARSE_POINT = 0x400

        res = GetFileAttributes(path)
        return res != INVALID_FILE_ATTRIBUTES and bool(res & FILE_ATTRIBUTE_REPARSE_POINT)

    return False
def _windows_create_hard_link(path: str, link: str):
"""Duly verify that the path and link are eligible to create a hard
link, then create the hard link.
"""
if sys.platform != "win32":
raise SymlinkError("windows_create_hard_link method can't be used on non-Windows OS.")
elif not os.path.exists(path):
raise SymlinkError(f"File path {path} does not exist. Cannot create hard link.")
elif os.path.lexists(link):
raise AlreadyExistsError(f"Link path ({link}) already exists. Cannot create hard link.")
elif not os.path.isfile(path):
raise SymlinkError(f"File path ({link}) is not a file. Cannot create hard link.")
else:
tty.debug(f"Creating hard link {link} pointing to {path}")
CreateHardLink(link, path)
def readlink(path: str):
"""Spack utility to override of os.readlink method to work cross platform"""
if _windows_is_hardlink(path):
return _windows_read_hard_link(path)
elif _windows_is_junction(path):
return _windows_read_junction(path)
else:
return os.readlink(path)
def _windows_read_hard_link(link: str) -> str:
"""Find all of the files that point to the same inode as the link"""
if sys.platform != "win32":
raise SymlinkError("Can't read hard link on non-Windows OS.")
link = os.path.abspath(link)
fsutil_cmd = ["fsutil", "hardlink", "list", link]
proc = subprocess.Popen(fsutil_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise SymlinkError(f"An error occurred while reading hard link: {err.decode()}")
# fsutil response does not include the drive name, so append it back to each linked file.
drive, link_tail = os.path.splitdrive(os.path.abspath(link))
links = set([os.path.join(drive, p) for p in out.decode().splitlines()])
links.remove(link)
if len(links) == 1:
return links.pop()
elif len(links) > 1:
# TODO: How best to handle the case where 3 or more paths point to a single inode?
raise SymlinkError(f"Found multiple paths pointing to the same inode {links}")
else:
raise SymlinkError("Cannot determine hard link source path.")
def _windows_read_junction(link: str):
"""Find the path that a junction points to."""
if sys.platform != "win32":
raise SymlinkError("Can't read junction on non-Windows OS.")
link = os.path.abspath(link)
link_basename = os.path.basename(link)
link_parent = os.path.dirname(link)
fsutil_cmd = ["dir", "/a:l", link_parent]
proc = subprocess.Popen(fsutil_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise SymlinkError(f"An error occurred while reading junction: {err.decode()}")
matches = re.search(rf"<JUNCTION>\s+{link_basename} \[(.*)]", out.decode())
if matches:
return matches.group(1)
else:
raise SymlinkError("Could not find junction path.")
@system_path_filter
def resolve_link_target_relative_to_the_link(link):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
target = readlink(link)
if os.path.isabs(target):
return target
link_dir = os.path.dirname(os.path.abspath(link))
return os.path.join(link_dir, target)
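A small usage sketch with invented paths: for a link /opt/pkg/bin/tool whose target is ../lib/tool, the target must be resolved against the link's directory rather than the caller's cwd:

import os

link = "/opt/pkg/bin/tool"  # hypothetical link
target = "../lib/tool"      # what readlink(link) might return
link_dir = os.path.dirname(os.path.abspath(link))
print(os.path.join(link_dir, target))  # /opt/pkg/bin/../lib/tool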
class SymlinkError(RuntimeError):
"""Exception class for errors raised while creating symlinks,
junctions and hard links
"""
class AlreadyExistsError(SymlinkError):
"""Link path already exists."""

View File

@@ -8,8 +8,8 @@
from llnl.util.lang import memoized
import spack.spec
import spack.version
from spack.compilers.clang import Clang
from spack.spec import CompilerSpec
from spack.util.executable import Executable, ProcessError
@@ -17,9 +17,7 @@ class ABI:
"""This class provides methods to test ABI compatibility between specs.
The current implementation is rather rough and could be improved."""
def architecture_compatible(
self, target: spack.spec.Spec, constraint: spack.spec.Spec
) -> bool:
def architecture_compatible(self, target, constraint):
"""Return true if architecture of target spec is ABI compatible
to the architecture of constraint spec. If either the target
or constraint specs have no architecture, target is also defined
@@ -36,7 +34,7 @@ def _gcc_get_libstdcxx_version(self, version):
a compiler's libstdc++ or libgcc_s"""
from spack.build_environment import dso_suffix
spec = spack.spec.CompilerSpec("gcc", version)
spec = CompilerSpec("gcc", version)
compilers = spack.compilers.compilers_for_spec(spec)
if not compilers:
return None
@@ -79,20 +77,16 @@ def _gcc_compiler_compare(self, pversion, cversion):
return False
return plib == clib
def _intel_compiler_compare(
self, pversion: spack.version.ClosedOpenRange, cversion: spack.version.ClosedOpenRange
) -> bool:
def _intel_compiler_compare(self, pversion, cversion):
"""Returns true iff the intel version pversion and cversion
are ABI compatible"""
# Test major and minor versions. Ignore build version.
pv = pversion.lo
cv = cversion.lo
return pv.up_to(2) == cv.up_to(2)
if len(pversion.version) < 2 or len(cversion.version) < 2:
return False
return pversion.version[:2] == cversion.version[:2]
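A standalone sketch of the same major.minor rule using plain tuples instead of spack.version objects (the helper name and inputs are illustrative only):

def intel_abi_compatible(pversion: str, cversion: str) -> bool:
    # compare major and minor components only; ignore the build version
    pv = tuple(int(part) for part in pversion.split("."))
    cv = tuple(int(part) for part in cversion.split("."))
    return len(pv) >= 2 and len(cv) >= 2 and pv[:2] == cv[:2]

assert intel_abi_compatible("19.0.4", "19.0.5")      # same major.minor
assert not intel_abi_compatible("19.1.0", "19.0.5")  # minor differs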
def compiler_compatible(
self, parent: spack.spec.Spec, child: spack.spec.Spec, loose: bool = False
) -> bool:
def compiler_compatible(self, parent, child, **kwargs):
"""Return true if compilers for parent and child are ABI compatible."""
if not parent.compiler or not child.compiler:
return True
@@ -101,7 +95,7 @@ def compiler_compatible(
# Different compiler families are assumed ABI incompatible
return False
if loose:
if kwargs.get("loose", False):
return True
# TODO: Can we move the specialized ABI matching stuff
@@ -122,10 +116,9 @@ def compiler_compatible(
return True
return False
def compatible(
self, target: spack.spec.Spec, constraint: spack.spec.Spec, loose: bool = False
) -> bool:
def compatible(self, target, constraint, **kwargs):
"""Returns true if target spec is ABI compatible to constraint spec"""
loosematch = kwargs.get("loose", False)
return self.architecture_compatible(target, constraint) and self.compiler_compatible(
target, constraint, loose=loose
target, constraint, loose=loosematch
)

View File

@@ -38,13 +38,10 @@ def _search_duplicate_compilers(error_cls):
import ast
import collections
import collections.abc
import glob
import inspect
import itertools
import pathlib
import pickle
import re
import warnings
from urllib.request import urlopen
import llnl.util.lang
@@ -801,76 +798,3 @@ def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
errors.append(err)
return errors
#: Sanity checks on package directives
external_detection = AuditClass(
group="externals",
tag="PKG-EXTERNALS",
description="Sanity checks for external software detection",
kwargs=("pkgs",),
)
def packages_with_detection_tests():
"""Return the list of packages with a corresponding detection_test.yaml file."""
import spack.config
import spack.util.path
to_be_tested = []
for current_repo in spack.repo.PATH.repos:
namespace = current_repo.namespace
packages_dir = pathlib.PurePath(current_repo.packages_path)
pattern = packages_dir / "**" / "detection_test.yaml"
pkgs_with_tests = [
f"{namespace}.{str(pathlib.PurePath(x).parent.name)}" for x in glob.glob(str(pattern))
]
to_be_tested.extend(pkgs_with_tests)
return to_be_tested
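The glob above expects one detection_test.yaml per package directory. A hedged standalone sketch with a hypothetical repo layout (note that plain glob.glob needs recursive=True for ** to recurse):

import glob
import pathlib

packages_dir = pathlib.PurePath("var", "spack", "repos", "builtin", "packages")
pattern = packages_dir / "**" / "detection_test.yaml"
for match in glob.glob(str(pattern), recursive=True):
    # the package name is the directory that holds the yaml file
    print(pathlib.PurePath(match).parent.name)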
@external_detection
def _test_detection_by_executable(pkgs, error_cls):
"""Test drive external detection for packages"""
import spack.detection
errors = []
# Filter the packages and retain only the ones with detection tests
pkgs_with_tests = packages_with_detection_tests()
selected_pkgs = []
for current_package in pkgs_with_tests:
_, unqualified_name = spack.repo.partition_package_name(current_package)
# Check for both unqualified name and qualified name
if unqualified_name in pkgs or current_package in pkgs:
selected_pkgs.append(current_package)
selected_pkgs.sort()
if not selected_pkgs:
summary = "No detection test to run"
details = [f' "{p}" has no detection test' for p in pkgs]
warnings.warn("\n".join([summary] + details))
return errors
for pkg_name in selected_pkgs:
for idx, test_runner in enumerate(
spack.detection.detection_tests(pkg_name, spack.repo.PATH)
):
specs = test_runner.execute()
expected_specs = test_runner.expected_specs
not_detected = set(expected_specs) - set(specs)
if not_detected:
summary = pkg_name + ": cannot detect some specs"
details = [f'"{s}" was not detected [test_id={idx}]' for s in sorted(not_detected)]
errors.append(error_cls(summary=summary, details=details))
not_expected = set(specs) - set(expected_specs)
if not_expected:
summary = pkg_name + ": detected unexpected specs"
msg = '"{0}" was detected, but was not expected [test_id={1}]'
details = [msg.format(s, idx) for s in sorted(not_expected)]
errors.append(error_cls(summary=summary, details=details))
return errors

View File

@@ -9,6 +9,7 @@
import io
import itertools
import json
import multiprocessing.pool
import os
import re
import shutil
@@ -23,7 +24,7 @@
import warnings
from contextlib import closing, contextmanager
from gzip import GzipFile
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
from typing import List, NamedTuple, Optional, Union
from urllib.error import HTTPError, URLError
import llnl.util.filesystem as fsys
@@ -34,7 +35,6 @@
import spack.cmd
import spack.config as config
import spack.database as spack_db
import spack.error
import spack.hooks
import spack.hooks.sbang
import spack.mirror
@@ -49,7 +49,6 @@
import spack.util.gpg
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
import spack.util.timer as timer
import spack.util.url as url_util
import spack.util.web as web_util
from spack.caches import misc_cache_location
@@ -216,11 +215,11 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):
with self._index_file_cache.read_transaction(cache_key):
db._read_from_file(cache_path)
except spack_db.InvalidDatabaseVersionError as e:
tty.warn(
msg = (
f"you need a newer Spack version to read the buildcache index for the "
f"following mirror: '{mirror_url}'. {e.database_version_message}"
)
return
raise BuildcacheIndexError(msg) from e
spec_list = db.query_local(installed=False, in_buildcache=True)
@@ -625,7 +624,8 @@ def buildinfo_file_name(prefix):
"""
Filename of the binary package meta-data file
"""
return os.path.join(prefix, ".spack/binary_distribution")
name = os.path.join(prefix, ".spack/binary_distribution")
return name
def read_buildinfo_file(prefix):
@@ -646,7 +646,8 @@ class BuildManifestVisitor(BaseDirectoryVisitor):
directories."""
def __init__(self):
# Save unique identifiers of hardlinks to avoid relocating them multiple times
# Save unique identifiers of files to avoid
# relocating hardlink files for each path.
self.visited = set()
# Lists of files we will check
@@ -655,8 +656,6 @@ def __init__(self):
def seen_before(self, root, rel_path):
stat_result = os.lstat(os.path.join(root, rel_path))
if stat_result.st_nlink == 1:
return False
identifier = (stat_result.st_dev, stat_result.st_ino)
if identifier in self.visited:
return True
@@ -797,7 +796,11 @@ def tarball_directory_name(spec):
Return name of the tarball directory according to the convention
<os>-<architecture>/<compiler>/<package>-<version>/
"""
return spec.format_path("{architecture}/{compiler.name}-{compiler.version}/{name}-{version}")
return os.path.join(
str(spec.architecture),
f"{spec.compiler.name}-{spec.compiler.version}",
f"{spec.name}-{spec.version}",
)
def tarball_name(spec, ext):
@@ -805,10 +808,10 @@ def tarball_name(spec, ext):
Return the name of the tarfile according to the convention
<os>-<architecture>-<package>-<dag_hash><ext>
"""
spec_formatted = spec.format_path(
"{architecture}-{compiler.name}-{compiler.version}-{name}-{version}-{hash}"
return (
f"{spec.architecture}-{spec.compiler.name}-{spec.compiler.version}-"
f"{spec.name}-{spec.version}-{spec.dag_hash()}{ext}"
)
return f"{spec_formatted}{ext}"
def tarball_path_name(spec, ext):
@@ -873,18 +876,32 @@ def _read_specs_and_push_index(file_list, read_method, cache_prefix, db, temp_di
db: A spack database used for adding specs and then writing the index.
temp_dir (str): Location to write index.json and hash for pushing
concurrency (int): Number of parallel processes to use when fetching
"""
for file in file_list:
contents = read_method(file)
# Need full spec.json name or this gets confused with index.json.
if file.endswith(".json.sig"):
specfile_json = Spec.extract_json_from_clearsig(contents)
fetched_spec = Spec.from_dict(specfile_json)
elif file.endswith(".json"):
fetched_spec = Spec.from_json(contents)
else:
continue
Return:
None
"""
def _fetch_spec_from_mirror(spec_url):
spec_file_contents = read_method(spec_url)
if spec_file_contents:
# Need full spec.json name or this gets confused with index.json.
if spec_url.endswith(".json.sig"):
specfile_json = Spec.extract_json_from_clearsig(spec_file_contents)
return Spec.from_dict(specfile_json)
if spec_url.endswith(".json"):
return Spec.from_json(spec_file_contents)
tp = multiprocessing.pool.ThreadPool(processes=concurrency)
try:
fetched_specs = tp.map(
llnl.util.lang.star(_fetch_spec_from_mirror), [(f,) for f in file_list]
)
finally:
tp.terminate()
tp.join()
for fetched_spec in fetched_specs:
db.add(fetched_spec, None)
db.mark(fetched_spec, "in_buildcache", True)
@@ -909,7 +926,7 @@ def _read_specs_and_push_index(file_list, read_method, cache_prefix, db, temp_di
index_json_path,
url_util.join(cache_prefix, "index.json"),
keep_original=False,
extra_args={"ContentType": "application/json", "CacheControl": "no-cache"},
extra_args={"ContentType": "application/json"},
)
# Push the hash
@@ -917,7 +934,7 @@ def _read_specs_and_push_index(file_list, read_method, cache_prefix, db, temp_di
index_hash_path,
url_util.join(cache_prefix, "index.json.hash"),
keep_original=False,
extra_args={"ContentType": "text/plain", "CacheControl": "no-cache"},
extra_args={"ContentType": "text/plain"},
)
@@ -1153,99 +1170,57 @@ def gzip_compressed_tarfile(path):
yield tar
def _tarinfo_name(p: str):
return p.lstrip("/")
def deterministic_tarinfo(tarinfo: tarfile.TarInfo):
# We only add files, symlinks, hardlinks, and directories
# No character devices, block devices and FIFOs should ever enter a tarball.
if tarinfo.isdev():
return None
# For distribution, it makes no sense to keep user/group data; since (a) they don't exist
# on other machines, and (b) they lead to surprises as `tar x` run as root will change
# ownership if it can. We want to extract as the current user. By setting owner to root,
# root will extract as root, and non-privileged user will extract as themselves.
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.uname = ""
tarinfo.gname = ""
# Reset mtime to epoch time, our prefixes are not truly immutable, so files may get
# touched; as long as the content does not change, this ensures we get stable tarballs.
tarinfo.mtime = 0
# Normalize mode
if tarinfo.isfile() or tarinfo.islnk():
# If user can execute, use 0o755; else 0o644
# This is to avoid potentially unsafe world writable & executable files that may get
# extracted when Python or tar is run with privileges
tarinfo.mode = 0o644 if tarinfo.mode & 0o100 == 0 else 0o755
else: # symbolic link and directories
tarinfo.mode = 0o755
return tarinfo
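A usage sketch (archive and source paths are hypothetical): the function is designed to be passed as the filter= callback of TarFile.add, so every entry is normalized on its way into the archive:

import tarfile

with tarfile.open("/tmp/example.tar.gz", "w:gz") as tar:
    tar.add("/tmp/some-prefix", arcname="some-prefix", filter=deterministic_tarinfo)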
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
"""Create a tarfile of an install prefix of a spec. Skips existing buildinfo file.
Only adds regular files, symlinks and dirs. Skips devices, fifos. Preserves hardlinks.
Normalizes permissions like git. Tar entries are added in depth-first pre-order, with
dir entries partitioned by file | dir, and sorted alphabetically, for reproducibility.
Partitioning ensures only one dir is in memory at a time, and sorting improves compression.
Args:
tar: tarfile object to add files to
prefix: absolute install prefix of spec"""
if not os.path.isabs(prefix) or not os.path.isdir(prefix):
raise ValueError(f"prefix '{prefix}' must be an absolute path to a directory")
hardlink_to_tarinfo_name: Dict[Tuple[int, int], str] = dict()
stat_key = lambda stat: (stat.st_dev, stat.st_ino)
try: # skip buildinfo file if it exists
files_to_skip = [stat_key(os.lstat(buildinfo_file_name(prefix)))]
except OSError:
files_to_skip = []
dir_stack = [prefix]
while dir_stack:
dir = dir_stack.pop()
# Add the dir before its contents
dir_info = tarfile.TarInfo(_tarinfo_name(dir))
dir_info.type = tarfile.DIRTYPE
dir_info.mode = 0o755
tar.addfile(dir_info)
# Sort by name: reproducible & improves compression
with os.scandir(dir) as it:
entries = sorted(it, key=lambda entry: entry.name)
new_dirs = []
for entry in entries:
if entry.is_dir(follow_symlinks=False):
new_dirs.append(entry.path)
continue
file_info = tarfile.TarInfo(_tarinfo_name(entry.path))
s = entry.stat(follow_symlinks=False)
# Skip existing binary distribution files.
id = stat_key(s)
if id in files_to_skip:
continue
# Normalize the mode
file_info.mode = 0o644 if s.st_mode & 0o100 == 0 else 0o755
if entry.is_symlink():
file_info.type = tarfile.SYMTYPE
file_info.linkname = os.readlink(entry.path)
tar.addfile(file_info)
elif entry.is_file(follow_symlinks=False):
# Deduplicate hardlinks
if s.st_nlink > 1:
if id in hardlink_to_tarinfo_name:
file_info.type = tarfile.LNKTYPE
file_info.linkname = hardlink_to_tarinfo_name[id]
tar.addfile(file_info)
continue
hardlink_to_tarinfo_name[id] = file_info.name
# If file not yet seen, copy it.
file_info.type = tarfile.REGTYPE
file_info.size = s.st_size
with open(entry.path, "rb") as f:
tar.addfile(file_info, f)
dir_stack.extend(reversed(new_dirs)) # we pop, so reverse to stay alphabetical
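A hedged usage sketch with an invented prefix, combining this helper with the gzip_compressed_tarfile context manager defined earlier in this file:

# the prefix must be an absolute path to an existing directory
with gzip_compressed_tarfile("/tmp/zlib.tar.gz") as tar:
    tarfile_of_spec_prefix(tar, "/opt/spack/linux/zlib-1.2.13")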
def tar_add_metadata(tar: tarfile.TarFile, path: str, data: dict):
# Serialize buildinfo for the tarball
bstring = syaml.dump(data, default_flow_style=True).encode("utf-8")
tarinfo = tarfile.TarInfo(name=path)
tarinfo.size = len(bstring)
tar.addfile(deterministic_tarinfo(tarinfo), io.BytesIO(bstring))
def _do_create_tarball(tarfile_path: str, binaries_dir: str, buildinfo: dict):
def deterministic_tarinfo_without_buildinfo(tarinfo: tarfile.TarInfo):
"""Skip buildinfo file when creating a tarball, and normalize other tarinfo fields."""
if tarinfo.name.endswith("/.spack/binary_distribution"):
return None
return deterministic_tarinfo(tarinfo)
def _do_create_tarball(tarfile_path: str, binaries_dir: str, pkg_dir: str, buildinfo: dict):
with gzip_compressed_tarfile(tarfile_path) as tar:
# Tarball the install prefix
tarfile_of_spec_prefix(tar, binaries_dir)
# Serialize buildinfo for the tarball
bstring = syaml.dump(buildinfo, default_flow_style=True).encode("utf-8")
tarinfo = tarfile.TarInfo(name=_tarinfo_name(buildinfo_file_name(binaries_dir)))
tarinfo.type = tarfile.REGTYPE
tarinfo.size = len(bstring)
tarinfo.mode = 0o644
tar.addfile(tarinfo, io.BytesIO(bstring))
tar.add(name=binaries_dir, arcname=pkg_dir, filter=deterministic_tarinfo_without_buildinfo)
tar_add_metadata(tar, buildinfo_file_name(pkg_dir), buildinfo)
class PushOptions(NamedTuple):
@@ -1317,12 +1292,14 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
):
raise NoOverwriteException(url_util.format(remote_specfile_path))
pkg_dir = os.path.basename(spec.prefix.rstrip(os.path.sep))
binaries_dir = spec.prefix
# create info for later relocation and create tar
buildinfo = get_buildinfo_dict(spec)
_do_create_tarball(tarfile_path, binaries_dir, buildinfo)
_do_create_tarball(tarfile_path, binaries_dir, pkg_dir, buildinfo)
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
@@ -1454,7 +1431,7 @@ def try_fetch(url_to_fetch):
try:
stage.fetch()
except spack.error.FetchError:
except web_util.FetchError:
stage.destroy()
return None
@@ -1617,10 +1594,9 @@ def dedupe_hardlinks_if_necessary(root, buildinfo):
for rel_path in buildinfo[key]:
stat_result = os.lstat(os.path.join(root, rel_path))
identifier = (stat_result.st_dev, stat_result.st_ino)
if stat_result.st_nlink > 1:
if identifier in visited:
continue
visited.add(identifier)
if identifier in visited:
continue
visited.add(identifier)
new_list.append(rel_path)
buildinfo[key] = new_list
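The rule above can be read in isolation: among multiply-linked files, only paths whose (st_dev, st_ino) pair was already seen are dropped, so exactly one name per inode survives. A standalone sketch (the helper name is invented, not part of the diff):

import os

def dedupe_hardlinks(root, rel_paths):
    visited, unique = set(), []
    for rel_path in rel_paths:
        st = os.lstat(os.path.join(root, rel_path))
        if st.st_nlink > 1:
            key = (st.st_dev, st.st_ino)
            if key in visited:
                continue  # another name for an inode we already keep
            visited.add(key)
        unique.append(rel_path)
    return unique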
@@ -1837,11 +1813,10 @@ def _tar_strip_component(tar: tarfile.TarFile, prefix: str):
m.linkname = m.linkname[result.end() :]
def extract_tarball(spec, download_result, unsigned=False, force=False, timer=timer.NULL_TIMER):
def extract_tarball(spec, download_result, unsigned=False, force=False):
"""
extract binary tarball for given package into install area
"""
timer.start("extract")
if os.path.exists(spec.prefix):
if force:
shutil.rmtree(spec.prefix)
@@ -1921,9 +1896,7 @@ def extract_tarball(spec, download_result, unsigned=False, force=False, timer=ti
os.remove(tarfile_path)
os.remove(specfile_path)
timer.stop("extract")
timer.start("relocate")
try:
relocate_package(spec)
except Exception as e:
@@ -1944,7 +1917,6 @@ def extract_tarball(spec, download_result, unsigned=False, force=False, timer=ti
if os.path.exists(filename):
os.remove(filename)
_delete_staged_downloads(download_result)
timer.stop("relocate")
def _ensure_common_prefix(tar: tarfile.TarFile) -> str:
@@ -2182,7 +2154,7 @@ def get_keys(install=False, trust=False, force=False, mirrors=None):
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except spack.error.FetchError:
except web_util.FetchError:
continue
tty.debug("Found key {0}".format(fingerprint))
@@ -2334,7 +2306,7 @@ def _download_buildcache_entry(mirror_root, descriptions):
try:
stage.fetch()
break
except spack.error.FetchError as e:
except web_util.FetchError as e:
tty.debug(e)
else:
if fail_if_missing:

View File

@@ -228,7 +228,7 @@ def _install_and_test(
if not abstract_spec.intersects(candidate_spec):
continue
if python_spec is not None and not abstract_spec.intersects(f"^{python_spec}"):
if python_spec is not None and python_spec not in abstract_spec:
continue
for _, pkg_hash, pkg_sha256 in item["binaries"]:
@@ -446,11 +446,16 @@ def ensure_executables_in_path_or_raise(
current_bootstrapper.last_search["spec"],
current_bootstrapper.last_search["command"],
)
cmd.add_default_envmod(
spack.user_environment.environment_modifications_for_specs(
concrete_spec, set_package_py_globals=False
env_mods = spack.util.environment.EnvironmentModifications()
for dep in concrete_spec.traverse(
root=True, order="post", deptype=("link", "run")
):
env_mods.extend(
spack.user_environment.environment_modifications_for_spec(
dep, set_package_py_globals=False
)
)
)
cmd.add_default_envmod(env_mods)
return cmd
assert exception_handler, (
@@ -471,16 +476,16 @@ def ensure_executables_in_path_or_raise(
def _add_externals_if_missing() -> None:
search_list = [
# clingo
"cmake",
"bison",
spack.repo.PATH.get_pkg_class("cmake"),
spack.repo.PATH.get_pkg_class("bison"),
# GnuPG
"gawk",
spack.repo.PATH.get_pkg_class("gawk"),
# develop deps
"git",
spack.repo.PATH.get_pkg_class("git"),
]
if IS_WINDOWS:
search_list.append("winbison")
externals = spack.detection.by_path(search_list)
search_list.append(spack.repo.PATH.get_pkg_class("winbison"))
externals = spack.detection.by_executable(search_list)
# System git is typically deprecated, so mark as non-buildable to force it as external
non_buildable_externals = {k: externals.pop(k) for k in ("git",) if k in externals}
spack.detection.update_configuration(externals, scope="bootstrap", buildable=True)

View File

@@ -15,9 +15,9 @@
from llnl.util import tty
import spack.build_environment
import spack.environment
import spack.tengine
import spack.util.cpus
import spack.util.executable
from spack.environment import depfile
@@ -137,7 +137,7 @@ def _install_with_depfile(self) -> None:
"-C",
str(self.environment_root()),
"-j",
str(spack.util.cpus.determine_number_of_jobs(parallel=True)),
str(spack.build_environment.determine_number_of_jobs(parallel=True)),
**kwargs,
)

View File

@@ -40,15 +40,11 @@
import sys
import traceback
import types
from collections import defaultdict
from enum import Flag, auto
from itertools import chain
from typing import List, Tuple
import llnl.util.tty as tty
from llnl.string import plural
from llnl.util.filesystem import join_path
from llnl.util.lang import dedupe, stable_partition
from llnl.util.lang import dedupe
from llnl.util.symlink import symlink
from llnl.util.tty.color import cescape, colorize
from llnl.util.tty.log import MultiProcessFd
@@ -58,37 +54,35 @@
import spack.build_systems.python
import spack.builder
import spack.config
import spack.deptypes as dt
import spack.main
import spack.package_base
import spack.paths
import spack.platforms
import spack.repo
import spack.schema.environment
import spack.spec
import spack.store
import spack.subprocess_context
import spack.user_environment
import spack.util.path
import spack.util.pattern
from spack import traverse
from spack.context import Context
from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import spack_install_test_log
from spack.installer import InstallError
from spack.util.cpus import determine_number_of_jobs
from spack.util.cpus import cpus_available
from spack.util.environment import (
SYSTEM_DIRS,
EnvironmentModifications,
env_flag,
filter_system_paths,
get_path,
inspect_path,
is_system_path,
validate,
)
from spack.util.executable import Executable
from spack.util.log_parse import make_log_context, parse_log_events
from spack.util.module_cmd import load_module, module, path_from_modules
from spack.util.string import plural
#
# This can be set by the user to globally disable parallel builds.
@@ -115,6 +109,7 @@
SPACK_CCACHE_BINARY = "SPACK_CCACHE_BINARY"
SPACK_SYSTEM_DIRS = "SPACK_SYSTEM_DIRS"
# Platform-specific library suffix.
if sys.platform == "darwin":
dso_suffix = "dylib"
@@ -411,13 +406,19 @@ def set_compiler_environment_variables(pkg, env):
def set_wrapper_variables(pkg, env):
"""Set environment variables used by the Spack compiler wrapper (which have the prefix
`SPACK_`) and also add the compiler wrappers to PATH.
"""Set environment variables used by the Spack compiler wrapper
(which have the prefix `SPACK_`) and also add the compiler wrappers
to PATH.
This determines the injected -L/-I/-rpath options; each of these specifies a search order and
this function computes these options in a manner that is intended to match the DAG traversal
order in `SetupContext`. TODO: this is not the case yet, we're using post order, SetupContext
is using topo order."""
This determines the injected -L/-I/-rpath options; each
of these specifies a search order and this function computes these
options in a manner that is intended to match the DAG traversal order
in `modifications_from_dependencies`: that method uses a post-order
traversal so that `PrependPath` actions from dependencies take lower
precedence; we use a post-order traversal here to match the visitation
order of `modifications_from_dependencies` (so we are visiting the
lowest priority packages first).
"""
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
@@ -536,42 +537,78 @@ def update_compiler_args_for_dep(dep):
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))
def set_package_py_globals(pkg, context: Context = Context.BUILD):
def determine_number_of_jobs(
parallel=False, command_line=None, config_default=None, max_cpus=None
):
"""
Packages that require sequential builds need 1 job. Otherwise we use the
number of jobs set on the command line. If not set, then we use the config
defaults (which are usually set through the builtin config scope), but we
cap it to the number of CPUs available to avoid oversubscription.
Parameters:
parallel (bool or None): true when package supports parallel builds
command_line (int or None): command line override
config_default (int or None): config default number of jobs
max_cpus (int or None): maximum number of CPUs available. When None, this
value is automatically determined.
"""
if not parallel:
return 1
if command_line is None and "command_line" in spack.config.scopes():
command_line = spack.config.get("config:build_jobs", scope="command_line")
if command_line is not None:
return command_line
max_cpus = max_cpus or cpus_available()
# in some rare cases _builtin config may not be set, so default to max 16
config_default = config_default or spack.config.get("config:build_jobs", 16)
return min(max_cpus, config_default)
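A minimal standalone sketch of the precedence rules (no spack.config lookup; the default values are invented):

def njobs(parallel, command_line=None, config_default=16, max_cpus=8):
    if not parallel:
        return 1                     # sequential-only package
    if command_line is not None:
        return command_line          # explicit CLI override wins
    return min(max_cpus, config_default)  # config default, capped at CPU count

assert njobs(parallel=False) == 1
assert njobs(parallel=True, command_line=4) == 4
assert njobs(parallel=True, config_default=16, max_cpus=8) == 8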
def set_module_variables_for_package(pkg):
"""Populate the Python module of a package with some useful global names.
This makes things easier for package writers.
"""
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
marker = "_set_run_already_called"
if getattr(pkg.module, marker, False):
return
module = ModuleChangePropagator(pkg)
jobs = determine_number_of_jobs(parallel=pkg.parallel)
m = module
m.make_jobs = jobs
if context == Context.BUILD:
jobs = determine_number_of_jobs(parallel=pkg.parallel)
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable("make", jobs)
m.ninja = MakeExecutable("ninja", jobs, supports_jobserver=False)
# TODO: johnwparent: add package or builder support to define these build tools
# for now there is no entrypoint for builders to define these on their
# own
if sys.platform == "win32":
m.nmake = Executable("nmake")
m.msbuild = Executable("msbuild")
# analog to configure for win32
m.cscript = Executable("cscript")
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable("make", jobs)
m.gmake = MakeExecutable("gmake", jobs)
m.ninja = MakeExecutable("ninja", jobs, supports_jobserver=False)
# TODO: johnwparent: add package or builder support to define these build tools
# for now there is no entrypoint for builders to define these on their
# own
if sys.platform == "win32":
m.nmake = Executable("nmake")
m.msbuild = Executable("msbuild")
# analog to configure for win32
m.cscript = Executable("cscript")
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable("./configure")
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable("./configure")
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
m.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
m.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)
# Put spack compiler paths in module scope. (Some packages use it
# in setup_run_environment etc, so don't put it context == build)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
@@ -595,6 +632,9 @@ def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
m.static_to_shared_library = static_to_shared_library
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
setattr(m, marker, True)
module.propagate_changes_to_mro()
@@ -720,15 +760,12 @@ def load_external_modules(pkg):
load_module(external_module)
def setup_package(pkg, dirty, context: Context = Context.BUILD):
def setup_package(pkg, dirty, context="build"):
"""Execute all environment setup routines."""
if context not in (Context.BUILD, Context.TEST):
raise ValueError(f"'context' must be Context.BUILD or Context.TEST - got {context}")
if context not in ["build", "test"]:
raise ValueError("'context' must be one of ['build', 'test'] - got: {0}".format(context))
# First populate the package.py's module with the relevant globals that could be used in any
# of the setup_* functions.
setup_context = SetupContext(pkg.spec, context=context)
setup_context.set_all_package_py_globals()
set_module_variables_for_package(pkg)
# Keep track of env changes from packages separately, since we want to
# issue warnings when packages make "suspicious" modifications.
@@ -736,15 +773,13 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
env_mods = EnvironmentModifications()
# setup compilers for build contexts
need_compiler = context == Context.BUILD or (
context == Context.TEST and pkg.test_requires_compiler
)
need_compiler = context == "build" or (context == "test" and pkg.test_requires_compiler)
if need_compiler:
set_compiler_environment_variables(pkg, env_mods)
set_wrapper_variables(pkg, env_mods)
tty.debug("setup_package: grabbing modifications from dependencies")
env_mods.extend(setup_context.get_env_modifications())
env_mods.extend(modifications_from_dependencies(pkg.spec, context, custom_mods_only=False))
tty.debug("setup_package: collected all modifications from dependencies")
# architecture specific setup
@@ -752,7 +787,7 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
target = platform.target(pkg.spec.architecture.target)
platform.setup_platform_environment(pkg, env_mods)
if context == Context.BUILD:
if context == "build":
tty.debug("setup_package: setup build environment for root")
builder = spack.builder.create(pkg)
builder.setup_build_environment(env_mods)
@@ -763,7 +798,16 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
"config to assume that the package is part of the system"
" includes and omit it when invoked with '--cflags'."
)
elif context == Context.TEST:
elif context == "test":
tty.debug("setup_package: setup test environment for root")
env_mods.extend(
inspect_path(
pkg.spec.prefix,
spack.user_environment.prefix_inspections(pkg.spec.platform),
exclude=is_system_path,
)
)
pkg.setup_run_environment(env_mods)
env_mods.prepend_path("PATH", ".")
# First apply the clean environment changes
@@ -802,245 +846,158 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
return env_base
class EnvironmentVisitor:
def __init__(self, *roots: spack.spec.Spec, context: Context):
# For the roots (well, marked specs) we follow different edges
# than for their deps, depending on the context.
self.root_hashes = set(s.dag_hash() for s in roots)
def _make_runnable(pkg, env):
# Helper method which prepends a Package's bin/ prefix to the PATH
# environment variable
prefix = pkg.prefix
if context == Context.BUILD:
# Drop direct run deps in build context
# We don't really distinguish between install and build time test deps,
# so we include them here as build-time test deps.
self.root_depflag = dt.BUILD | dt.TEST | dt.LINK
elif context == Context.TEST:
# This is more of an extended run environment
self.root_depflag = dt.TEST | dt.RUN | dt.LINK
elif context == Context.RUN:
self.root_depflag = dt.RUN | dt.LINK
def neighbors(self, item):
spec = item.edge.spec
if spec.dag_hash() in self.root_hashes:
depflag = self.root_depflag
else:
depflag = dt.LINK | dt.RUN
return traverse.sort_edges(spec.edges_to_dependencies(depflag=depflag))
for dirname in ["bin", "bin64"]:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path("PATH", bin_dir)
class UseMode(Flag):
#: Entrypoint spec (a spec to be built; an env root, etc)
ROOT = auto()
def modifications_from_dependencies(
spec, context, custom_mods_only=True, set_package_py_globals=True
):
"""Returns the environment modifications that are required by
the dependencies of a spec and also applies modifications
to this spec's package at module scope, if need be.
#: A spec used at runtime, but no executables in PATH
RUNTIME = auto()
Environment modifications include:
#: A spec used at runtime, with executables in PATH
RUNTIME_EXECUTABLE = auto()
- Updating PATH so that executables can be found
- Updating CMAKE_PREFIX_PATH and PKG_CONFIG_PATH so that their respective
tools can find Spack-built dependencies
- Running custom package environment modifications
#: A spec that's a direct build or test dep
BUILDTIME_DIRECT = auto()
Custom package modifications can conflict with the default PATH changes
we make (specifically for the PATH, CMAKE_PREFIX_PATH, and PKG_CONFIG_PATH
environment variables), so this applies changes in a fixed order:
#: A spec that should be visible in search paths in a build env.
BUILDTIME = auto()
- All modifications (custom and default) from external deps first
- All modifications from non-external deps afterwards
#: Flag is set when the (node, mode) is finalized
ADDED = auto()
With that order, `PrependPath` actions from non-external default
environment modifications will take precedence over custom modifications
from external packages.
A secondary constraint is that custom and default modifications are
grouped on a per-package basis: combined with the post-order traversal this
means that default modifications of dependents can override custom
modifications of dependencies (again, this would only occur for PATH,
CMAKE_PREFIX_PATH, or PKG_CONFIG_PATH).
def effective_deptypes(
*specs: spack.spec.Spec, context: Context = Context.BUILD
) -> List[Tuple[spack.spec.Spec, UseMode]]:
"""Given a list of input specs and a context, return a list of tuples of
all specs that contribute to (environment) modifications, together with
a flag specifying in what way they do so. The list is ordered topologically
from root to leaf, meaning that environment modifications should be applied
in reverse so that dependents override dependencies, not the other way around."""
visitor = traverse.TopoVisitor(
EnvironmentVisitor(*specs, context=context),
key=lambda x: x.dag_hash(),
root=True,
all_edges=True,
)
traverse.traverse_depth_first_with_visitor(traverse.with_artificial_edges(specs), visitor)
# Dictionary with "no mode" as default value, so it's easy to write modes[x] |= flag.
use_modes = defaultdict(lambda: UseMode(0))
nodes_with_type = []
for edge in visitor.edges:
parent, child, depflag = edge.parent, edge.spec, edge.depflag
# Mark the starting point
if parent is None:
use_modes[child] = UseMode.ROOT
continue
parent_mode = use_modes[parent]
# Nothing to propagate.
if not parent_mode:
continue
# Depending on the context, include particular deps from the root.
if UseMode.ROOT & parent_mode:
if context == Context.BUILD:
if (dt.BUILD | dt.TEST) & depflag:
use_modes[child] |= UseMode.BUILDTIME_DIRECT
if dt.LINK & depflag:
use_modes[child] |= UseMode.BUILDTIME
elif context == Context.TEST:
if (dt.RUN | dt.TEST) & depflag:
use_modes[child] |= UseMode.RUNTIME_EXECUTABLE
elif dt.LINK & depflag:
use_modes[child] |= UseMode.RUNTIME
elif context == Context.RUN:
if dt.RUN & depflag:
use_modes[child] |= UseMode.RUNTIME_EXECUTABLE
elif dt.LINK & depflag:
use_modes[child] |= UseMode.RUNTIME
# Propagate RUNTIME and RUNTIME_EXECUTABLE through link and run deps.
if (UseMode.RUNTIME | UseMode.RUNTIME_EXECUTABLE | UseMode.BUILDTIME_DIRECT) & parent_mode:
if dt.LINK & depflag:
use_modes[child] |= UseMode.RUNTIME
if dt.RUN & depflag:
use_modes[child] |= UseMode.RUNTIME_EXECUTABLE
# Propagate BUILDTIME through link deps.
if UseMode.BUILDTIME & parent_mode:
if dt.LINK & depflag:
use_modes[child] |= UseMode.BUILDTIME
# Finalize the spec; the invariant is that all in-edges are processed
# before out-edges, meaning that parent is done.
if not (UseMode.ADDED & parent_mode):
use_modes[parent] |= UseMode.ADDED
nodes_with_type.append((parent, parent_mode))
# Attach the leaf nodes, since we only added nodes with out-edges.
for spec, parent_mode in use_modes.items():
if parent_mode and not (UseMode.ADDED & parent_mode):
nodes_with_type.append((spec, parent_mode))
return nodes_with_type
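A hedged usage sketch (root is assumed to be a concrete spack.spec.Spec): since the returned list is ordered root-to-leaf, callers iterate it in reverse so that dependents override dependencies when environment modifications are applied:

for spec, mode in reversed(effective_deptypes(root, context=Context.BUILD)):
    if UseMode.RUNTIME_EXECUTABLE & mode:
        print(f"{spec.name}: would prepend its bin/ directories to PATH")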
class SetupContext:
"""This class encapsulates the logic to determine environment modifications, and is used as
well to set globals in modules of package.py."""
def __init__(self, *specs: spack.spec.Spec, context: Context) -> None:
"""Construct a ModificationsFromDag object.
Args:
specs: single root spec for build/test context, possibly more for run context
context: build, run, or test"""
if (context == Context.BUILD or context == Context.TEST) and not len(specs) == 1:
raise ValueError("Cannot setup build environment for multiple specs")
specs_with_type = effective_deptypes(*specs, context=context)
self.specs = specs
self.context = context
self.external: List[Tuple[spack.spec.Spec, UseMode]]
self.nonexternal: List[Tuple[spack.spec.Spec, UseMode]]
# Reverse so we go from leaf to root
self.nodes_in_subdag = set(id(s) for s, _ in specs_with_type)
# Split into non-external and external, maintaining topo order per group.
self.external, self.nonexternal = stable_partition(
reversed(specs_with_type), lambda t: t[0].external
Args:
spec (spack.spec.Spec): spec for which we want the modifications
context (str): either 'build' for build-time modifications or 'run'
for run-time modifications
custom_mods_only (bool): if True returns only custom modifications, if False
returns custom and default modifications
set_package_py_globals (bool): whether or not to set the global variables in the
package.py files (this may be problematic when using buildcaches that have
been built on a different but compatible OS)
"""
if context not in ["build", "run", "test"]:
raise ValueError(
"Expecting context to be one of ['build', 'run', 'test'], " "got: {0}".format(context)
)
self.should_be_runnable = UseMode.BUILDTIME_DIRECT | UseMode.RUNTIME_EXECUTABLE
self.should_setup_run_env = UseMode.RUNTIME | UseMode.RUNTIME_EXECUTABLE
self.should_setup_dependent_build_env = UseMode.BUILDTIME | UseMode.BUILDTIME_DIRECT
if context == Context.RUN or context == Context.TEST:
self.should_be_runnable |= UseMode.ROOT
self.should_setup_run_env |= UseMode.ROOT
env = EnvironmentModifications()
# Everything that calls setup_run_environment and setup_dependent_* needs globals set.
self.should_set_package_py_globals = (
self.should_setup_dependent_build_env | self.should_setup_run_env | UseMode.ROOT
)
# In a build context, the root and direct build deps need build-specific globals set.
self.needs_build_context = UseMode.ROOT | UseMode.BUILDTIME_DIRECT
# Note: see computation of 'custom_mod_deps' and 'exe_deps' later in this
# function; these sets form the building blocks of those collections.
build_deps = set(spec.dependencies(deptype=("build", "test")))
link_deps = set(spec.traverse(root=False, deptype="link"))
build_link_deps = build_deps | link_deps
build_and_supporting_deps = set()
for build_dep in build_deps:
build_and_supporting_deps.update(build_dep.traverse(deptype="run"))
run_and_supporting_deps = set(spec.traverse(root=False, deptype=("run", "link")))
test_and_supporting_deps = set()
for test_dep in set(spec.dependencies(deptype="test")):
test_and_supporting_deps.update(test_dep.traverse(deptype="run"))
def set_all_package_py_globals(self):
"""Set the globals in modules of package.py files."""
for dspec, flag in chain(self.external, self.nonexternal):
pkg = dspec.package
# All dependencies that might have environment modifications to apply
custom_mod_deps = set()
if context == "build":
custom_mod_deps.update(build_and_supporting_deps)
# Tests may be performed after build
custom_mod_deps.update(test_and_supporting_deps)
else:
# test/run context
custom_mod_deps.update(run_and_supporting_deps)
if context == "test":
custom_mod_deps.update(test_and_supporting_deps)
custom_mod_deps.update(link_deps)
if self.should_set_package_py_globals & flag:
if self.context == Context.BUILD and self.needs_build_context & flag:
set_package_py_globals(pkg, context=Context.BUILD)
else:
# This includes runtime dependencies, also runtime deps of direct build deps.
set_package_py_globals(pkg, context=Context.RUN)
# Determine 'exe_deps': the set of packages with binaries we want to use
if context == "build":
exe_deps = build_and_supporting_deps | test_and_supporting_deps
elif context == "run":
exe_deps = set(spec.traverse(deptype="run"))
elif context == "test":
exe_deps = test_and_supporting_deps
for spec in dspec.dependents():
# Note: some specs have dependents that are unreachable from the root, so avoid
# setting globals for those.
if id(spec) not in self.nodes_in_subdag:
continue
dependent_module = ModuleChangePropagator(spec.package)
pkg.setup_dependent_package(dependent_module, spec)
dependent_module.propagate_changes_to_mro()
    def get_env_modifications(self) -> EnvironmentModifications:
        """Returns the environment variable modifications for the given input specs and context.
        Environment modifications include:
        - Updating PATH for packages that are required at runtime
        - Updating CMAKE_PREFIX_PATH and PKG_CONFIG_PATH so that their respective
        tools can find Spack-built dependencies (when context=build)
        - Running custom package environment modifications (setup_run_environment,
        setup_dependent_build_environment, setup_dependent_run_environment)
        The (partial) order imposed on the specs is externals first, then topological
        from leaf to root. That way externals cannot contribute search paths that would shadow
        Spack's prefixes, and dependents override variables set by dependencies."""
        env = EnvironmentModifications()
        for dspec, flag in chain(self.external, self.nonexternal):
            tty.debug(f"Adding env modifications for {dspec.name}")
            pkg = dspec.package
            if self.should_setup_dependent_build_env & flag:
                self._make_buildtime_detectable(dspec, env)
    def default_modifications_for_dep(dep):
        if dep in build_link_deps and not is_system_path(dep.prefix) and context == "build":
            prefix = dep.prefix
            env.prepend_path("CMAKE_PREFIX_PATH", prefix)
            for directory in ("lib", "lib64", "share"):
                pcdir = os.path.join(prefix, directory, "pkgconfig")
                if os.path.isdir(pcdir):
                    env.prepend_path("PKG_CONFIG_PATH", pcdir)
if dep in exe_deps and not is_system_path(dep.prefix):
_make_runnable(dep, env)
for spec in self.specs:
builder = spack.builder.create(pkg)
builder.setup_dependent_build_environment(env, spec)
def add_modifications_for_dep(dep):
tty.debug("Adding env modifications for {0}".format(dep.name))
# Some callers of this function only want the custom modifications.
# For callers that want both custom and default modifications, we want
# to perform the default modifications here (this groups custom
# and default modifications together on a per-package basis).
if not custom_mods_only:
default_modifications_for_dep(dep)
if self.should_be_runnable & flag:
self._make_runnable(dspec, env)
# Perform custom modifications here (PrependPath actions performed in
# the custom method override the default environment modifications
# we do to help the build, namely for PATH, CMAKE_PREFIX_PATH, and
# PKG_CONFIG_PATH)
if dep in custom_mod_deps:
dpkg = dep.package
if set_package_py_globals:
set_module_variables_for_package(dpkg)
if self.should_setup_run_env & flag:
# TODO: remove setup_dependent_run_environment...
for spec in dspec.dependents(deptype=dt.RUN):
if id(spec) in self.nodes_in_subdag:
pkg.setup_dependent_run_environment(env, spec)
pkg.setup_run_environment(env)
return env
current_module = ModuleChangePropagator(spec.package)
dpkg.setup_dependent_package(current_module, spec)
current_module.propagate_changes_to_mro()
def _make_buildtime_detectable(self, dep: spack.spec.Spec, env: EnvironmentModifications):
if is_system_path(dep.prefix):
return
if context == "build":
builder = spack.builder.create(dpkg)
builder.setup_dependent_build_environment(env, spec)
else:
dpkg.setup_dependent_run_environment(env, spec)
tty.debug("Added env modifications for {0}".format(dep.name))
env.prepend_path("CMAKE_PREFIX_PATH", dep.prefix)
for d in ("lib", "lib64", "share"):
pcdir = os.path.join(dep.prefix, d, "pkgconfig")
if os.path.isdir(pcdir):
env.prepend_path("PKG_CONFIG_PATH", pcdir)
# Note that we want to perform environment modifications in a fixed order.
# The Spec.traverse method provides this: i.e. in addition to
# the post-order semantics, it also guarantees a fixed traversal order
# among dependencies which are not constrained by post-order semantics.
for dspec in spec.traverse(root=False, order="post"):
if dspec.external:
add_modifications_for_dep(dspec)
def _make_runnable(self, dep: spack.spec.Spec, env: EnvironmentModifications):
if is_system_path(dep.prefix):
return
for dspec in spec.traverse(root=False, order="post"):
# Default env modifications for non-external packages can override
# custom modifications of external packages (this can only occur
# for modifications to PATH, CMAKE_PREFIX_PATH, and PKG_CONFIG_PATH)
if not dspec.external:
add_modifications_for_dep(dspec)
for d in ("bin", "bin64"):
bin_dir = os.path.join(dep.prefix, d)
if os.path.isdir(bin_dir):
env.prepend_path("PATH", bin_dir)
return env
def get_cmake_prefix_path(pkg):
@@ -1072,7 +1029,7 @@ def get_cmake_prefix_path(pkg):
def _setup_pkg_and_run(
serialized_pkg, function, kwargs, write_pipe, input_multiprocess_fd, jsfd1, jsfd2
):
context: str = kwargs.get("context", "build")
context = kwargs.get("context", "build")
try:
# We are in the child process. Python sets sys.stdin to
@@ -1088,7 +1045,7 @@ def _setup_pkg_and_run(
if not kwargs.get("fake", False):
kwargs["unmodified_env"] = os.environ.copy()
kwargs["env_modifications"] = setup_package(
pkg, dirty=kwargs.get("dirty", False), context=Context.from_string(context)
pkg, dirty=kwargs.get("dirty", False), context=context
)
return_value = function(pkg, kwargs)
write_pipe.send(return_value)

View File

@@ -46,7 +46,6 @@ class AutotoolsPackage(spack.package_base.PackageBase):
depends_on("gnuconfig", type="build", when="target=ppc64le:")
depends_on("gnuconfig", type="build", when="target=aarch64:")
depends_on("gnuconfig", type="build", when="target=riscv64:")
depends_on("gmake", type="build")
conflicts("platform=windows")
def flags_to_build_system_args(self, flags):

View File

@@ -142,10 +142,10 @@ def flags_to_build_system_args(self, flags):
# We specify for each of them.
if flags["ldflags"]:
ldflags = " ".join(flags["ldflags"])
ld_string = "-DCMAKE_{0}_LINKER_FLAGS={1}"
# cmake has separate linker arguments for types of builds.
self.cmake_flag_args.append(f"-DCMAKE_EXE_LINKER_FLAGS={ldflags}")
self.cmake_flag_args.append(f"-DCMAKE_MODULE_LINKER_FLAGS={ldflags}")
self.cmake_flag_args.append(f"-DCMAKE_SHARED_LINKER_FLAGS={ldflags}")
for type in ["EXE", "MODULE", "SHARED", "STATIC"]:
self.cmake_flag_args.append(ld_string.format(type, ldflags))
# CMake has libs options separated by language. Apply ours to each.
if flags["ldlibs"]:
@@ -274,6 +274,7 @@ def std_args(pkg, generator=None):
generator,
define("CMAKE_INSTALL_PREFIX", pathlib.Path(pkg.prefix).as_posix()),
define("CMAKE_BUILD_TYPE", build_type),
define("BUILD_TESTING", pkg.run_tests),
]
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
@@ -450,6 +451,7 @@ def cmake_args(self):
* CMAKE_INSTALL_PREFIX
* CMAKE_BUILD_TYPE
* BUILD_TESTING
which will be set automatically.
"""

View File

@@ -154,7 +154,7 @@ def cuda_flags(arch_list):
conflicts("%pgi@:15.3,15.5:", when="+cuda ^cuda@7.5 target=x86_64:")
conflicts("%pgi@:16.2,16.0:16.3", when="+cuda ^cuda@8 target=x86_64:")
conflicts("%pgi@:15,18:", when="+cuda ^cuda@9.0:9.1 target=x86_64:")
conflicts("%pgi@:16,19:", when="+cuda ^cuda@9.2.88:10.0 target=x86_64:")
conflicts("%pgi@:16,19:", when="+cuda ^cuda@9.2.88:10 target=x86_64:")
conflicts("%pgi@:17,20:", when="+cuda ^cuda@10.1.105:10.2.89 target=x86_64:")
conflicts("%pgi@:17,21:", when="+cuda ^cuda@11.0.2:11.1.0 target=x86_64:")
conflicts("%clang@:3.4", when="+cuda ^cuda@:7.5 target=x86_64:")

View File

@@ -9,8 +9,7 @@
import spack.builder
import spack.package_base
from spack.directives import build_system, conflicts, depends_on
from spack.multimethod import when
from spack.directives import build_system, conflicts
from ._checks import (
BaseBuilder,
@@ -30,10 +29,7 @@ class MakefilePackage(spack.package_base.PackageBase):
legacy_buildsystem = "makefile"
build_system("makefile")
with when("build_system=makefile"):
conflicts("platform=windows")
depends_on("gmake", type="build")
conflicts("platform=windows", when="build_system=makefile")
@spack.builder.builder("makefile")

View File

@@ -10,7 +10,7 @@
import spack.builder
import spack.package_base
from spack.directives import build_system, conflicts, depends_on, variant
from spack.directives import build_system, depends_on, variant
from spack.multimethod import when
from ._checks import BaseBuilder, execute_build_time_tests
@@ -47,13 +47,6 @@ class MesonPackage(spack.package_base.PackageBase):
variant("strip", default=False, description="Strip targets on install")
depends_on("meson", type="build")
depends_on("ninja", type="build")
# Python detection in meson requires distutils to be importable, but distutils no longer
# exists in Python 3.12. In Spack, we can't use setuptools as distutils replacement,
# because the distutils-precedence.pth startup file that setuptools ships with is not run
# when setuptools is in PYTHONPATH; it has to be in system site-packages. In a future meson
# release, the distutils requirement will be dropped, so this conflict can be relaxed.
# We have patches to make it work with meson 1.1 and above.
conflicts("^python@3.12:", when="^meson@:1.0")
def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass the specified

View File

@@ -95,7 +95,7 @@ def makefile_root(self):
return self.stage.source_path
@property
def makefile_name(self):
def nmakefile_name(self):
"""Name of the current makefile. This is currently an empty value.
If a project defines this value, it will be used with the /f argument
to provide nmake an explicit makefile. This is useful in scenarios where
@@ -126,8 +126,8 @@ def build(self, pkg, spec, prefix):
"""Run "nmake" on the build targets specified by the builder."""
opts = self.std_nmake_args
opts += self.nmake_args()
if self.makefile_name:
opts.append("/F{}".format(self.makefile_name))
if self.nmakefile_name:
opts.append("/f {}".format(self.nmakefile_name))
with fs.working_dir(self.build_directory):
inspect.getmodule(self.pkg).nmake(
*opts, *self.build_targets, ignore_quotes=self.ignore_quotes
@@ -139,8 +139,8 @@ def install(self, pkg, spec, prefix):
opts = self.std_nmake_args
opts += self.nmake_args()
opts += self.nmake_install_args()
if self.makefile_name:
opts.append("/F{}".format(self.makefile_name))
if self.nmakefile_name:
opts.append("/f {}".format(self.nmakefile_name))
opts.append(self.define("PREFIX", prefix))
with fs.working_dir(self.build_directory):
inspect.getmodule(self.pkg).nmake(
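For illustration, a minimal standalone sketch of the option assembly shown above (no Spack imports needed); the "/NOLOGO" standard argument and the makefile name are illustrative, not taken from this diff:

def nmake_opts(std_args, extra_args, makefile_name=""):
    opts = list(std_args) + list(extra_args)
    if makefile_name:
        # explicit makefile for nmake, matching the restored "/f {}" form above
        opts.append("/f {}".format(makefile_name))
    return opts

print(nmake_opts(["/NOLOGO"], [], "build.mak"))  # ['/NOLOGO', '/f build.mak']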

View File

@@ -61,11 +61,6 @@ def component_prefix(self):
"""Path to component <prefix>/<component>/<version>."""
return self.prefix.join(join_path(self.component_dir, self.spec.version))
@property
def env_script_args(self):
"""Additional arguments to pass to vars.sh script."""
return ()
def install(self, spec, prefix):
self.install_component(basename(self.url_for_version(spec.version)))
@@ -129,7 +124,7 @@ def setup_run_environment(self, env):
if "~envmods" not in self.spec:
env.extend(
EnvironmentModifications.from_sourcing_file(
join_path(self.component_prefix, "env", "vars.sh"), *self.env_script_args
join_path(self.component_prefix, "env", "vars.sh")
)
)

View File

@@ -6,7 +6,6 @@
import os
import re
import shutil
import stat
from typing import Optional
import archspec
@@ -17,7 +16,6 @@
import spack.builder
import spack.config
import spack.deptypes as dt
import spack.detection
import spack.multimethod
import spack.package_base
@@ -26,7 +24,6 @@
from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError, SpecError
from spack.install_test import test_part
from spack.util.executable import Executable
from spack.version import Version
from ._checks import BaseBuilder, execute_install_time_tests
@@ -229,48 +226,7 @@ def update_external_dependencies(self, extendee_spec=None):
python.external_path = self.spec.external_path
python._mark_concrete()
self.spec.add_dependency_edge(python, depflag=dt.BUILD | dt.LINK | dt.RUN, virtuals=())
def get_external_python_for_prefix(self):
"""
For an external package that extends python, find the most likely spec for the python
it depends on.
First search: an "installed" external that shares a prefix with this package
Second search: a configured external that shares a prefix with this package
Third search: search this prefix for a python package
Returns:
spack.spec.Spec: The external Spec for python most likely to be compatible with self.spec
"""
python_externals_installed = [
s for s in spack.store.STORE.db.query("python") if s.prefix == self.spec.external_path
]
if python_externals_installed:
return python_externals_installed[0]
python_external_config = spack.config.get("packages:python:externals", [])
python_externals_configured = [
spack.spec.parse_with_version_concrete(item["spec"])
for item in python_external_config
if item["prefix"] == self.spec.external_path
]
if python_externals_configured:
return python_externals_configured[0]
python_externals_detection = spack.detection.by_path(
["python"], path_hints=[self.spec.external_path]
)
python_externals_detected = [
d.spec
for d in python_externals_detection.get("python", [])
if d.prefix == self.spec.external_path
]
if python_externals_detected:
return python_externals_detected[0]
raise StopIteration("No external python could be detected for %s to depend on" % self.spec)
self.spec.add_dependency_edge(python, deptypes=("build", "link", "run"), virtuals=())
class PythonPackage(PythonExtension):
@@ -317,16 +273,54 @@ def list_url(cls):
name = cls.pypi.split("/")[0]
return "https://pypi.org/simple/" + name + "/"
def get_external_python_for_prefix(self):
"""
For an external package that extends python, find the most likely spec for the python
it depends on.
First search: an "installed" external that shares a prefix with this package
Second search: a configured external that shares a prefix with this package
Third search: search this prefix for a python package
Returns:
spack.spec.Spec: The external Spec for python most likely to be compatible with self.spec
"""
python_externals_installed = [
s for s in spack.store.STORE.db.query("python") if s.prefix == self.spec.external_path
]
if python_externals_installed:
return python_externals_installed[0]
python_external_config = spack.config.get("packages:python:externals", [])
python_externals_configured = [
spack.spec.parse_with_version_concrete(item["spec"])
for item in python_external_config
if item["prefix"] == self.spec.external_path
]
if python_externals_configured:
return python_externals_configured[0]
python_externals_detection = spack.detection.by_executable(
[spack.repo.PATH.get_pkg_class("python")], path_hints=[self.spec.external_path]
)
python_externals_detected = [
d.spec
for d in python_externals_detection.get("python", [])
if d.prefix == self.spec.external_path
]
if python_externals_detected:
return python_externals_detected[0]
raise StopIteration("No external python could be detected for %s to depend on" % self.spec)
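As context for the second search step above, a hedged sketch of the data shape that spack.config.get("packages:python:externals", []) returns, as the loop reads it; the spec string and prefix are made up:

python_external_config = [
    {"spec": "python@3.10.12", "prefix": "/usr"},  # illustrative entry
]
# the code above keeps entries whose "prefix" matches the package's
# external_path and parses item["spec"] into a concrete Spec
matches = [item for item in python_external_config if item["prefix"] == "/usr"]
print(matches)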
@property
def headers(self):
"""Discover header files in platlib."""
# Remove py- prefix in package name
name = self.spec.name[3:]
# Headers may be in either location
include = self.prefix.join(self.spec["python"].package.include).join(name)
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
include = self.prefix.join(self.spec["python"].package.include)
platlib = self.prefix.join(self.spec["python"].package.platlib)
headers = fs.find_all_headers(include) + fs.find_all_headers(platlib)
if headers:
@@ -340,64 +334,18 @@ def libs(self):
"""Discover libraries in platlib."""
# Remove py- prefix in package name
name = self.spec.name[3:]
library = "lib" + self.spec.name[3:].replace("-", "?")
root = self.prefix.join(self.spec["python"].package.platlib)
root = self.prefix.join(self.spec["python"].package.platlib).join(name)
libs = fs.find_all_libraries(root, recursive=True)
if libs:
return libs
for shared in [True, False]:
libs = fs.find_libraries(library, root, shared=shared, recursive=True)
if libs:
return libs
msg = "Unable to recursively locate {} libraries in {}"
raise NoLibrariesError(msg.format(self.spec.name, root))
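To make the glob pattern above concrete, a quick standalone illustration of the library name built from the package name (the package name is made up); the "?" is a single-character glob wildcard, presumably so a dash in the Spack name can also match an underscore on disk:

name = "py-ruamel-yaml"                         # illustrative spack package name
library = "lib" + name[3:].replace("-", "?")    # strip "py-", wildcard the dashes
print(library)                                  # libruamel?yaml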
def fixup_shebangs(path: str, old_interpreter: bytes, new_interpreter: bytes):
# Recurse into the install prefix and fixup shebangs
exe = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
dirs = [path]
hardlinks = set()
while dirs:
with os.scandir(dirs.pop()) as entries:
for entry in entries:
if entry.is_dir(follow_symlinks=False):
dirs.append(entry.path)
continue
# Only consider files, not symlinks
if not entry.is_file(follow_symlinks=False):
continue
lstat = entry.stat(follow_symlinks=False)
# Skip over files that are not executable
if not (lstat.st_mode & exe):
continue
# Don't modify hardlinks more than once
if lstat.st_nlink > 1:
key = (lstat.st_ino, lstat.st_dev)
if key in hardlinks:
continue
hardlinks.add(key)
# Finally replace shebangs if any.
with open(entry.path, "rb+") as f:
contents = f.read(2)
if contents != b"#!":
continue
contents += f.read()
if old_interpreter not in contents:
continue
f.seek(0)
f.write(contents.replace(old_interpreter, new_interpreter))
f.truncate()
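A hedged usage sketch for the helper above; the prefix and interpreter paths are illustrative, and in the real flow they come from the build venv and the underlying Python (see fixup_shebangs_pointing_to_build below):

fixup_shebangs(
    path="/opt/spack/linux/py-foo-1.0",                  # installed prefix (made up)
    old_interpreter=b"/tmp/stage/build_env/bin/python",  # venv interpreter
    new_interpreter=b"/usr/bin/python3.11",              # real interpreter
)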
@spack.builder.builder("python_pip")
class PythonPipBuilder(BaseBuilder):
phases = ("install",)
@@ -494,36 +442,8 @@ def global_options(self, spec, prefix):
"""
return []
@property
def _build_venv_path(self):
"""Return the path to the virtual environment used for building when
python is external."""
return os.path.join(self.spec.package.stage.path, "build_env")
@property
def _build_venv_python(self) -> Executable:
"""Return the Python executable in the build virtual environment when
python is external."""
return Executable(os.path.join(self._build_venv_path, "bin", "python"))
def install(self, pkg, spec, prefix):
"""Install everything from build directory."""
python: Executable = spec["python"].command
# Since we invoke pip with --no-build-isolation, we have to make sure that pip cannot
# execute hooks from user and system site-packages.
if spec["python"].external:
# There are no environment variables to disable the system site-packages, so we use a
# virtual environment instead. The downside of this approach is that pip produces
# incorrect shebangs that refer to the virtual environment, which we have to fix up.
python("-m", "venv", "--without-pip", self._build_venv_path)
pip = self._build_venv_python
else:
# For a Spack managed Python, system site-packages is empty/unused by design, so it
# suffices to disable user site-packages, for which there is an environment variable.
pip = python
pip.add_default_env("PYTHONNOUSERSITE", "1")
pip.add_default_arg("-m")
pip.add_default_arg("pip")
args = PythonPipBuilder.std_args(pkg) + ["--prefix=" + prefix]
@@ -547,31 +467,8 @@ def install(self, pkg, spec, prefix):
else:
args.append(".")
pip = inspect.getmodule(pkg).pip
with fs.working_dir(self.build_directory):
pip(*args)
@spack.builder.run_after("install")
def fixup_shebangs_pointing_to_build(self):
"""When installing a package using an external python, we use a temporary virtual
environment which improves build isolation. The downside is that pip produces shebangs
that point to the temporary virtual environment. This method fixes them up to point to the
underlying Python."""
# No need to fixup shebangs if no build venv was used. (this post install function also
# runs when install was overridden in another package, so check existence of the venv path)
if not os.path.exists(self._build_venv_path):
return
# Use sys.executable, since that's what pip uses.
interpreter = (
lambda python: python("-c", "import sys; print(sys.executable)", output=str)
.strip()
.encode("utf-8")
)
fixup_shebangs(
path=self.spec.prefix,
old_interpreter=interpreter(self._build_venv_python),
new_interpreter=interpreter(self.spec["python"].command),
)
spack.builder.run_after("install")(execute_install_time_tests)

View File

@@ -10,10 +10,9 @@
import llnl.util.tty as tty
import spack.builder
from spack.build_environment import SPACK_NO_PARALLEL_MAKE
from spack.build_environment import SPACK_NO_PARALLEL_MAKE, determine_number_of_jobs
from spack.directives import build_system, extends, maintainers
from spack.package_base import PackageBase
from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import env_flag
from spack.util.executable import Executable, ProcessError
@@ -64,7 +63,7 @@ class RacketBuilder(spack.builder.Builder):
@property
def subdirectory(self):
if self.pkg.racket_name:
if self.racket_name:
return "pkgs/{0}".format(self.pkg.racket_name)
return None
@@ -93,7 +92,7 @@ def install(self, pkg, spec, prefix):
"--copy",
"-i",
"-j",
str(determine_number_of_jobs(parallel=parallel)),
str(determine_number_of_jobs(parallel)),
"--",
os.getcwd(),
]

View File

@@ -49,11 +49,7 @@
TEMP_STORAGE_MIRROR_NAME = "ci_temporary_mirror"
SPACK_RESERVED_TAGS = ["public", "protected", "notary"]
# TODO: Remove this in Spack 0.23
SHARED_PR_MIRROR_URL = "s3://spack-binaries-prs/shared_pr_mirror"
JOB_NAME_FORMAT = (
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{arch=architecture}"
)
spack_gpg = spack.main.SpackCommand("gpg")
spack_compiler = spack.main.SpackCommand("compiler")
@@ -73,23 +69,48 @@ def __exit__(self, exc_type, exc_value, exc_traceback):
return False
def get_job_name(spec: spack.spec.Spec, build_group: str = ""):
"""Given a spec and possibly a build group, return the job name. If the
resulting name is longer than 255 characters, it will be truncated.
def get_job_name(spec, osarch, build_group):
"""Given the necessary parts, format the gitlab job name
Arguments:
spec (spack.spec.Spec): Spec job will build
osarch: Architecture TODO: (this is a spack.spec.ArchSpec,
but sphinx doesn't recognize the type and fails).
build_group (str): Name of build group this job belongs to (a CDash
notion)
Returns: The job name
"""
job_name = spec.format(JOB_NAME_FORMAT)
item_idx = 0
format_str = ""
format_args = []
format_str += "{{{0}}}".format(item_idx)
format_args.append(spec.name)
item_idx += 1
format_str += "/{{{0}}}".format(item_idx)
format_args.append(spec.dag_hash(7))
item_idx += 1
format_str += " {{{0}}}".format(item_idx)
format_args.append(spec.version)
item_idx += 1
format_str += " {{{0}}}".format(item_idx)
format_args.append(spec.compiler)
item_idx += 1
format_str += " {{{0}}}".format(item_idx)
format_args.append(osarch)
item_idx += 1
if build_group:
job_name = "{0} {1}".format(job_name, build_group)
format_str += " {{{0}}}".format(item_idx)
format_args.append(build_group)
item_idx += 1
return job_name[:255]
return format_str.format(*format_args)
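A standalone sketch of the naming behavior the newer code above implements: format the spec once, append the build group if any, and cap the result per the docstring's 255-character limit (the formatted spec string is illustrative):

def job_name(formatted_spec: str, build_group: str = "") -> str:
    name = formatted_spec
    if build_group:
        name = "{0} {1}".format(name, build_group)
    return name[:255]  # names longer than 255 characters are truncated

print(job_name("zlib@1.2.13 /abcdefg %gcc@11.3.0", "my-cdash-group"))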
def _remove_reserved_tags(tags):
@@ -287,7 +308,7 @@ def append_dep(s, d):
dependencies.append({"spec": s, "depends": d})
for spec in spec_list:
for s in spec.traverse(deptype="all"):
for s in spec.traverse(deptype=all):
if s.external:
tty.msg("Will not stage external pkg: {0}".format(s))
continue
@@ -295,7 +316,7 @@ def append_dep(s, d):
skey = _spec_deps_key(s)
spec_labels[skey] = s
for d in s.dependencies(deptype="all"):
for d in s.dependencies(deptype=all):
dkey = _spec_deps_key(d)
if d.external:
tty.msg("Will not stage external dep: {0}".format(d))
@@ -316,7 +337,7 @@ def _spec_matches(spec, match_string):
def _format_job_needs(
dep_jobs, build_group, prune_dag, rebuild_decisions, enable_artifacts_buildcache
dep_jobs, osname, build_group, prune_dag, rebuild_decisions, enable_artifacts_buildcache
):
needs_list = []
for dep_job in dep_jobs:
@@ -326,7 +347,7 @@ def _format_job_needs(
if not prune_dag or rebuild:
needs_list.append(
{
"job": get_job_name(dep_job, build_group),
"job": get_job_name(dep_job, dep_job.architecture, build_group),
"artifacts": enable_artifacts_buildcache,
}
)
@@ -679,7 +700,7 @@ def generate_gitlab_ci_yaml(
remote_mirror_override (str): Typically only needed when one spack.yaml
is used to populate several mirrors with binaries, based on some
criteria. Spack protected pipelines populate different mirrors based
on branch name, facilitated by this option. DEPRECATED
on branch name, facilitated by this option.
"""
with spack.concretize.disable_compiler_existence_check():
with env.write_transaction():
@@ -776,39 +797,17 @@ def generate_gitlab_ci_yaml(
"instead.",
)
pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
deprecated_mirror_config = False
buildcache_destination = None
if "buildcache-destination" in pipeline_mirrors:
if remote_mirror_override:
tty.die(
"Using the deprecated --buildcache-destination cli option and "
"having a mirror named 'buildcache-destination' at the same time "
"is not allowed"
)
buildcache_destination = pipeline_mirrors["buildcache-destination"]
else:
deprecated_mirror_config = True
# TODO: This will be an error in Spack 0.23
if "mirrors" not in yaml_root or len(yaml_root["mirrors"].values()) < 1:
tty.die("spack ci generate requires an env containing a mirror")
# TODO: Remove this block in spack 0.23
remote_mirror_url = None
if deprecated_mirror_config:
if "mirrors" not in yaml_root or len(yaml_root["mirrors"].values()) < 1:
tty.die("spack ci generate requires an env containing a mirror")
ci_mirrors = yaml_root["mirrors"]
mirror_urls = [url for url in ci_mirrors.values()]
remote_mirror_url = mirror_urls[0]
ci_mirrors = yaml_root["mirrors"]
mirror_urls = [url for url in ci_mirrors.values()]
remote_mirror_url = mirror_urls[0]
spack_buildcache_copy = os.environ.get("SPACK_COPY_BUILDCACHE", None)
if spack_buildcache_copy:
buildcache_copies = {}
buildcache_copy_src_prefix = (
buildcache_destination.fetch_url
if buildcache_destination
else remote_mirror_override or remote_mirror_url
)
buildcache_copy_src_prefix = remote_mirror_override or remote_mirror_url
buildcache_copy_dest_prefix = spack_buildcache_copy
# Check for a list of "known broken" specs that we should not bother
@@ -820,7 +819,6 @@ def generate_gitlab_ci_yaml(
enable_artifacts_buildcache = False
if "enable-artifacts-buildcache" in ci_config:
tty.warn("Support for enable-artifacts-buildcache will be removed in Spack 0.23")
enable_artifacts_buildcache = ci_config["enable-artifacts-buildcache"]
rebuild_index_enabled = True
@@ -829,15 +827,13 @@ def generate_gitlab_ci_yaml(
temp_storage_url_prefix = None
if "temporary-storage-url-prefix" in ci_config:
tty.warn("Support for temporary-storage-url-prefix will be removed in Spack 0.23")
temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
# If a remote mirror override (alternate buildcache destination) was
# specified, add it here in case it has already built hashes we might
# generate.
# TODO: Remove this block in Spack 0.23
mirrors_to_check = None
if deprecated_mirror_config and remote_mirror_override:
if remote_mirror_override:
if spack_pipeline_type == "spack_protected_branch":
# Overriding the main mirror in this case might result
# in skipping jobs on a release pipeline because specs are
@@ -857,9 +853,8 @@ def generate_gitlab_ci_yaml(
cfg.default_modify_scope(),
)
# TODO: Remove this block in Spack 0.23
shared_pr_mirror = None
if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
if spack_pipeline_type == "spack_pull_request":
stack_name = os.environ.get("SPACK_CI_STACK_NAME", "")
shared_pr_mirror = url_util.join(SHARED_PR_MIRROR_URL, stack_name)
spack.mirror.add(
@@ -911,7 +906,6 @@ def generate_gitlab_ci_yaml(
job_log_dir = os.path.join(pipeline_artifacts_dir, "logs")
job_repro_dir = os.path.join(pipeline_artifacts_dir, "reproduction")
job_test_dir = os.path.join(pipeline_artifacts_dir, "tests")
# TODO: Remove this line in Spack 0.23
local_mirror_dir = os.path.join(pipeline_artifacts_dir, "mirror")
user_artifacts_dir = os.path.join(pipeline_artifacts_dir, "user_data")
@@ -926,11 +920,11 @@ def generate_gitlab_ci_yaml(
rel_job_log_dir = os.path.relpath(job_log_dir, ci_project_dir)
rel_job_repro_dir = os.path.relpath(job_repro_dir, ci_project_dir)
rel_job_test_dir = os.path.relpath(job_test_dir, ci_project_dir)
# TODO: Remove this line in Spack 0.23
rel_local_mirror_dir = os.path.join(local_mirror_dir, ci_project_dir)
rel_user_artifacts_dir = os.path.relpath(user_artifacts_dir, ci_project_dir)
# Speed up staging by first fetching binary indices from all mirrors
# (including the override mirror we may have just added above).
try:
bindist.binary_index.update()
except bindist.FetchCacheError as e:
@@ -1029,23 +1023,19 @@ def main_script_replacements(cmd):
if "after_script" in job_object:
job_object["after_script"] = _unpack_script(job_object["after_script"])
job_name = get_job_name(release_spec, build_group)
osname = str(release_spec.architecture)
job_name = get_job_name(release_spec, osname, build_group)
job_vars = job_object.setdefault("variables", {})
job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec_dag_hash
job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
job_vars["SPACK_JOB_SPEC_PKG_VERSION"] = release_spec.format("{version}")
job_vars["SPACK_JOB_SPEC_COMPILER_NAME"] = release_spec.format("{compiler.name}")
job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}")
job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}")
job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}")
job_object["needs"] = []
if spec_label in dependencies:
if enable_artifacts_buildcache:
# Get dependencies transitively, so they're all
# available in the artifacts buildcache.
dep_jobs = [d for d in release_spec.traverse(deptype="all", root=False)]
dep_jobs = [d for d in release_spec.traverse(deptype=all, root=False)]
else:
# In this case, "needs" is only used for scheduling
# purposes, so we only get the direct dependencies.
@@ -1056,6 +1046,7 @@ def main_script_replacements(cmd):
job_object["needs"].extend(
_format_job_needs(
dep_jobs,
osname,
build_group,
prune_dag,
rebuild_decisions,
@@ -1141,7 +1132,6 @@ def main_script_replacements(cmd):
},
)
# TODO: Remove this block in Spack 0.23
if enable_artifacts_buildcache:
bc_root = os.path.join(local_mirror_dir, "build_cache")
job_object["artifacts"]["paths"].extend(
@@ -1171,12 +1161,10 @@ def main_script_replacements(cmd):
_print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions)
# Clean up remote mirror override if enabled
# TODO: Remove this block in Spack 0.23
if deprecated_mirror_config:
if remote_mirror_override:
spack.mirror.remove("ci_pr_mirror", cfg.default_modify_scope())
if spack_pipeline_type == "spack_pull_request":
spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
if remote_mirror_override:
spack.mirror.remove("ci_pr_mirror", cfg.default_modify_scope())
if spack_pipeline_type == "spack_pull_request":
spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
tty.debug("{0} build jobs generated in {1} stages".format(job_id, stage_id))
@@ -1207,28 +1195,10 @@ def main_script_replacements(cmd):
sync_job["needs"] = [
{"job": generate_job_name, "pipeline": "{0}".format(parent_pipeline_id)}
]
if "variables" not in sync_job:
sync_job["variables"] = {}
sync_job["variables"]["SPACK_COPY_ONLY_DESTINATION"] = (
buildcache_destination.fetch_url
if buildcache_destination
else remote_mirror_override or remote_mirror_url
)
if "buildcache-source" in pipeline_mirrors:
buildcache_source = pipeline_mirrors["buildcache-source"].fetch_url
else:
# TODO: Remove this condition in Spack 0.23
buildcache_source = os.environ.get("SPACK_SOURCE_MIRROR", None)
sync_job["variables"]["SPACK_BUILDCACHE_SOURCE"] = buildcache_source
output_object["copy"] = sync_job
job_id += 1
if job_id > 0:
# TODO: Remove this block in Spack 0.23
if temp_storage_url_prefix:
# There were some rebuild jobs scheduled, so we will need to
# schedule a job to clean up the temporary storage location
@@ -1262,13 +1232,6 @@ def main_script_replacements(cmd):
signing_job["when"] = "always"
signing_job["retry"] = {"max": 2, "when": ["always"]}
signing_job["interruptible"] = True
if "variables" not in signing_job:
signing_job["variables"] = {}
signing_job["variables"]["SPACK_BUILDCACHE_DESTINATION"] = (
buildcache_destination.push_url # need the s3 url for aws s3 sync
if buildcache_destination
else remote_mirror_override or remote_mirror_url
)
output_object["sign-pkgs"] = signing_job
@@ -1277,13 +1240,13 @@ def main_script_replacements(cmd):
stage_names.append("stage-rebuild-index")
final_job = spack_ci_ir["jobs"]["reindex"]["attributes"]
index_target_mirror = mirror_urls[0]
if remote_mirror_override:
index_target_mirror = remote_mirror_override
final_job["stage"] = "stage-rebuild-index"
target_mirror = remote_mirror_override or remote_mirror_url
if buildcache_destination:
target_mirror = buildcache_destination.push_url
final_job["script"] = _unpack_script(
final_job["script"],
op=lambda cmd: cmd.replace("{index_target_mirror}", target_mirror),
op=lambda cmd: cmd.replace("{index_target_mirror}", index_target_mirror),
)
final_job["when"] = "always"
@@ -1305,24 +1268,20 @@ def main_script_replacements(cmd):
"SPACK_CONCRETE_ENV_DIR": rel_concrete_env_dir,
"SPACK_VERSION": spack_version,
"SPACK_CHECKOUT_VERSION": version_to_clone,
# TODO: Remove this line in Spack 0.23
"SPACK_REMOTE_MIRROR_URL": remote_mirror_url,
"SPACK_JOB_LOG_DIR": rel_job_log_dir,
"SPACK_JOB_REPRO_DIR": rel_job_repro_dir,
"SPACK_JOB_TEST_DIR": rel_job_test_dir,
# TODO: Remove this line in Spack 0.23
"SPACK_LOCAL_MIRROR_DIR": rel_local_mirror_dir,
"SPACK_PIPELINE_TYPE": str(spack_pipeline_type),
"SPACK_CI_STACK_NAME": os.environ.get("SPACK_CI_STACK_NAME", "None"),
# TODO: Remove this line in Spack 0.23
"SPACK_CI_SHARED_PR_MIRROR_URL": shared_pr_mirror or "None",
"SPACK_REBUILD_CHECK_UP_TO_DATE": str(prune_dag),
"SPACK_REBUILD_EVERYTHING": str(rebuild_everything),
"SPACK_REQUIRE_SIGNING": os.environ.get("SPACK_REQUIRE_SIGNING", "False"),
}
# TODO: Remove this block in Spack 0.23
if deprecated_mirror_config and remote_mirror_override:
if remote_mirror_override:
(output_object["variables"]["SPACK_REMOTE_MIRROR_OVERRIDE"]) = remote_mirror_override
spack_stack_name = os.environ.get("SPACK_CI_STACK_NAME", None)
@@ -2062,23 +2021,43 @@ def process_command(name, commands, repro_dir, run=True, exit_on_failure=True):
def create_buildcache(
input_spec: spack.spec.Spec, *, destination_mirror_urls: List[str], sign_binaries: bool = False
input_spec: spack.spec.Spec,
*,
pipeline_mirror_url: Optional[str] = None,
buildcache_mirror_url: Optional[str] = None,
sign_binaries: bool = False,
) -> List[PushResult]:
"""Create the buildcache at the provided mirror(s).
Arguments:
input_spec: Installed spec to package and push
destination_mirror_urls: List of urls to push to
buildcache_mirror_url: URL for the buildcache mirror
pipeline_mirror_url: URL for the pipeline mirror
sign_binaries: Whether or not to sign buildcache entry
Returns: A list of PushResults, indicating success or failure.
"""
results = []
for mirror_url in destination_mirror_urls:
# Create buildcache in either the main remote mirror, or in the
# per-PR mirror, if this is a PR pipeline
if buildcache_mirror_url:
results.append(
PushResult(
success=push_mirror_contents(input_spec, mirror_url, sign_binaries), url=mirror_url
success=push_mirror_contents(input_spec, buildcache_mirror_url, sign_binaries),
url=buildcache_mirror_url,
)
)
# Create another copy of that buildcache in the per-pipeline
# temporary storage mirror (this is only done if either
# artifacts buildcache is enabled or a temporary storage url
# prefix is set)
if pipeline_mirror_url:
results.append(
PushResult(
success=push_mirror_contents(input_spec, pipeline_mirror_url, sign_binaries),
url=pipeline_mirror_url,
)
)
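A hedged call sketch for the newer, list-based signature shown above (the one taking destination_mirror_urls); the mirror URLs are illustrative, and job_spec stands in for an installed concrete spec assumed to be in scope:

results = create_buildcache(
    input_spec=job_spec,  # an installed spack.spec.Spec
    destination_mirror_urls=["s3://my-mirror", "file:///tmp/mirror"],
    sign_binaries=False,
)
for result in results:
    print(result.success, result.url)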

View File

@@ -11,7 +11,6 @@
from textwrap import dedent
from typing import List, Match, Tuple
import llnl.string
import llnl.util.tty as tty
from llnl.util.filesystem import join_path
from llnl.util.lang import attr_setdefault, index_by
@@ -30,6 +29,7 @@
import spack.user_environment as uenv
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
import spack.util.string
# cmd has a submodule called "list" so preserve the python list module
python_list = list
@@ -342,9 +342,9 @@ def iter_groups(specs, indent, all_headers):
print()
header = "%s{%s} / %s{%s}" % (
spack.spec.ARCHITECTURE_COLOR,
spack.spec.architecture_color,
architecture if architecture else "no arch",
spack.spec.COMPILER_COLOR,
spack.spec.compiler_color,
f"{compiler.display_str}" if compiler else "no compiler",
)
@@ -516,7 +516,7 @@ def print_how_many_pkgs(specs, pkg_type=""):
category, e.g. if pkg_type is "installed" then the message
would be "3 installed packages"
"""
tty.msg("%s" % llnl.string.plural(len(specs), pkg_type + " package"))
tty.msg("%s" % spack.util.string.plural(len(specs), pkg_type + " package"))
def spack_is_git_repo():

View File

@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import llnl.util.tty.colify
import llnl.util.tty.color as cl
import spack.audit
@@ -21,15 +20,6 @@ def setup_parser(subparser):
# Audit configuration files
sp.add_parser("configs", help="audit configuration files")
# Audit package recipes
external_parser = sp.add_parser("externals", help="check external detection in packages")
external_parser.add_argument(
"--list",
action="store_true",
dest="list_externals",
help="if passed, list which packages have detection tests",
)
# Https and other linting
https_parser = sp.add_parser("packages-https", help="check https in packages")
https_parser.add_argument(
@@ -39,7 +29,7 @@ def setup_parser(subparser):
# Audit package recipes
pkg_parser = sp.add_parser("packages", help="audit package recipes")
for group in [pkg_parser, https_parser, external_parser]:
for group in [pkg_parser, https_parser]:
group.add_argument(
"name",
metavar="PKG",
@@ -72,18 +62,6 @@ def packages_https(parser, args):
_process_reports(reports)
def externals(parser, args):
if args.list_externals:
msg = "@*{The following packages have detection tests:}"
tty.msg(cl.colorize(msg))
llnl.util.tty.colify.colify(spack.audit.packages_with_detection_tests(), indent=2)
return
pkgs = args.name or spack.repo.PATH.all_package_names()
reports = spack.audit.run_group(args.subcommand, pkgs=pkgs)
_process_reports(reports)
def list(parser, args):
for subcommand, check_tags in spack.audit.GROUPS.items():
print(cl.colorize("@*b{" + subcommand + "}:"))
@@ -100,7 +78,6 @@ def list(parser, args):
def audit(parser, args):
subcommands = {
"configs": configs,
"externals": externals,
"packages": packages,
"packages-https": packages_https,
"list": list,

View File

@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.cmd.common.env_utility as env_utility
from spack.context import Context
description = (
"run a command in a spec's install environment, or dump its environment to screen or file"
@@ -15,4 +14,4 @@
def build_env(parser, args):
env_utility.emulate_env_utility("build-env", Context.BUILD, args)
env_utility.emulate_env_utility("build-env", "build", args)

View File

@@ -13,7 +13,6 @@
import llnl.util.tty as tty
import llnl.util.tty.color as clr
from llnl.string import plural
from llnl.util.lang import elide_list
import spack.binary_distribution as bindist
@@ -21,7 +20,6 @@
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment as ev
import spack.error
import spack.mirror
import spack.relocate
import spack.repo
@@ -33,6 +31,7 @@
from spack.cmd import display_specs
from spack.spec import Spec, save_dependency_specfiles
from spack.stage import Stage
from spack.util.string import plural
description = "create, download and install binary packages"
section = "packaging"
@@ -79,11 +78,6 @@ def setup_parser(subparser: argparse.ArgumentParser):
"Alternatively, one can decide to build a cache for only the package or only the "
"dependencies",
)
push.add_argument(
"--fail-fast",
action="store_true",
help="stop pushing on first failure (default is best effort)",
)
arguments.add_common_arguments(push, ["specs"])
push.set_defaults(func=push_fn)
@@ -268,7 +262,7 @@ def _matching_specs(specs: List[Spec]) -> List[Spec]:
return [spack.cmd.disambiguate_spec(s, ev.active_environment(), installed=any) for s in specs]
def push_fn(args: argparse.Namespace):
def push_fn(args):
"""create a binary package and push it to a mirror"""
if args.spec_file:
tty.warn(
@@ -302,7 +296,6 @@ def push_fn(args: argparse.Namespace):
tty.info(f"Selected {len(specs)} specs to push to {url}")
skipped = []
failed = []
# tty printing
color = clr.get_color_when()
@@ -333,17 +326,11 @@ def push_fn(args: argparse.Namespace):
except bindist.NoOverwriteException:
skipped.append(format_spec(spec))
# Catch any other exception unless the fail fast option is set
except Exception as e:
if args.fail_fast or isinstance(e, (bindist.PickKeyException, bindist.NoKeyException)):
raise
failed.append((format_spec(spec), e))
if skipped:
if len(specs) == 1:
tty.info("The spec is already in the buildcache. Use --force to overwrite it.")
elif len(skipped) == len(specs):
tty.info("All specs are already in the buildcache. Use --force to overwrite them.")
tty.info("All specs are already in the buildcache. Use --force to overwite them.")
else:
tty.info(
"The following {} specs were skipped as they already exist in the buildcache:\n"
@@ -353,17 +340,6 @@ def push_fn(args: argparse.Namespace):
)
)
if failed:
if len(failed) == 1:
raise failed[0][1]
raise spack.error.SpackError(
f"The following {len(failed)} errors occurred while pushing specs to the buildcache",
"\n".join(
elide_list([f" {spec}: {e.__class__.__name__}: {e}" for spec, e in failed], 5)
),
)
def install_fn(args):
"""install from a binary package"""
@@ -414,7 +390,7 @@ def preview_fn(args):
)
def check_fn(args: argparse.Namespace):
def check_fn(args):
"""check specs against remote binary mirror(s) to see if any need to be rebuilt
this command uses the process exit code to indicate its result, specifically, if the
@@ -429,7 +405,7 @@ def check_fn(args: argparse.Namespace):
specs = spack.cmd.parse_specs(args.spec or args.spec_file)
if specs:
specs = _matching_specs(specs)
specs = _matching_specs(specs, specs)
else:
specs = spack.cmd.require_active_env("buildcache check").all_specs()
@@ -527,7 +503,7 @@ def copy_buildcache_file(src_url, dest_url, local_path=None):
temp_stage.create()
temp_stage.fetch()
web_util.push_to_url(local_path, dest_url, keep_original=True)
except spack.error.FetchError as e:
except web_util.FetchError as e:
# Expected, since we have to try all the possible extensions
tty.debug("no such file: {0}".format(src_url))
tty.debug(e)

View File

@@ -7,7 +7,6 @@
import re
import sys
import llnl.string
import llnl.util.lang
from llnl.util import tty
@@ -16,7 +15,6 @@
import spack.spec
import spack.stage
import spack.util.crypto
import spack.util.web as web_util
from spack.cmd.common import arguments
from spack.package_base import PackageBase, deprecated_version, preferred_version
from spack.util.editor import editor
@@ -68,7 +66,7 @@ def setup_parser(subparser):
modes_parser.add_argument(
"--verify", action="store_true", default=False, help="verify known package checksums"
)
arguments.add_common_arguments(subparser, ["package", "jobs"])
arguments.add_common_arguments(subparser, ["package"])
subparser.add_argument(
"versions", nargs=argparse.REMAINDER, help="versions to generate checksums for"
)
@@ -98,7 +96,7 @@ def checksum(parser, args):
# Add latest version if requested
if args.latest:
remote_versions = pkg.fetch_remote_versions(args.jobs)
remote_versions = pkg.fetch_remote_versions()
if len(remote_versions) > 0:
latest_version = sorted(remote_versions.keys(), reverse=True)[0]
versions.append(latest_version)
@@ -121,47 +119,27 @@ def checksum(parser, args):
# if we get here, it's because no valid url was provided by the package
# do expensive fallback to try to recover
if remote_versions is None:
remote_versions = pkg.fetch_remote_versions(args.jobs)
remote_versions = pkg.fetch_remote_versions()
if version in remote_versions:
url_dict[version] = remote_versions[version]
if len(versions) <= 0:
if remote_versions is None:
remote_versions = pkg.fetch_remote_versions(args.jobs)
remote_versions = pkg.fetch_remote_versions()
url_dict = remote_versions
# A spidered URL can differ from the package.py *computed* URL, pointing to different tarballs.
# For example, GitHub release pages sometimes have multiple tarballs with different shasum:
# - releases/download/1.0/<pkg>-1.0.tar.gz (uploaded tarball)
# - archive/refs/tags/1.0.tar.gz (generated tarball)
# We want to ensure that `spack checksum` and `spack install` ultimately use the same URL, so
# here we check whether the crawled and computed URLs disagree and, if so, prefer the
# computed URL when it exists (checked with a single HEAD request).
url_changed_for_version = set()
for version, url in url_dict.items():
possible_urls = pkg.all_urls_for_version(version)
if url not in possible_urls:
for possible_url in possible_urls:
if web_util.url_exists(possible_url):
url_dict[version] = possible_url
break
else:
url_changed_for_version.add(version)
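The preference logic above, restated as a standalone sketch (stubs replace pkg.all_urls_for_version and web_util.url_exists so it runs on its own):

def prefer_computed_urls(url_dict, all_urls_for_version, url_exists):
    """Swap crawled URLs for computed ones when a computed URL exists."""
    url_changed_for_version = set()
    for version, url in url_dict.items():
        possible_urls = all_urls_for_version(version)
        if url in possible_urls:
            continue
        for possible_url in possible_urls:
            if url_exists(possible_url):
                url_dict[version] = possible_url
                break
        else:
            # no computed URL reachable: keep the crawled one, but flag it
            url_changed_for_version.add(version)
    return url_changed_for_version

changed = prefer_computed_urls(
    {"1.0": "https://example.com/archive/1.0.tar.gz"},
    lambda v: [f"https://example.com/releases/{v}.tar.gz"],
    lambda u: False,  # pretend every HEAD request fails
)
print(changed)  # {'1.0'}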
if not url_dict:
tty.die(f"Could not find any remote versions for {pkg.name}")
elif len(url_dict) > 1 and not args.batch and sys.stdin.isatty():
filtered_url_dict = spack.stage.interactive_version_filter(
url_dict, pkg.versions, url_changes=url_changed_for_version
)
if not filtered_url_dict:
exit(0)
url_dict = filtered_url_dict
else:
tty.info(f"Found {llnl.string.plural(len(url_dict), 'version')} of {pkg.name}")
# print an empty line to create a new output section block
print()
version_hashes = spack.stage.get_checksums_for_versions(
url_dict, pkg.name, keep_stage=args.keep_stage, fetch_options=pkg.fetch_options
url_dict,
pkg.name,
keep_stage=args.keep_stage,
batch=(args.batch or len(versions) > 0 or len(url_dict) == 1),
fetch_options=pkg.fetch_options,
)
if args.verify:
@@ -261,7 +239,7 @@ def add_versions_to_package(pkg: PackageBase, version_lines: str):
parsed_version = Version(contents_version.group(1))
if parsed_version < new_versions[0][0]:
split_contents[i:i] = [new_versions.pop(0)[1], " # FIXME", "\n"]
split_contents[i:i] = [new_versions.pop(0)[1], " # FIX ME", "\n"]
num_versions_added += 1
elif parsed_version == new_versions[0][0]:

View File

@@ -19,7 +19,6 @@
import spack.hash_types as ht
import spack.mirror
import spack.util.gpg as gpg_util
import spack.util.timer as timer
import spack.util.url as url_util
import spack.util.web as web_util
@@ -191,14 +190,6 @@ def ci_generate(args):
"""
env = spack.cmd.require_active_env(cmd_name="ci generate")
if args.copy_to:
tty.warn("The flag --copy-to is deprecated and will be removed in Spack 0.23")
if args.buildcache_destination:
tty.warn(
"The flag --buildcache-destination is deprecated and will be removed in Spack 0.23"
)
output_file = args.output_file
copy_yaml_to = args.copy_to
run_optimizer = args.optimize
@@ -262,8 +253,6 @@ def ci_rebuild(args):
check a single spec against the remote mirror, and rebuild it from source if the mirror does
not contain the hash
"""
rebuild_timer = timer.Timer()
env = spack.cmd.require_active_env(cmd_name="ci rebuild")
# Make sure the environment is "gitlab-enabled", or else there's nothing
@@ -272,6 +261,12 @@ def ci_rebuild(args):
if not ci_config:
tty.die("spack ci rebuild requires an env containing ci cfg")
tty.msg(
"SPACK_BUILDCACHE_DESTINATION={0}".format(
os.environ.get("SPACK_BUILDCACHE_DESTINATION", None)
)
)
# Grab the environment variables we need. These either come from the
# pipeline generation step ("spack ci generate"), where they were written
# out as variables, or else provided by GitLab itself.
@@ -279,7 +274,6 @@ def ci_rebuild(args):
job_log_dir = os.environ.get("SPACK_JOB_LOG_DIR")
job_test_dir = os.environ.get("SPACK_JOB_TEST_DIR")
repro_dir = os.environ.get("SPACK_JOB_REPRO_DIR")
# TODO: Remove this in Spack 0.23
local_mirror_dir = os.environ.get("SPACK_LOCAL_MIRROR_DIR")
concrete_env_dir = os.environ.get("SPACK_CONCRETE_ENV_DIR")
ci_pipeline_id = os.environ.get("CI_PIPELINE_ID")
@@ -288,12 +282,9 @@ def ci_rebuild(args):
job_spec_pkg_name = os.environ.get("SPACK_JOB_SPEC_PKG_NAME")
job_spec_dag_hash = os.environ.get("SPACK_JOB_SPEC_DAG_HASH")
spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE")
# TODO: Remove this in Spack 0.23
remote_mirror_override = os.environ.get("SPACK_REMOTE_MIRROR_OVERRIDE")
# TODO: Remove this in Spack 0.23
remote_mirror_url = os.environ.get("SPACK_REMOTE_MIRROR_URL")
spack_ci_stack_name = os.environ.get("SPACK_CI_STACK_NAME")
# TODO: Remove this in Spack 0.23
shared_pr_mirror_url = os.environ.get("SPACK_CI_SHARED_PR_MIRROR_URL")
rebuild_everything = os.environ.get("SPACK_REBUILD_EVERYTHING")
require_signing = os.environ.get("SPACK_REQUIRE_SIGNING")
@@ -350,36 +341,21 @@ def ci_rebuild(args):
full_rebuild = True if rebuild_everything and rebuild_everything.lower() == "true" else False
pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
deprecated_mirror_config = False
buildcache_destination = None
if "buildcache-destination" in pipeline_mirrors:
buildcache_destination = pipeline_mirrors["buildcache-destination"]
else:
deprecated_mirror_config = True
# TODO: This will be an error in Spack 0.23
# If no override url exists, then just push binary package to the
# normal remote mirror url.
# TODO: Remove in Spack 0.23
buildcache_mirror_url = remote_mirror_override or remote_mirror_url
if buildcache_destination:
buildcache_mirror_url = buildcache_destination.push_url
# Figure out what is our temporary storage mirror: Is it artifacts
# buildcache? Or temporary-storage-url-prefix? In some cases we need to
# force something or pipelines might not have a way to propagate build
# artifacts from upstream to downstream jobs.
# TODO: Remove this in Spack 0.23
pipeline_mirror_url = None
# TODO: Remove this in Spack 0.23
temp_storage_url_prefix = None
if "temporary-storage-url-prefix" in ci_config:
temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
pipeline_mirror_url = url_util.join(temp_storage_url_prefix, ci_pipeline_id)
# TODO: Remove this in Spack 0.23
enable_artifacts_mirror = False
if "enable-artifacts-buildcache" in ci_config:
enable_artifacts_mirror = ci_config["enable-artifacts-buildcache"]
@@ -475,14 +451,12 @@ def ci_rebuild(args):
# If we decided there should be a temporary storage mechanism, add that
# mirror now so it's used when we check for a hash match already
# built for this spec.
# TODO: Remove this block in Spack 0.23
if pipeline_mirror_url:
mirror = spack.mirror.Mirror(pipeline_mirror_url, name=spack_ci.TEMP_STORAGE_MIRROR_NAME)
spack.mirror.add(mirror, cfg.default_modify_scope())
pipeline_mirrors.append(pipeline_mirror_url)
# Check configured mirrors for a built spec with a matching hash
# TODO: Remove this block in Spack 0.23
mirrors_to_check = None
if remote_mirror_override:
if spack_pipeline_type == "spack_protected_branch":
@@ -500,8 +474,7 @@ def ci_rebuild(args):
)
pipeline_mirrors.append(remote_mirror_override)
# TODO: Remove this in Spack 0.23
if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
if spack_pipeline_type == "spack_pull_request":
if shared_pr_mirror_url != "None":
pipeline_mirrors.append(shared_pr_mirror_url)
@@ -523,7 +496,6 @@ def ci_rebuild(args):
tty.msg("No need to rebuild {0}, found hash match at: ".format(job_spec_pkg_name))
for match in matches:
tty.msg(" {0}".format(match["mirror_url"]))
# TODO: Remove this block in Spack 0.23
if enable_artifacts_mirror:
matching_mirror = matches[0]["mirror_url"]
build_cache_dir = os.path.join(local_mirror_dir, "build_cache")
@@ -538,8 +510,7 @@ def ci_rebuild(args):
# only want to keep the mirror being used by the current pipeline as its binary
# package destination. This ensures that when we rebuild everything, we only
# consume binary dependencies built in this pipeline.
# TODO: Remove this in Spack 0.23
if deprecated_mirror_config and full_rebuild:
if full_rebuild:
spack_ci.remove_other_mirrors(pipeline_mirrors, cfg.default_modify_scope())
# No hash match anywhere means we need to rebuild spec
@@ -605,9 +576,7 @@ def ci_rebuild(args):
"SPACK_COLOR=always",
"SPACK_INSTALL_FLAGS={}".format(args_to_string(deps_install_args)),
"-j$(nproc)",
"install-deps/{}".format(
ev.depfile.MakefileSpec(job_spec).safe_format("{name}-{version}-{hash}")
),
"install-deps/{}".format(job_spec.format("{name}-{version}-{hash}")),
],
spack_cmd + ["install"] + root_install_args,
]
@@ -704,25 +673,21 @@ def ci_rebuild(args):
# print out some instructions on how to reproduce this build failure
# outside of the pipeline environment.
if install_exit_code == 0:
mirror_urls = [buildcache_mirror_url]
# TODO: Remove this block in Spack 0.23
if pipeline_mirror_url:
mirror_urls.append(pipeline_mirror_url)
for result in spack_ci.create_buildcache(
input_spec=job_spec,
destination_mirror_urls=mirror_urls,
sign_binaries=spack_ci.can_sign_binaries(),
):
msg = tty.msg if result.success else tty.warn
msg(
"{} {} to {}".format(
"Pushed" if result.success else "Failed to push",
job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when()),
result.url,
if buildcache_mirror_url or pipeline_mirror_url:
for result in spack_ci.create_buildcache(
input_spec=job_spec,
buildcache_mirror_url=buildcache_mirror_url,
pipeline_mirror_url=pipeline_mirror_url,
sign_binaries=spack_ci.can_sign_binaries(),
):
msg = tty.msg if result.success else tty.warn
msg(
"{} {} to {}".format(
"Pushed" if result.success else "Failed to push",
job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when()),
result.url,
)
)
)
# If this is a develop pipeline, check if the spec that we just built is
# on the broken-specs list. If so, remove it.
@@ -771,14 +736,6 @@ def ci_rebuild(args):
print(reproduce_msg)
rebuild_timer.stop()
try:
with open("install_timers.json", "w") as timelog:
extra_attributes = {"name": ".ci-rebuild"}
rebuild_timer.write_json(timelog, extra_attributes=extra_attributes)
except Exception as e:
tty.debug(str(e))
# Tie job success/failure to the success/failure of building the spec
return install_exit_code

View File

@@ -812,9 +812,6 @@ def bash(args: Namespace, out: IO) -> None:
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
aliases = ";".join(f"{key}:{val}" for key, val in spack.main.aliases.items())
out.write(f'SPACK_ALIASES="{aliases}"\n\n')
writer = BashCompletionWriter(parser.prog, out, args.aliases)
writer.write(parser)

View File

@@ -12,7 +12,7 @@
import spack.cmd
import spack.config
import spack.deptypes as dt
import spack.dependency as dep
import spack.environment as ev
import spack.mirror
import spack.modules
@@ -114,13 +114,16 @@ def __call__(self, parser, namespace, jobs, option_string):
class DeptypeAction(argparse.Action):
"""Creates a flag of valid dependency types from a deptype argument."""
"""Creates a tuple of valid dependency types from a deptype argument."""
def __call__(self, parser, namespace, values, option_string=None):
if not values or values == "all":
deptype = dt.ALL
else:
deptype = dt.canonicalize(values.split(","))
deptype = dep.all_deptypes
if values:
deptype = tuple(x.strip() for x in values.split(","))
if deptype == ("all",):
deptype = "all"
deptype = dep.canonical_deptype(deptype)
setattr(namespace, self.dest, deptype)
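A hedged usage sketch of wiring the action above into argparse (standard argparse mechanics; the flag canonicalization itself is delegated to spack.deptypes, as the newer code shows):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--deptype", action=DeptypeAction, default=dt.ALL)
args = parser.parse_args(["--deptype", "build,link"])
# args.deptype now holds the canonical flag for build|link dependencies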
@@ -282,8 +285,9 @@ def deptype():
return Args(
"--deptype",
action=DeptypeAction,
default=dt.ALL,
help="comma-separated list of deptypes to traverse (default=%s)" % ",".join(dt.ALL_TYPES),
default=dep.all_deptypes,
help="comma-separated list of deptypes to traverse\n\ndefault=%s"
% ",".join(dep.all_deptypes),
)

View File

@@ -7,15 +7,14 @@
import llnl.util.tty as tty
import spack.build_environment as build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.deptypes as dt
import spack.error
import spack.paths
import spack.spec
import spack.store
from spack import build_environment, traverse
from spack.context import Context
from spack import traverse
from spack.util.environment import dump_environment, pickle_environment
@@ -42,14 +41,14 @@ def setup_parser(subparser):
class AreDepsInstalledVisitor:
def __init__(self, context: Context = Context.BUILD):
if context == Context.BUILD:
# TODO: run deps shouldn't be required for build env.
self.direct_deps = dt.BUILD | dt.LINK | dt.RUN
elif context == Context.TEST:
self.direct_deps = dt.BUILD | dt.TEST | dt.LINK | dt.RUN
def __init__(self, context="build"):
if context not in ("build", "test"):
raise ValueError("context can only be build or test")
if context == "build":
self.direct_deps = ("build", "link", "run")
else:
raise ValueError("context can only be Context.BUILD or Context.TEST")
self.direct_deps = ("build", "test", "link", "run")
self.has_uninstalled_deps = False
@@ -72,11 +71,11 @@ def accept(self, item):
def neighbors(self, item):
# Direct deps: follow build & test edges.
# Transitive deps: follow link / run.
depflag = self.direct_deps if item.depth == 0 else dt.LINK | dt.RUN
return item.edge.spec.edges_to_dependencies(depflag=depflag)
deptypes = self.direct_deps if item.depth == 0 else ("link", "run")
return item.edge.spec.edges_to_dependencies(deptype=deptypes)
def emulate_env_utility(cmd_name, context: Context, args):
def emulate_env_utility(cmd_name, context, args):
if not args.spec:
tty.die("spack %s requires a spec." % cmd_name)
@@ -120,7 +119,7 @@ def emulate_env_utility(cmd_name, context: Context, args):
hashes=True,
# This shows more than necessary, but we cannot dynamically change deptypes
# in Spec.tree(...).
deptypes="all" if context == Context.BUILD else ("build", "test", "link", "run"),
deptypes="all" if context == "build" else ("build", "test", "link", "run"),
),
)

View File

@@ -185,7 +185,7 @@ def compiler_list(args):
os_str = os
if target:
os_str += "-%s" % target
cname = "%s{%s} %s" % (spack.spec.COMPILER_COLOR, name, os_str)
cname = "%s{%s} %s" % (spack.spec.compiler_color, name, os_str)
tty.hline(colorize(cname), char="-")
colify(reversed(sorted(c.spec.display_str for c in compilers)))

View File

@@ -5,7 +5,6 @@
import os
import re
import sys
import urllib.parse
import llnl.util.tty as tty
@@ -63,9 +62,6 @@ class {class_name}({base_class_name}):
# notify when the package is updated.
# maintainers("github_user1", "github_user2")
# FIXME: Add the SPDX identifier of the project's license below.
license("UNKNOWN")
{versions}
{dependencies}
@@ -826,12 +822,7 @@ def get_versions(args, name):
if args.url is not None and args.template != "bundle" and valid_url:
# Find available versions
try:
url_dict = spack.url.find_versions_of_archive(args.url)
if len(url_dict) > 1 and not args.batch and sys.stdin.isatty():
url_dict_filtered = spack.stage.interactive_version_filter(url_dict)
if url_dict_filtered is None:
exit(0)
url_dict = url_dict_filtered
url_dict = spack.util.web.find_versions_of_archive(args.url)
except UndetectableVersionError:
# Use fake versions
tty.warn("Couldn't detect version in: {0}".format(args.url))
@@ -843,7 +834,11 @@ def get_versions(args, name):
url_dict = {version: args.url}
version_hashes = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser, keep_stage=args.keep_stage
url_dict,
name,
first_stage_function=guesser,
keep_stage=args.keep_stage,
batch=(args.batch or len(url_dict) == 1),
)
versions = get_version_lines(version_hashes, url_dict)

View File

@@ -74,7 +74,7 @@ def dependencies(parser, args):
spec,
transitive=args.transitive,
expand_virtuals=args.expand_virtuals,
depflag=args.deptype,
deptype=args.deptype,
)
if spec.name in dependencies:

View File

@@ -8,9 +8,7 @@
import shutil
import sys
import tempfile
from typing import Optional
import llnl.string as string
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -30,6 +28,7 @@
import spack.schema.env
import spack.spec
import spack.tengine
import spack.util.string as string
from spack.util.environment import EnvironmentModifications
description = "manage virtual environments"
@@ -97,16 +96,22 @@ def env_activate_setup_parser(subparser):
view_options = subparser.add_mutually_exclusive_group()
view_options.add_argument(
"--with-view",
"-v",
metavar="name",
help="set runtime environment variables for specific view",
"--with-view",
action="store_const",
dest="with_view",
const=True,
default=True,
help="update PATH, etc., with associated view",
)
view_options.add_argument(
"--without-view",
"-V",
action="store_true",
help="do not set runtime environment variables for any view",
"--without-view",
action="store_const",
dest="with_view",
const=False,
default=True,
help="do not update PATH, etc., with associated view",
)
subparser.add_argument(
@@ -192,20 +197,10 @@ def env_activate(args):
# Activate new environment
active_env = ev.Environment(env_path)
# Check if runtime environment variables are requested, and if so, for what view.
view: Optional[str] = None
if args.with_view:
view = args.with_view
if not active_env.has_view(view):
tty.die(f"The environment does not have a view named '{view}'")
elif not args.without_view and active_env.has_view(ev.default_view_name):
view = ev.default_view_name
cmds += spack.environment.shell.activate_header(
env=active_env, shell=args.shell, prompt=env_prompt if args.prompt else None, view=view
env=active_env, shell=args.shell, prompt=env_prompt if args.prompt else None
)
env_mods.extend(spack.environment.shell.activate(env=active_env, view=view))
env_mods.extend(spack.environment.shell.activate(env=active_env, add_view=args.with_view))
cmds += env_mods.shell_modifications(args.shell)
sys.stdout.write(cmds)
@@ -244,13 +239,6 @@ def env_deactivate_setup_parser(subparser):
const="bat",
help="print bat commands to activate the environment",
)
shells.add_argument(
"--pwsh",
action="store_const",
dest="shell",
const="pwsh",
help="print pwsh commands to activate the environment",
)
def env_deactivate(args):

View File

@@ -5,9 +5,7 @@
import argparse
import errno
import os
import re
import sys
from typing import List, Optional
import llnl.util.tty as tty
import llnl.util.tty.colify as colify
@@ -56,7 +54,7 @@ def setup_parser(subparser):
find_parser.add_argument(
"--all", action="store_true", help="search for all packages that Spack knows about"
)
spack.cmd.common.arguments.add_common_arguments(find_parser, ["tags", "jobs"])
spack.cmd.common.arguments.add_common_arguments(find_parser, ["tags"])
find_parser.add_argument("packages", nargs=argparse.REMAINDER)
find_parser.epilog = (
'The search is by default on packages tagged with the "build-tools" or '
@@ -122,23 +120,46 @@ def external_find(args):
else:
tty.warn("Unable to read manifest, unexpected error: {0}".format(str(e)), skip_msg)
# Outside the Cray manifest, the search is done by tag for performance reasons,
# since tags are cached.
# If the user specified both --all and --tag, then --all has precedence
if args.all or args.packages:
# Each detectable package has at least the detectable tag
args.tags = ["detectable"]
elif not args.tags:
# If the user didn't specify anything, search for build tools by default
# If the user didn't specify anything, search for build tools by default
if not args.tags and not args.all and not args.packages:
args.tags = ["core-packages", "build-tools"]
candidate_packages = packages_to_search_for(
names=args.packages, tags=args.tags, exclude=args.exclude
)
detected_packages = spack.detection.by_path(
candidate_packages, path_hints=args.path, max_workers=args.jobs
)
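The precedence described in the comments above, condensed into a standalone helper (pure logic, no Spack imports; the tag names are the ones used in the code):

def effective_tags(all_flag, packages, tags):
    if all_flag or packages:
        return ["detectable"]  # every detectable package carries this tag
    if not tags:
        return ["core-packages", "build-tools"]  # default search
    return tags  # otherwise, user-provided tags win

print(effective_tags(False, [], []))  # ['core-packages', 'build-tools']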
# If the user specified both --all and --tag, then --all has precedence
if args.all and args.tags:
args.tags = []
# Construct the list of possible packages to be detected
pkg_cls_to_check = []
# Add the packages that have been required explicitly
if args.packages:
pkg_cls_to_check = [spack.repo.PATH.get_pkg_class(pkg) for pkg in args.packages]
if args.tags:
allowed = set(spack.repo.PATH.packages_with_tags(*args.tags))
pkg_cls_to_check = [x for x in pkg_cls_to_check if x.name in allowed]
if args.tags and not pkg_cls_to_check:
# If we arrived here we didn't have any explicit package passed
# as argument, which means to search all packages.
# Since tags are cached it's much faster to construct what we need
# to search directly, rather than filtering after the fact
pkg_cls_to_check = [
spack.repo.PATH.get_pkg_class(pkg_name)
for tag in args.tags
for pkg_name in spack.repo.PATH.packages_with_tags(tag)
]
pkg_cls_to_check = list(set(pkg_cls_to_check))
# If the list of packages is empty, search for every possible package
if not args.tags and not pkg_cls_to_check:
pkg_cls_to_check = list(spack.repo.PATH.all_package_classes())
# If the user specified any packages to exclude from external find, add them here
if args.exclude:
pkg_cls_to_check = [pkg for pkg in pkg_cls_to_check if pkg.name not in args.exclude]
detected_packages = spack.detection.by_executable(pkg_cls_to_check, path_hints=args.path)
detected_packages.update(spack.detection.by_library(pkg_cls_to_check, path_hints=args.path))
new_entries = spack.detection.update_configuration(
detected_packages, scope=args.scope, buildable=not args.not_buildable
@@ -152,28 +173,6 @@ def external_find(args):
tty.msg("No new external packages detected")
def packages_to_search_for(
*, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]]
):
result = []
for current_tag in tags:
result.extend(spack.repo.PATH.packages_with_tags(current_tag, full=True))
if names:
# Match both fully qualified and unqualified
parts = [rf"(^{x}$|[.]{x}$)" for x in names]
select_re = re.compile("|".join(parts))
result = [x for x in result if select_re.search(x)]
if exclude:
# Match both fully qualified and unqualified
parts = [rf"(^{x}$|[.]{x}$)" for x in exclude]
select_re = re.compile("|".join(parts))
result = [x for x in result if not select_re.search(x)]
return result
def external_read_cray_manifest(args):
_collect_and_consume_cray_manifest_files(
manifest_file=args.file,

View File

@@ -74,19 +74,19 @@ def graph(parser, args):
if args.static:
args.dot = True
static_graph_dot(specs, depflag=args.deptype)
static_graph_dot(specs, deptype=args.deptype)
return
if args.dot:
builder = SimpleDAG()
if args.color:
builder = DAGWithDependencyTypes()
graph_dot(specs, builder=builder, depflag=args.deptype)
graph_dot(specs, builder=builder, deptype=args.deptype)
return
# ascii is default: user doesn't need to provide it explicitly
debug = spack.config.get("config:debug")
graph_ascii(specs[0], debug=debug, depflag=args.deptype)
graph_ascii(specs[0], debug=debug, deptype=args.deptype)
for spec in specs[1:]:
print() # extra line bt/w independent graphs
graph_ascii(spec, debug=debug)

View File

@@ -11,7 +11,6 @@
from llnl.util.tty.colify import colify
import spack.cmd.common.arguments as arguments
import spack.deptypes as dt
import spack.fetch_strategy as fs
import spack.install_test
import spack.repo
@@ -65,15 +64,11 @@ def section_title(s):
def version(s):
return spack.spec.VERSION_COLOR + s + plain_format
return spack.spec.version_color + s + plain_format
def variant(s):
return spack.spec.ENABLED_VARIANT_COLOR + s + plain_format
def license(s):
return spack.spec.VERSION_COLOR + s + plain_format
return spack.spec.enabled_variant_color + s + plain_format
class VariantFormatter:
@@ -165,7 +160,7 @@ def print_dependencies(pkg):
for deptype in ("build", "link", "run"):
color.cprint("")
color.cprint(section_title("%s Dependencies:" % deptype.capitalize()))
deps = sorted(pkg.dependencies_of_type(dt.flag_from_string(deptype)))
deps = sorted(pkg.dependencies_of_type(deptype))
if deps:
colify(deps, indent=4)
else:
@@ -352,22 +347,6 @@ def print_virtuals(pkg):
color.cprint(" None")
def print_licenses(pkg):
"""Output the licenses of the project."""
color.cprint("")
color.cprint(section_title("Licenses: "))
if len(pkg.licenses) == 0:
color.cprint(" None")
else:
pad = padder(pkg.licenses, 4)
for when_spec in pkg.licenses:
license_identifier = pkg.licenses[when_spec]
line = license(" {0}".format(pad(license_identifier))) + color.cescape(when_spec)
color.cprint(line)
def info(parser, args):
spec = spack.spec.Spec(args.package)
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
@@ -397,7 +376,6 @@ def info(parser, args):
(args.all or not args.no_dependencies, print_dependencies),
(args.all or args.virtuals, print_virtuals),
(args.all or args.tests, print_tests),
(args.all or True, print_licenses),
]
for print_it, func in sections:
if print_it:


@@ -240,7 +240,8 @@ def default_log_file(spec):
"""Computes the default filename for the log file and creates
the corresponding directory if not present
"""
basename = spec.format_path("test-{name}-{version}-{hash}.xml")
fmt = "test-{x.name}-{x.version}-{hash}.xml"
basename = fmt.format(x=spec, hash=spec.dag_hash())
dirname = fs.os.path.join(spack.paths.reports_path, "junit")
fs.mkdirp(dirname)
return fs.os.path.join(dirname, basename)
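
The new basename comes from a single format string instead of a manual str.format call. A toy stand-in for the placeholder expansion (the attribute values are invented; only the placeholder names come from the line above):

import re

attrs = {"name": "zlib", "version": "1.2.13", "hash": "abc1234"}  # hypothetical spec
basename = re.sub(r"\{(\w+)\}", lambda m: attrs[m.group(1)], "test-{name}-{version}-{hash}.xml")
print(basename)  # test-zlib-1.2.13-abc1234.xml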


@@ -16,7 +16,7 @@
from llnl.util.tty.colify import colify
import spack.cmd.common.arguments as arguments
import spack.deptypes as dt
import spack.dependency
import spack.repo
from spack.version import VersionList
@@ -149,8 +149,8 @@ def rows_for_ncols(elts, ncols):
def get_dependencies(pkg):
all_deps = {}
for deptype in dt.ALL_TYPES:
deps = pkg.dependencies_of_type(dt.flag_from_string(deptype))
for deptype in spack.dependency.all_deptypes:
deps = pkg.dependencies_of_type(deptype)
all_deps[deptype] = [d for d in deps]
return all_deps
@@ -275,8 +275,8 @@ def head(n, span_id, title, anchor=None):
out.write("\n")
out.write("</dd>\n")
for deptype in dt.ALL_TYPES:
deps = pkg_cls.dependencies_of_type(dt.flag_from_string(deptype))
for deptype in spack.dependency.all_deptypes:
deps = pkg_cls.dependencies_of_type(deptype)
if deps:
out.write("<dt>%s Dependencies:</dt>\n" % deptype.capitalize())
out.write("<dd>\n")


@@ -5,8 +5,6 @@
import sys
import llnl.util.tty as tty
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.cmd.find
@@ -54,13 +52,6 @@ def setup_parser(subparser):
const="bat",
help="print bat commands to load the package",
)
shells.add_argument(
"--pwsh",
action="store_const",
dest="shell",
const="pwsh",
help="print pwsh commands to load the package",
)
subparser.add_argument(
"--first",
@@ -110,14 +101,16 @@ def load(parser, args):
)
return 1
if args.things_to_load != "package,dependencies":
tty.warn(
"The `--only` flag in spack load is deprecated and will be removed in Spack v0.22"
)
with spack.store.STORE.db.read_transaction():
env_mod = uenv.environment_modifications_for_specs(*specs)
if "dependencies" in args.things_to_load:
include_roots = "package" in args.things_to_load
specs = [
dep for spec in specs for dep in spec.traverse(root=include_roots, order="post")
]
env_mod = spack.util.environment.EnvironmentModifications()
for spec in specs:
env_mod.extend(uenv.environment_modifications_for_spec(spec))
env_mod.prepend_path(uenv.spack_loaded_hashes_var, spec.dag_hash())
cmds = env_mod.shell_modifications(args.shell)
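
A rough sketch of the accumulation pattern in this hunk, assuming only the EnvironmentModifications methods that appear here (the variable name and hash value are made up):

from spack.util.environment import EnvironmentModifications

env_mod = EnvironmentModifications()
env_mod.prepend_path("SPACK_LOADED_HASHES", "abc1234")  # hypothetical hash
# Render the recorded changes for a POSIX shell; other shells are
# selected via args.shell above (e.g. "bat").
print(env_mod.shell_modifications("sh"))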


@@ -6,11 +6,10 @@
import posixpath
import sys
from llnl.path import convert_to_posix_path
import spack.paths
import spack.util.executable
from spack.spec import Spec
from spack.util.path import convert_to_posix_path
description = "generate Windows installer"
section = "admin"


@@ -443,7 +443,7 @@ def mirror_create(args):
)
# When no directory is provided, the source dir is used
path = args.directory or spack.caches.fetch_cache_location()
path = args.directory or spack.caches.FETCH_CACHE_location()
if args.all and not ev.active_environment():
create_mirror_for_all_specs(


@@ -137,7 +137,7 @@ def solve(parser, args):
# these are the same options as `spack spec`
install_status_fn = spack.spec.Spec.install_status
fmt = spack.spec.DISPLAY_FORMAT
fmt = spack.spec.display_format
if args.namespaces:
fmt = "{namespace}." + fmt
@@ -176,29 +176,17 @@ def solve(parser, args):
output = sys.stdout if "asp" in show else None
setup_only = set(show) == {"asp"}
unify = spack.config.get("concretizer:unify")
allow_deprecated = spack.config.get("config:deprecated", False)
if unify != "when_possible":
# set up solver parameters
# Note: reuse and other concretizer prefs are passed as configuration
result = solver.solve(
specs,
out=output,
timers=args.timers,
stats=args.stats,
setup_only=setup_only,
allow_deprecated=allow_deprecated,
specs, out=output, timers=args.timers, stats=args.stats, setup_only=setup_only
)
if not setup_only:
_process_result(result, show, required_format, kwargs)
else:
for idx, result in enumerate(
solver.solve_in_rounds(
specs,
out=output,
timers=args.timers,
stats=args.stats,
allow_deprecated=allow_deprecated,
)
solver.solve_in_rounds(specs, out=output, timers=args.timers, stats=args.stats)
):
if "solutions" in show:
tty.msg("ROUND {0}".format(idx))


@@ -77,7 +77,7 @@ def setup_parser(subparser):
def spec(parser, args):
install_status_fn = spack.spec.Spec.install_status
fmt = spack.spec.DISPLAY_FORMAT
fmt = spack.spec.display_format
if args.namespaces:
fmt = "{namespace}." + fmt


@@ -5,7 +5,6 @@
import io
import sys
import llnl.string
import llnl.util.tty as tty
import llnl.util.tty.colify as colify
@@ -25,7 +24,7 @@ def report_tags(category, tags):
if isatty:
num = len(tags)
fmt = "{0} package tag".format(category)
buffer.write("{0}:\n".format(llnl.string.plural(num, fmt)))
buffer.write("{0}:\n".format(spack.util.string.plural(num, fmt)))
if tags:
colify.colify(tags, output=buffer, tty=isatty, indent=4)


@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.cmd.common.env_utility as env_utility
from spack.context import Context
description = (
"run a command in a spec's test environment, or dump its environment to screen or file"
@@ -15,4 +14,4 @@
def test_env(parser, args):
env_utility.emulate_env_utility("test-env", Context.TEST, args)
env_utility.emulate_env_utility("test-env", "test", args)


@@ -51,13 +51,6 @@ def setup_parser(subparser):
const="bat",
help="print bat commands to load the package",
)
shells.add_argument(
"--pwsh",
action="store_const",
dest="shell",
const="pwsh",
help="print pwsh commands to load the package",
)
subparser.add_argument(
"-a", "--all", action="store_true", help="unload all loaded Spack packages"
@@ -88,8 +81,9 @@ def unload(parser, args):
)
return 1
env_mod = uenv.environment_modifications_for_specs(*specs).reversed()
env_mod = spack.util.environment.EnvironmentModifications()
for spec in specs:
env_mod.extend(uenv.environment_modifications_for_spec(spec).reversed())
env_mod.remove_path(uenv.spack_loaded_hashes_var, spec.dag_hash())
cmds = env_mod.shell_modifications(args.shell)


@@ -12,7 +12,6 @@
import spack.fetch_strategy as fs
import spack.repo
import spack.spec
import spack.url
import spack.util.crypto as crypto
from spack.url import (
UndetectableNameError,
@@ -27,6 +26,7 @@
substitution_offsets,
)
from spack.util.naming import simplify_name
from spack.util.web import find_versions_of_archive
description = "debugging tool for url parsing"
section = "developer"
@@ -139,7 +139,7 @@ def url_parse(args):
if args.spider:
print()
tty.msg("Spidering for versions:")
versions = spack.url.find_versions_of_archive(url)
versions = find_versions_of_archive(url)
if not versions:
print(" Found no versions for {0}".format(name))


@@ -37,7 +37,10 @@ def setup_parser(subparser):
action="store_true",
help="only list remote versions newer than the latest checksummed version",
)
arguments.add_common_arguments(subparser, ["package", "jobs"])
subparser.add_argument(
"-c", "--concurrency", default=32, type=int, help="number of concurrent requests"
)
arguments.add_common_arguments(subparser, ["package"])
def versions(parser, args):
@@ -65,7 +68,7 @@ def versions(parser, args):
if args.safe:
return
fetched_versions = pkg.fetch_remote_versions(args.jobs)
fetched_versions = pkg.fetch_remote_versions(args.concurrency)
if args.new:
if sys.stdout.isatty():


@@ -13,7 +13,6 @@
import tempfile
from typing import List, Optional, Sequence
import llnl.path
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.filesystem import path_contains_subdirectory, paths_containing_libs
@@ -25,6 +24,7 @@
import spack.util.module_cmd
import spack.version
from spack.util.environment import filter_system_paths
from spack.util.path import system_path_filter
__all__ = ["Compiler"]
@@ -39,17 +39,10 @@ def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
version_arg (str): the argument used to extract version information
"""
compiler = spack.util.executable.Executable(compiler_path)
compiler_invocation_args = {
"output": str,
"error": str,
"ignore_errors": ignore_errors,
"timeout": 120,
"fail_on_error": True,
}
if version_arg:
output = compiler(version_arg, **compiler_invocation_args)
output = compiler(version_arg, output=str, error=str, ignore_errors=ignore_errors)
else:
output = compiler(**compiler_invocation_args)
output = compiler(output=str, error=str, ignore_errors=ignore_errors)
return output
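
The kwargs dict above adds a 120-second timeout and fail_on_error to the compiler invocation. A standalone analogue with the standard library (the compiler path is hypothetical):

import subprocess

proc = subprocess.run(
    ["/usr/bin/gcc", "--version"],  # hypothetical compiler path
    capture_output=True,
    text=True,
    timeout=120,  # mirrors the "timeout": 120 entry above
    check=True,   # roughly analogous to fail_on_error=True
)
print(proc.stdout.splitlines()[0])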
@@ -160,7 +153,7 @@ def _parse_link_paths(string):
return implicit_link_dirs
@llnl.path.system_path_filter
@system_path_filter
def _parse_non_system_link_dirs(string: str) -> List[str]:
"""Parses link paths out of compiler debug output.
@@ -236,9 +229,6 @@ class Compiler:
# by any compiler
_all_compiler_rpath_libraries = ["libc", "libc++", "libstdc++"]
#: Platform matcher for Platform objects supported by compiler
is_supported_on_platform = lambda x: True
# Default flags used by a compiler to set an rpath
@property
def cc_rpath_arg(self):
@@ -604,6 +594,8 @@ def search_regexps(cls, language):
compiler_names = getattr(cls, "{0}_names".format(language))
prefixes = [""] + cls.prefixes
suffixes = [""]
# Windows compilers generally have an extension of some sort
# as do most files on Windows, handle that case here
if sys.platform == "win32":
ext = r"\.(?:exe|bat)"
cls_suf = [suf + ext for suf in cls.suffixes]


@@ -10,7 +10,7 @@
import itertools
import multiprocessing.pool
import os
from typing import Dict, List
from typing import Dict
import archspec.cpu
@@ -298,7 +298,7 @@ def select_new_compilers(compilers, scope=None):
return compilers_not_in_config
def supported_compilers() -> List[str]:
def supported_compilers():
"""Return a set of names of compilers supported by Spack.
See available_compilers() to get a list of all the available
@@ -306,41 +306,10 @@ def supported_compilers() -> List[str]:
"""
# Hack to be able to call the compiler `apple-clang` while still
# using a valid python name for the module
return sorted(all_compiler_names())
def supported_compilers_for_host_platform() -> List[str]:
"""Return a set of compiler class objects supported by Spack
that are also supported by the current host platform
"""
host_plat = spack.platforms.real_host()
return supported_compilers_for_platform(host_plat)
def supported_compilers_for_platform(platform: spack.platforms.Platform) -> List[str]:
"""Return a set of compiler class objects supported by Spack
that are also supported by the provided platform
Args:
platform: the platform for which compiler compatibility should be determined
"""
return [
name
for name in supported_compilers()
if class_for_compiler_name(name).is_supported_on_platform(platform)
]
def all_compiler_names() -> List[str]:
def replace_apple_clang(name):
return name if name != "apple_clang" else "apple-clang"
return [replace_apple_clang(name) for name in all_compiler_module_names()]
def all_compiler_module_names() -> List[str]:
return [name for name in llnl.util.lang.list_modules(spack.paths.compilers_path)]
return sorted(
name if name != "apple_clang" else "apple-clang"
for name in llnl.util.lang.list_modules(spack.paths.compilers_path)
)
@_auto_compiler_spec
@@ -659,7 +628,7 @@ def arguments_to_detect_version_fn(operating_system, paths):
def _default(search_paths):
command_arguments = []
files_to_be_tested = fs.files_in(*search_paths)
for compiler_name in spack.compilers.supported_compilers_for_host_platform():
for compiler_name in spack.compilers.supported_compilers():
compiler_cls = class_for_compiler_name(compiler_name)
for language in ("cc", "cxx", "f77", "fc"):
@@ -718,11 +687,9 @@ def _default(fn_args):
value = fn_args._replace(id=compiler_id._replace(version=version))
return value, None
error = f"Couldn't get version for compiler {path}".format(path)
error = "Couldn't get version for compiler {0}".format(path)
except spack.util.executable.ProcessError as e:
error = f"Couldn't get version for compiler {path}\n" + str(e)
except spack.util.executable.ProcessTimeoutError as e:
error = f"Couldn't get version for compiler {path}\n" + str(e)
error = "Couldn't get version for compiler {0}\n".format(path) + str(e)
except Exception as e:
# Catching "Exception" here is fine because it just
# means something went wrong running a candidate executable.


@@ -112,7 +112,6 @@ def extract_version_from_output(cls, output):
match = re.search(r"AOCC_(\d+)[._](\d+)[._](\d+)", output)
if match:
return ".".join(match.groups())
return "unknown"
@classmethod
def fc_version(cls, fortran_compiler):


@@ -99,28 +99,6 @@ def cxx17_flag(self):
else:
return "-std=c++17"
@property
def cxx20_flag(self):
if self.real_version < Version("8.0"):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++20 standard", "cxx20_flag", "< 8.0"
)
elif self.real_version < Version("11.0"):
return "-std=c++2a"
else:
return "-std=c++20"
@property
def cxx23_flag(self):
if self.real_version < Version("11.0"):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++23 standard", "cxx23_flag", "< 11.0"
)
elif self.real_version < Version("14.0"):
return "-std=c++2b"
else:
return "-std=c++23"
@property
def c99_flag(self):
if self.real_version < Version("4.5"):


@@ -7,6 +7,7 @@
import re
import subprocess
import sys
from distutils.version import StrictVersion
from typing import Dict, List, Set
import spack.compiler
@@ -28,97 +29,13 @@
}
class CmdCall:
"""Compose a call to `cmd` for an ordered series of cmd commands/scripts"""
def __init__(self, *cmds):
if not cmds:
raise RuntimeError(
"""Attempting to run commands from CMD without specifying commands.
Please add commands to be run."""
)
self._cmds = cmds
def __call__(self):
out = subprocess.check_output(self.cmd_line, stderr=subprocess.STDOUT) # novermin
return out.decode("utf-16le", errors="replace") # novermin
@property
def cmd_line(self):
base_call = "cmd /u /c "
commands = " && ".join([x.command_str() for x in self._cmds])
# If multiple commands are being invoked by a single subshell
# they must be encapsulated by a double quote. Always double
# quote to be sure of proper handling
# cmd will properly resolve nested double quotes as needed
#
# `set` writes out the active env to the subshell stdout,
# and in this context we are always trying to obtain env
# state so it should always be appended
return base_call + f'"{commands} && set"'
class VarsInvocation:
def __init__(self, script):
self._script = script
def command_str(self):
return f'"{self._script}"'
@property
def script(self):
return self._script
class VCVarsInvocation(VarsInvocation):
def __init__(self, script, arch, msvc_version):
super(VCVarsInvocation, self).__init__(script)
self._arch = arch
self._msvc_version = msvc_version
@property
def sdk_ver(self):
"""Accessor for Windows SDK version property
Note: This property may not be set by
the calling context and as such this property will
return an empty string
This property will ONLY be set if the SDK package
is a dependency somewhere in the Spack DAG of the package
for which we are constructing an MSVC compiler env.
Otherwise this property should be unset to allow the VCVARS
script to use its internal heuristics to determine appropriate
SDK version
"""
if getattr(self, "_sdk_ver", None):
return self._sdk_ver + ".0"
return ""
@sdk_ver.setter
def sdk_ver(self, val):
self._sdk_ver = val
@property
def arch(self):
return self._arch
@property
def vcvars_ver(self):
return f"-vcvars_ver={self._msvc_version}"
def command_str(self):
script = super(VCVarsInvocation, self).command_str()
return f"{script} {self.arch} {self.sdk_ver} {self.vcvars_ver}"
def get_valid_fortran_pth(comp_ver):
cl_ver = str(comp_ver)
sort_fn = lambda fc_ver: Version(fc_ver)
sort_fn = lambda fc_ver: StrictVersion(fc_ver)
sort_fc_ver = sorted(list(avail_fc_version), key=sort_fn)
for ver in sort_fc_ver:
if ver in fortran_mapping:
if Version(cl_ver) <= Version(fortran_mapping[ver]):
if StrictVersion(cl_ver) <= StrictVersion(fortran_mapping[ver]):
return fc_path[ver]
return None
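
A hedged usage sketch of the two classes above (Windows-only; the script path, arch, and toolset version are hypothetical). The constructor below composes such invocations into env_cmds, and setup_custom_environment parses the resulting `set` dump the same way:

vcvars = VCVarsInvocation(
    r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvars64.bat",
    "amd64",
    "14.29",
)
# Runs: cmd /u /c ""...vcvars64.bat" amd64  -vcvars_ver=14.29 && set"
# (two spaces because sdk_ver is empty unless set by the caller)
out = CmdCall(vcvars)()
env = dict((k, v) for k, _, v in (line.partition("=") for line in out.splitlines()))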
@@ -153,58 +70,27 @@ class Msvc(Compiler):
#: Regex used to extract version from compiler's output
version_regex = r"([1-9][0-9]*\.[0-9]*\.[0-9]*)"
# The MSVC compiler class overrides this to prevent instances
# of erroneous matching on executable names that cannot be msvc
# compilers
suffixes = []
is_supported_on_platform = lambda x: isinstance(x, spack.platforms.Windows)
# Initialize, deferring to base class but then adding the vcvarsallfile
# file based on compiler executable path.
def __init__(self, *args, **kwargs):
# This positional argument "paths" is later parsed and process by the base class
# via the call to `super` later in this method
paths = args[3]
# This positional argument "cspec" is also parsed and handled by the base class
# constructor
cspec = args[0]
new_pth = [pth if pth else get_valid_fortran_pth(cspec.version) for pth in paths]
paths[:] = new_pth
# Initialize, deferring to base class but then adding the vcvarsallfile
# file based on compiler executable path.
new_pth = [pth if pth else get_valid_fortran_pth(args[0].version) for pth in args[3]]
args[3][:] = new_pth
super().__init__(*args, **kwargs)
# To use the MSVC compilers, VCVARS must be invoked
# VCVARS is located at a fixed location, referenceable
# idiomatically by the following relative path from the
# compiler.
# Spack first finds the compilers via VSWHERE
# and stores their path, but their respective VCVARS
# file must be invoked before usage.
env_cmds = []
compiler_root = os.path.join(self.cc, "../../../../../../..")
vcvars_script_path = os.path.join(compiler_root, "Auxiliary", "Build", "vcvars64.bat")
# get current platform architecture and format for vcvars argument
arch = spack.platforms.real_host().default.lower()
arch = arch.replace("-", "_")
self.vcvars_call = VCVarsInvocation(vcvars_script_path, arch, self.msvc_version)
env_cmds.append(self.vcvars_call)
# Below is a check for a valid fortran path
# paths has c, cxx, fc, and f77 paths in that order
# paths[2] refers to the fc path and is a generic check
# for a fortran compiler
if paths[2]:
if os.getenv("ONEAPI_ROOT"):
# If this is found, it sets all the vars
oneapi_root = os.getenv("ONEAPI_ROOT")
oneapi_root_setvars = os.path.join(oneapi_root, "setvars.bat")
oneapi_version_setvars = os.path.join(
oneapi_root, "compiler", str(self.ifx_version), "env", "vars.bat"
)
# order matters here, the specific version env must be invoked first,
# otherwise it will be ignored if the root setvars sets up the oneapi
# env first
env_cmds.extend(
[VarsInvocation(oneapi_version_setvars), VarsInvocation(oneapi_root_setvars)]
)
self.msvc_compiler_environment = CmdCall(*env_cmds)
self.setvarsfile = os.path.join(os.getenv("ONEAPI_ROOT"), "setvars.bat")
else:
# To use the MSVC compilers, VCVARS must be invoked
# VCVARS is located at a fixed location, referenceable
# idiomatically by the following relative path from the
# compiler.
# Spack first finds the compilers via VSWHERE
# and stores their path, but their respective VCVARS
# file must be invoked before usage.
self.setvarsfile = os.path.abspath(os.path.join(self.cc, "../../../../../../.."))
self.setvarsfile = os.path.join(self.setvarsfile, "Auxiliary", "Build", "vcvars64.bat")
@property
def msvc_version(self):
@@ -233,29 +119,15 @@ def platform_toolset_ver(self):
"""
return self.msvc_version[:2].joined.string[:3]
def _compiler_version(self, compiler):
"""Returns version object for given compiler"""
# ignore_errors below is true here due to ifx's
# non-zero return code if it is not provided
# an input file
return Version(
re.search(
Msvc.version_regex,
spack.compiler.get_compiler_version_output(
compiler, version_arg=None, ignore_errors=True
),
).group(1)
)
@property
def cl_version(self):
"""Cl toolset version"""
return self._compiler_version(self.cc)
@property
def ifx_version(self):
"""Ifx compiler version associated with this version of MSVC"""
return self._compiler_version(self.fc)
return Version(
re.search(
Msvc.version_regex,
spack.compiler.get_compiler_version_output(self.cc, version_arg=None),
).group(1)
)
@property
def vs_root(self):
@@ -274,12 +146,27 @@ def setup_custom_environment(self, pkg, env):
# output, sort into dictionary, use that to make the build
# environment.
# get current platform architecture and format for vcvars argument
arch = spack.platforms.real_host().default.lower()
arch = arch.replace("-", "_")
# vcvars can target specific sdk versions, force it to pick up concretized sdk
# version, if needed by spec
if pkg.name != "win-sdk" and "win-sdk" in pkg.spec:
self.vcvars_call.sdk_ver = pkg.spec["win-sdk"].version.string
sdk_ver = (
""
if "win-sdk" not in pkg.spec or pkg.name == "win-sdk"
else pkg.spec["win-sdk"].version.string + ".0"
)
# provide vcvars with msvc version selected by concretization,
# not whatever it happens to pick up on the system (highest available version)
out = subprocess.check_output( # novermin
'cmd /u /c "{}" {} {} {} && set'.format(
self.setvarsfile, arch, sdk_ver, "-vcvars_ver=%s" % self.msvc_version
),
stderr=subprocess.STDOUT,
)
if sys.version_info[0] >= 3:
out = out.decode("utf-16le", errors="replace") # novermin
out = self.msvc_compiler_environment()
int_env = dict(
(key, value)
for key, _, value in (line.partition("=") for line in out.splitlines())


@@ -155,7 +155,7 @@ def _valid_virtuals_and_externals(self, spec):
),
)
def choose_virtual_or_external(self, spec: spack.spec.Spec):
def choose_virtual_or_external(self, spec):
"""Given a list of candidate virtual and external packages, try to
find one that is most ABI compatible.
"""
@@ -744,11 +744,8 @@ def concretize_specs_together(*abstract_specs, **kwargs):
def _concretize_specs_together_new(*abstract_specs, **kwargs):
import spack.solver.asp
allow_deprecated = spack.config.get("config:deprecated", False)
solver = spack.solver.asp.Solver()
result = solver.solve(
abstract_specs, tests=kwargs.get("tests", False), allow_deprecated=allow_deprecated
)
result = solver.solve(abstract_specs, tests=kwargs.get("tests", False))
result.raise_if_unsat()
return [s.copy() for s in result.specs]


@@ -857,12 +857,12 @@ def add_from_file(filename, scope=None):
def add(fullpath, scope=None):
"""Add the given configuration to the specified config scope.
Add accepts a path. If you want to add from a filename, use add_from_file"""
components = process_config_path(fullpath)
has_existing_value = True
path = ""
override = False
value = syaml.load_config(components[-1])
for idx, name in enumerate(components[:-1]):
# First handle double colons in constructing path
colon = "::" if override else ":" if path else ""
@@ -883,14 +883,14 @@ def add(fullpath, scope=None):
existing = get_valid_type(path)
# construct value from this point down
value = syaml.load_config(components[-1])
for component in reversed(components[idx + 1 : -1]):
value = {component: value}
break
if override:
path += "::"
if has_existing_value:
path, _, value = fullpath.rpartition(":")
value = syaml.load_config(value)
existing = get(path, scope=scope)
# append values to lists
@@ -1231,17 +1231,11 @@ def they_are(t):
return copy.copy(source)
#
# Process a path argument to config.set() that may contain overrides ('::' or
# trailing ':')
#
def process_config_path(path):
"""Process a path argument to config.set() that may contain overrides ('::' or
trailing ':')
Note: quoted value path components will be processed as a single value (escaping colons);
quoted path components outside of the value will be considered ill-formed and will
raise.
e.g. `this:is:a:path:'value:with:colon'` will yield:
[this, is, a, path, value:with:colon]
"""
result = []
if path.startswith(":"):
raise syaml.SpackYAMLError("Illegal leading `:' in path `{0}'".format(path), "")
@@ -1269,17 +1263,6 @@ def process_config_path(path):
front.append = True
result.append(front)
quote = "['\"]"
not_quote = "[^'\"]"
if re.match(f"^{quote}", path):
m = re.match(rf"^({quote}{not_quote}+{quote})$", path)
if not m:
raise ValueError("Quotes indicate value, but there are additional path entries")
result.append(m.group(1))
break
return result
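
The quoted-value rule in the new docstring can be checked in isolation; this reuses the exact quote regexes added above on the docstring's own example:

import re

quote, not_quote = "['\"]", "[^'\"]"
component = "'value:with:colon'"  # trailing component of this:is:a:path:'value:with:colon'
m = re.match(rf"^({quote}{not_quote}+{quote})$", component)
print(m.group(1))  # 'value:with:colon' -- kept whole instead of split on colons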


@@ -272,6 +272,13 @@ def _os_pkg_manager(self):
raise spack.error.SpackError(msg)
return os_pkg_manager
@tengine.context_property
def extra_instructions(self):
Extras = namedtuple("Extra", ["build", "final"])
extras = self.container_config.get("extra_instructions", {})
build, final = extras.get("build", None), extras.get("final", None)
return Extras(build=build, final=final)
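
For orientation, a hypothetical container_config excerpt with the keys this property reads (the instruction strings are invented):

from collections import namedtuple

Extras = namedtuple("Extra", ["build", "final"])
container_config = {
    "extra_instructions": {"build": "RUN pip install pytest", "final": "RUN rm -rf /tmp/*"}
}
extras = container_config.get("extra_instructions", {})
build, final = extras.get("build", None), extras.get("final", None)
print(Extras(build=build, final=final))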
@tengine.context_property
def labels(self):
return self.container_config.get("labels", {})


@@ -1,29 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This module provides classes used in user and build environment"""
from enum import Enum
class Context(Enum):
"""Enum used to indicate the context in which an environment has to be setup: build,
run or test."""
BUILD = 1
RUN = 2
TEST = 3
def __str__(self):
return ("build", "run", "test")[self.value - 1]
@classmethod
def from_string(cls, s: str):
if s == "build":
return Context.BUILD
elif s == "run":
return Context.RUN
elif s == "test":
return Context.TEST
raise ValueError(f"context should be one of 'build', 'run', 'test', got {s}")
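
Since the whole file is shown (it is being removed in this diff), the enum's behavior is easy to pin down:

assert str(Context.BUILD) == "build"
assert str(Context.TEST) == "test"
assert Context.from_string("run") is Context.RUN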


@@ -4,9 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import json
import os
import traceback
import warnings
import jsonschema
import jsonschema.exceptions
@@ -14,7 +11,6 @@
import llnl.util.tty as tty
import spack.cmd
import spack.deptypes as dt
import spack.error
import spack.hash_types as hash_types
import spack.platforms
@@ -49,29 +45,9 @@ def translated_compiler_name(manifest_compiler_name):
)
def compiler_from_entry(entry: dict, manifest_path: str):
# Note that manifest_path is only passed here to compose a
# useful warning message when paths appear to be missing.
def compiler_from_entry(entry):
compiler_name = translated_compiler_name(entry["name"])
if "prefix" in entry:
prefix = entry["prefix"]
paths = dict(
(lang, os.path.join(prefix, relpath))
for (lang, relpath) in entry["executables"].items()
)
else:
paths = entry["executables"]
# Do a check for missing paths. Note that this isn't possible for
# all compiler entries, since their "paths" might actually be
# exe names like "cc" that depend on modules being loaded. Cray
# manifest entries are always paths though.
missing_paths = []
for path in paths.values():
if not os.path.exists(path):
missing_paths.append(path)
paths = entry["executables"]
# to instantiate a compiler class we may need a concrete version:
version = "={}".format(entry["version"])
arch = entry["arch"]
@@ -80,18 +56,8 @@ def compiler_from_entry(entry: dict, manifest_path: str):
compiler_cls = spack.compilers.class_for_compiler_name(compiler_name)
spec = spack.spec.CompilerSpec(compiler_cls.name, version)
path_list = [paths.get(x, None) for x in ("cc", "cxx", "f77", "fc")]
if missing_paths:
warnings.warn(
"Manifest entry refers to nonexistent paths:\n\t"
+ "\n\t".join(missing_paths)
+ f"\nfor {str(spec)}"
+ f"\nin {manifest_path}"
+ "\nPlease report this issue"
)
return compiler_cls(spec, operating_system, target, path_list)
paths = [paths.get(x, None) for x in ("cc", "cxx", "f77", "fc")]
return compiler_cls(spec, operating_system, target, paths)
def spec_from_entry(entry):
@@ -192,13 +158,13 @@ def entries_to_specs(entries):
dependencies = entry["dependencies"]
for name, properties in dependencies.items():
dep_hash = properties["hash"]
depflag = dt.canonicalize(properties["type"])
deptypes = properties["type"]
if dep_hash in spec_dict:
if entry["hash"] not in spec_dict:
continue
parent_spec = spec_dict[entry["hash"]]
dep_spec = spec_dict[dep_hash]
parent_spec._add_dependency(dep_spec, depflag=depflag, virtuals=())
parent_spec._add_dependency(dep_spec, deptypes=deptypes, virtuals=())
for spec in spec_dict.values():
spack.spec.reconstruct_virtuals_on_edges(spec)
@@ -220,21 +186,12 @@ def read(path, apply_updates):
tty.debug("{0}: {1} specs read from manifest".format(path, str(len(specs))))
compilers = list()
if "compilers" in json_data:
compilers.extend(compiler_from_entry(x, path) for x in json_data["compilers"])
compilers.extend(compiler_from_entry(x) for x in json_data["compilers"])
tty.debug("{0}: {1} compilers read from manifest".format(path, str(len(compilers))))
# Filter out the compilers that already appear in the configuration
compilers = spack.compilers.select_new_compilers(compilers)
if apply_updates and compilers:
for compiler in compilers:
try:
spack.compilers.add_compilers_to_config([compiler], init_config=False)
except Exception:
warnings.warn(
f"Could not add compiler {str(compiler.spec)}: "
f"\n\tfrom manifest: {path}"
"\nPlease reexecute with 'spack -d' and include the stack trace"
)
tty.debug(f"Include this\n{traceback.format_exc()}")
spack.compilers.add_compilers_to_config(compilers, init_config=False)
if apply_updates:
for spec in specs.values():
spack.store.STORE.db.add(spec, directory_layout=None)
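
For orientation, a hypothetical manifest compiler entry of the shape compiler_from_entry consumes (field names taken from the code above, all values invented):

entry = {
    "name": "gcc",
    "version": "11.2.0",
    "prefix": "/opt/gcc-11.2.0",  # optional; joined with each relative path below
    "executables": {"cc": "bin/gcc", "cxx": "bin/g++", "f77": "bin/gfortran", "fc": "bin/gfortran"},
    "arch": {"os": "rhel8", "target": "x86_64"},  # assumed shape; consumed past the end of this hunk
}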


@@ -27,8 +27,6 @@
import time
from typing import Any, Callable, Dict, Generator, List, NamedTuple, Set, Type, Union
import spack.deptypes as dt
try:
import uuid
@@ -91,7 +89,7 @@
#: Types of dependencies tracked by the database
#: We store by DAG hash, so we track the dependencies that the DAG hash includes.
_TRACKED_DEPENDENCIES = ht.dag_hash.depflag
_TRACKED_DEPENDENCIES = ht.dag_hash.deptype
#: Default list of fields written for each install record
DEFAULT_INSTALL_RECORD_FIELDS = (
@@ -797,7 +795,7 @@ def _assign_dependencies(self, spec_reader, hash_key, installs, data):
tty.warn(msg)
continue
spec._add_dependency(child, depflag=dt.canonicalize(dtypes), virtuals=virtuals)
spec._add_dependency(child, deptypes=dtypes, virtuals=virtuals)
def _read_from_file(self, filename):
"""Fill database from file, do not maintain old data.
@@ -1148,7 +1146,7 @@ def _add(
# Retrieve optional arguments
installation_time = installation_time or _now()
for edge in spec.edges_to_dependencies(depflag=_TRACKED_DEPENDENCIES):
for edge in spec.edges_to_dependencies(deptype=_TRACKED_DEPENDENCIES):
if edge.spec.dag_hash() in self._data:
continue
# allow missing build-only deps. This prevents excessive
@@ -1156,7 +1154,7 @@ def _add(
# is missing a build dep; there's no need to install the
# build dep's build dep first, and there's no need to warn
# about it missing.
dep_allow_missing = allow_missing or edge.depflag == dt.BUILD
dep_allow_missing = allow_missing or edge.deptypes == ("build",)
self._add(
edge.spec,
directory_layout,
@@ -1200,10 +1198,10 @@ def _add(
self._data[key] = InstallRecord(new_spec, path, installed, ref_count=0, **extra_args)
# Connect dependencies from the DB to the new copy.
for dep in spec.edges_to_dependencies(depflag=_TRACKED_DEPENDENCIES):
for dep in spec.edges_to_dependencies(deptype=_TRACKED_DEPENDENCIES):
dkey = dep.spec.dag_hash()
upstream, record = self.query_by_spec_hash(dkey)
new_spec._add_dependency(record.spec, depflag=dep.depflag, virtuals=dep.virtuals)
new_spec._add_dependency(record.spec, deptypes=dep.deptypes, virtuals=dep.virtuals)
if not upstream:
record.ref_count += 1
@@ -1373,13 +1371,7 @@ def deprecate(self, spec, deprecator):
return self._deprecate(spec, deprecator)
@_autospec
def installed_relatives(
self,
spec,
direction="children",
transitive=True,
deptype: Union[dt.DepFlag, dt.DepTypes] = dt.ALL,
):
def installed_relatives(self, spec, direction="children", transitive=True, deptype="all"):
"""Return installed specs related to this one."""
if direction not in ("parents", "children"):
raise ValueError("Invalid direction: %s" % direction)


@@ -3,11 +3,64 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Data structures that represent Spack's dependency relationships."""
from typing import Dict, List
from typing import Dict, List, Optional, Set, Tuple, Union
import spack.deptypes as dt
import spack.spec
#: The types of dependency relationships that Spack understands.
all_deptypes = ("build", "link", "run", "test")
#: Default dependency type if none is specified
default_deptype = ("build", "link")
#: Type hint for the arguments accepting a dependency type
DependencyArgument = Union[str, List[str], Tuple[str, ...]]
def deptype_chars(*type_tuples: str) -> str:
"""Create a string representing deptypes for many dependencies.
The string will be some subset of 'blrt', like 'bl ', 'b t', or
' lr ' where each letter in 'blrt' stands for 'build', 'link',
'run', and 'test' (the dependency types).
For a single dependency, this just indicates that the dependency has
the indicated deptypes. For a list of dependencies, this shows
whether ANY dependency in the list has the deptypes (so the deptypes
are merged).
"""
types: Set[str] = set()
for t in type_tuples:
if t:
types.update(t)
return "".join(t[0] if t in types else " " for t in all_deptypes)
def canonical_deptype(deptype: DependencyArgument) -> Tuple[str, ...]:
"""Convert deptype to a canonical sorted tuple, or raise ValueError.
Args:
deptype: string representing dependency type, or a list/tuple of such strings.
Can also be the builtin function ``all`` or the string 'all', which result in
a tuple of all dependency types known to Spack.
"""
if deptype in ("all", all):
return all_deptypes
elif isinstance(deptype, str):
if deptype not in all_deptypes:
raise ValueError("Invalid dependency type: %s" % deptype)
return (deptype,)
elif isinstance(deptype, (tuple, list, set)):
bad = [d for d in deptype if d not in all_deptypes]
if bad:
raise ValueError("Invalid dependency types: %s" % ",".join(str(t) for t in bad))
return tuple(sorted(set(deptype)))
raise ValueError("Invalid dependency type: %s" % repr(deptype))
class Dependency:
"""Class representing metadata for a dependency on a package.
@@ -40,7 +93,7 @@ def __init__(
self,
pkg: "spack.package_base.PackageBase",
spec: "spack.spec.Spec",
depflag: dt.DepFlag = dt.DEFAULT,
type: Optional[Tuple[str, ...]] = default_deptype,
):
"""Create a new Dependency.
@@ -57,7 +110,11 @@ def __init__(
# This dict maps condition specs to lists of Patch objects, just
# as the patches dict on packages does.
self.patches: Dict[spack.spec.Spec, "List[spack.patch.Patch]"] = {}
self.depflag = depflag
if type is None:
self.type = set(default_deptype)
else:
self.type = set(type)
@property
def name(self) -> str:
@@ -67,7 +124,7 @@ def name(self) -> str:
def merge(self, other: "Dependency"):
"""Merge constraints, deptypes, and patches of other into self."""
self.spec.constrain(other.spec)
self.depflag |= other.depflag
self.type |= other.type
# concatenate patch lists, or just copy them in
for cond, p in other.patches.items():
@@ -78,5 +135,5 @@ def merge(self, other: "Dependency"):
self.patches[cond] = other.patches[cond]
def __repr__(self) -> str:
types = dt.flag_to_chars(self.depflag)
types = deptype_chars(*self.type)
return f"<Dependency: {self.pkg.name} -> {self.spec} [{types}]>"
