Compare commits


2 Commits

1c6bb8cfc3  add regression number  (Gregory Becker, 2023-06-21 11:35:57 -07:00)
9244ecacf0  bugfix: environments with unify:false can concretize abstract hash without name  (Gregory Becker, 2023-06-21 11:34:16 -07:00)
2056 changed files with 21300 additions and 41742 deletions


@@ -10,8 +10,3 @@ updates:
directory: "/lib/spack/docs"
schedule:
interval: "daily"
# Requirements to run style checks
- package-ecosystem: "pip"
directory: "/.github/workflows/style"
schedule:
interval: "daily"


@@ -17,13 +17,10 @@ concurrency:
jobs:
# Run audits on all the packages in the built-in repository
package-audits:
runs-on: ${{ matrix.operating_system }}
strategy:
matrix:
operating_system: ["ubuntu-latest", "macos-latest"]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
@@ -44,4 +41,4 @@ jobs:
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,audits
flags: unittests,linux,audits


@@ -24,7 +24,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup repo
@@ -158,7 +158,7 @@ jobs:
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
@@ -179,11 +179,11 @@ jobs:
run: |
brew install tree
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
@@ -204,7 +204,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup repo
@@ -214,7 +214,7 @@ jobs:
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
@@ -247,7 +247,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -283,7 +283,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- name: Setup non-root user
@@ -316,7 +316,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -333,7 +333,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh


@@ -56,7 +56,7 @@ jobs:
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -86,19 +86,19 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # @v1
uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # @v1
uses: docker/setup-buildx-action@ecf95283f03858871ff00b787d79c419715afc34 # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # @v1
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # @v1
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -106,13 +106,13 @@ jobs:
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # @v1
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # @v2
uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}


@@ -35,7 +35,7 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0


@@ -14,10 +14,10 @@ jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
with:
python-version: 3.9
- name: Install Python packages


@@ -1,7 +0,0 @@
black==23.1.0
clingo==5.6.2
flake8==6.1.0
isort==5.12.0
mypy==1.5.0
types-six==1.16.21.9
vermin==1.5.2


@@ -47,10 +47,10 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -94,10 +94,10 @@ jobs:
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -133,7 +133,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -152,10 +152,10 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -165,7 +165,6 @@ jobs:
- name: Install Python packages
run: |
pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo pytest-xdist
pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black"
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
@@ -187,10 +186,10 @@ jobs:
matrix:
python-version: ["3.10"]
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages


@@ -18,15 +18,15 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python Packages
run: |
pip install --upgrade pip setuptools
pip install -r .github/workflows/style/requirements.txt
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
@@ -35,17 +35,16 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python packages
run: |
pip install --upgrade pip setuptools
pip install -r .github/workflows/style/requirements.txt
python3 -m pip install --upgrade pip setuptools types-six black==23.1.0 mypy isort clingo flake8
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
@@ -69,7 +68,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # @v2
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- name: Setup repo and non-root user
run: |
git --version


@@ -15,10 +15,10 @@ jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
with:
python-version: 3.9
- name: Install Python packages
@@ -39,10 +39,10 @@ jobs:
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
with:
python-version: 3.9
- name: Install Python packages
@@ -63,10 +63,10 @@ jobs:
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
with:
python-version: 3.9
- name: Install Python packages
@@ -75,5 +75,6 @@ jobs:
- name: Build Test
run: |
spack compiler find
spack -d external find cmake ninja
spack external find cmake
spack external find ninja
spack -d install abseil-cpp


@@ -1,21 +1,3 @@
# v0.20.1 (2023-07-10)
## Spack Bugfixes
- Spec removed from an environment where not actually removed if `--force` was not given (#37877)
- Speed-up module file generation (#37739)
- Hotfix for a few recipes that treat CMake as a link dependency (#35816)
- Fix re-running stand-alone test a second time, which was getting a trailing spurious failure (#37840)
- Fixed reading JSON manifest on Cray, reporting non-concrete specs (#37909)
- Fixed a few bugs when generating Dockerfiles from Spack (#37766,#37769)
- Fixed a few long-standing bugs when generating module files (#36678,#38347,#38465,#38455)
- Fixed issues with building Python extensions using an external Python (#38186)
- Fixed compiler removal from command line (#38057)
- Show external status as [e] (#33792)
- Backported `archspec` fixes (#37793)
- Improved a few error messages (#37791)
# v0.20.0 (2023-05-21)
`v0.20.0` is a major feature release.


@@ -2,26 +2,24 @@
## Supported Versions
We provide security updates for `develop` and for the last two
stable (`0.x`) release series of Spack. Security updates will be
made available as patch (`0.x.1`, `0.x.2`, etc.) releases.
We provide security updates for the following releases.
For more on Spack's release structure, see
[`README.md`](https://github.com/spack/spack#releases).
| Version | Supported |
| ------- | ------------------ |
| develop | :white_check_mark: |
| 0.19.x | :white_check_mark: |
| 0.18.x | :white_check_mark: |
## Reporting a Vulnerability
You can report a vulnerability using GitHub's private reporting
feature:
To report a vulnerability or other security
issue, email maintainers@spack.io.
1. Go to [github.com/spack/spack/security](https://github.com/spack/spack/security).
2. Click "Report a vulnerability" in the upper right corner of that page.
3. Fill out the form and submit your draft security advisory.
More details are available in
[GitHub's docs](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability).
You can expect to hear back about security issues within two days.
If your security issue is accepted, we will do our best to release
a fix within a week. If fixing the issue will take longer than
this, we will discuss timeline options with you.
You can expect to hear back within two days.
If your security issue is accepted, we will do
our best to release a fix within a week. If
fixing the issue will take longer than this,
we will discuss timeline options with you.


@@ -25,6 +25,8 @@ exit 1
# Line above is a shell no-op, and ends a python multi-line comment.
# The code above runs this file with our preferred python interpreter.
from __future__ import print_function
import os
import os.path
import sys
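
For context, the bilingual trick the comments above describe can be sketched as a minimal sh/Python polyglot. This is a simplified, hypothetical illustration of the technique, not Spack's actual header:

.. code-block:: python

   #!/bin/sh
   # The next line is a no-op in sh ("" followed by ":" reduces to the ":"
   # builtin), but in Python it opens a module-level string literal, so the
   # shell commands inside it are never seen by the Python interpreter.
   """:"
   # Only /bin/sh executes this: re-run the file with the preferred interpreter.
   exec python3 "$0" "$@"
   ":"""
   # From here on, only Python runs.
   import sys

   print("Running under Python", sys.version.split()[0])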


@@ -14,7 +14,7 @@
::
@echo off
set spack="%SPACK_ROOT%"\bin\spack
set spack=%SPACK_ROOT%\bin\spack
::#######################################################################
:: This is a wrapper around the spack command that forwards calls to
@@ -51,43 +51,65 @@ setlocal enabledelayedexpansion
:: subcommands will never start with '-'
:: everything after the subcommand is an arg
:: we cannot allow batch "for" loop to directly process CL args
:: a number of batch reserved characters are commonly passed to
:: spack and allowing batch's "for" method to process the raw inputs
:: results in a large number of formatting issues
:: instead, treat the entire CLI as one string
:: and split by space manually
:: capture cl args in variable named cl_args
set cl_args=%*
:process_cl_args
rem Set first cl argument (denoted by %1) to be processed
set t=%1
rem shift moves all cl positional arguments left by one
rem meaning %2 is now %1, this allows us to iterate over each
rem argument
shift
rem assign next "first" cl argument to cl_args, will be null when
rem there are no further arguments to process
set cl_args=%1
if "!t:~0,1!" == "-" (
if defined _sp_subcommand (
rem We already have a subcommand, processing args now
rem tokens=1* returns the first processed token produced
rem by tokenizing the input string cl_args on spaces into
rem the named variable %%g
rem While this may look like a for loop, it only
rem executes a single time for each of the cl args
rem the actual iterative loop is performed by the
rem goto process_cl_args stanza
rem we are simply leveraging the "for" method's string
rem tokenization
for /f "tokens=1*" %%g in ("%cl_args%") do (
set t=%%~g
rem remainder of string is composed into %%h
rem these are the cl args yet to be processed
rem assign cl_args var to only the args to be processed
rem effectively discarding the current arg %%g
rem this will be nul when we have no further tokens to process
set cl_args=%%h
rem process the first space delineated cl arg
rem of this iteration
if "!t:~0,1!" == "-" (
if defined _sp_subcommand (
rem We already have a subcommand, processing args now
if not defined _sp_args (
set "_sp_args=!t!"
) else (
set "_sp_args=!_sp_args! !t!"
)
) else (
if not defined _sp_flags (
set "_sp_flags=!t!"
shift
) else (
set "_sp_flags=!_sp_flags! !t!"
shift
)
)
) else if not defined _sp_subcommand (
set "_sp_subcommand=!t!"
shift
) else (
if not defined _sp_args (
set "_sp_args=!t!"
shift
) else (
set "_sp_args=!_sp_args! !t!"
shift
)
) else (
if not defined _sp_flags (
set "_sp_flags=!t!"
) else (
set "_sp_flags=!_sp_flags! !t!"
)
)
) else if not defined _sp_subcommand (
set "_sp_subcommand=!t!"
) else (
if not defined _sp_args (
set "_sp_args=!t!"
) else (
set "_sp_args=!_sp_args! !t!"
)
)
rem if this is not null, we have more tokens to process
rem if this is not nil, we have more tokens to process
rem start above process again with remaining unprocessed cl args
if defined cl_args goto :process_cl_args
@@ -192,7 +214,7 @@ goto :end_switch
if defined _sp_args (
if NOT "%_sp_args%"=="%_sp_args:--help=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args:-h=%" (
) else if NOT "%_sp_args%"=="%_sp_args: -h=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args:--bat=%" (
goto :default_case


@@ -1,146 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# #######################################################################
function Compare-CommonArgs {
$CMDArgs = $args[0]
# These arguments take precedence and call for no further parsing of arguments
# invoke actual Spack entrypoint with that context and exit after
"--help", "-h", "--version", "-V" | ForEach-Object {
$arg_opt = $_
if(($CMDArgs) -and ([bool]($CMDArgs.Where({$_ -eq $arg_opt})))) {
return $true
}
}
return $false
}
function Read-SpackArgs {
$SpackCMD_params = @()
$SpackSubCommand = $NULL
$SpackSubCommandArgs = @()
$args_ = $args[0]
$args_ | ForEach-Object {
if (!$SpackSubCommand) {
if($_.SubString(0,1) -eq "-")
{
$SpackCMD_params += $_
}
else{
$SpackSubCommand = $_
}
}
else{
$SpackSubCommandArgs += $_
}
}
return $SpackCMD_params, $SpackSubCommand, $SpackSubCommandArgs
}
function Set-SpackEnv {
# This method is responsible
# for processing the return from $(spack <command>)
# which are returned as System.Object[]'s containing
# a list of env commands
# Invoke-Expression can only handle one command at a time
# so we iterate over the list to invoke the env modification
# expressions one at a time
foreach($envop in $args[0]){
Invoke-Expression $envop
}
}
function Invoke-SpackCD {
if (Compare-CommonArgs $SpackSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" cd -h
}
else {
$LOC = $(python "$Env:SPACK_ROOT/bin/spack" location $SpackSubCommandArgs)
if (($NULL -ne $LOC)){
if ( Test-Path -Path $LOC){
Set-Location $LOC
}
else{
exit 1
}
}
else {
exit 1
}
}
}
function Invoke-SpackEnv {
if (Compare-CommonArgs $SpackSubCommandArgs[0]) {
python "$Env:SPACK_ROOT/bin/spack" env -h
}
else {
$SubCommandSubCommand = $SpackSubCommandArgs[0]
$SubCommandSubCommandArgs = $SpackSubCommandArgs[1..$SpackSubCommandArgs.Count]
switch ($SubCommandSubCommand) {
"activate" {
if (Compare-CommonArgs $SubCommandSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" env activate $SubCommandSubCommandArgs
}
elseif ([bool]($SubCommandSubCommandArgs.Where({$_ -eq "--pwsh"}))) {
python "$Env:SPACK_ROOT/bin/spack" env activate $SubCommandSubCommandArgs
}
elseif (!$SubCommandSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" env activate $SubCommandSubCommandArgs
}
else {
$SpackEnv = $(python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params env activate "--pwsh" $SubCommandSubCommandArgs)
Set-SpackEnv $SpackEnv
}
}
"deactivate" {
if ([bool]($SubCommandSubCommandArgs.Where({$_ -eq "--pwsh"}))) {
python"$Env:SPACK_ROOT/bin/spack" env deactivate $SubCommandSubCommandArgs
}
elseif($SubCommandSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" env deactivate -h
}
else {
$SpackEnv = $(python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params env deactivate "--pwsh")
Set-SpackEnv $SpackEnv
}
}
default {python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
}
}
}
function Invoke-SpackLoad {
if (Compare-CommonArgs $SpackSubCommandArgs) {
python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
}
elseif ([bool]($SpackSubCommandArgs.Where({($_ -eq "--pwsh") -or ($_ -eq "--list")}))) {
python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
}
else {
$SpackEnv = $(python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand "--pwsh" $SpackSubCommandArgs)
Set-SpackEnv $SpackEnv
}
}
$SpackCMD_params, $SpackSubCommand, $SpackSubCommandArgs = Read-SpackArgs $args
if (Compare-CommonArgs $SpackCMD_params) {
python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
exit $LASTEXITCODE
}
# Process Spack commands with special conditions
# all other commands are piped directly to Spack
switch($SpackSubCommand)
{
"cd" {Invoke-SpackCD}
"env" {Invoke-SpackEnv}
"load" {Invoke-SpackLoad}
"unload" {Invoke-SpackLoad}
default {python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
}


@@ -36,9 +36,3 @@ concretizer:
# on each root spec, allowing different versions and variants of the same package in
# an environment.
unify: true
# Option to deal with possible duplicate nodes (i.e. different nodes from the same package) in the DAG.
duplicates:
# "none": allows a single node for any package in the DAG.
# "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
# "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
strategy: none


@@ -216,11 +216,10 @@ config:
# manipulation by unprivileged user (e.g. AFS)
allow_sgid: true
# Whether to show status information during building and installing packages.
# This gives information about Spack's current progress as well as the current
# and total number of packages. Information is shown both in the terminal
# title and inline.
install_status: true
# Whether to set the terminal title to display status information during
# building and installing packages. This gives information about Spack's
# current progress as well as the current and total number of packages.
terminal_title: false
# Number of seconds a buildcache's index.json is cached locally before probing
# for updates, within a single Spack invocation. Defaults to 10 minutes.


@@ -1,4 +1,2 @@
mirrors:
spack-public:
binary: false
url: https://mirror.spack.io
spack-public: https://mirror.spack.io


@@ -49,7 +49,6 @@ packages:
pbs: [openpbs, torque]
pil: [py-pillow]
pkgconfig: [pkgconf, pkg-config]
qmake: [qt-base, qt]
rpc: [libtirpc]
scalapack: [netlib-scalapack, amdscalapack]
sycl: [hipsycl]
@@ -60,7 +59,6 @@ packages:
xxd: [xxd-standalone, vim]
yacc: [bison, byacc]
ziglang: [zig]
zlib-api: [zlib-ng+compat, zlib]
permissions:
read: world
write: user


@@ -48,10 +48,14 @@ Here is an example where a build cache is created in a local directory named
.. code-block:: console
$ spack buildcache push ./spack-cache ninja
$ spack buildcache push --allow-root ./spack-cache ninja
==> Pushing binary packages to file:///home/spackuser/spack/spack-cache/build_cache
Note that ``ninja`` must be installed locally for this to work.
Not that ``ninja`` must be installed locally for this to work.
We're using the ``--allow-root`` flag to tell Spack that it is OK when any of
the binaries we're pushing contain references to the local Spack install
directory.
Once you have a build cache, you can add it as a mirror, discussed next.
@@ -143,7 +147,7 @@ and then install from it exclusively, you would do:
$ spack mirror add E4S https://cache.e4s.io
$ spack buildcache keys --install --trust
$ spack install --use-buildcache only <package>
$ spack install --use-buildache only <package>
We use ``--install`` and ``--trust`` to say that we are installing keys to our
keyring, and trusting all downloaded keys.


@@ -32,14 +32,9 @@ can't be found. You can readily check if any prerequisite for using Spack is mis
Spack will take care of bootstrapping any missing dependency marked as [B]. Dependencies marked as [-] are instead required to be found on the system.
% echo $?
1
In the case of the output shown above Spack detected that both ``clingo`` and ``gnupg``
are missing and it's giving detailed information on why they are needed and whether
they can be bootstrapped. The return code of this command summarizes the results, if any
dependencies are missing the return code is ``1``, otherwise ``0``. Running a command that
concretizes a spec, like:
they can be bootstrapped. Running a command that concretize a spec, like:
.. code-block:: console
@@ -49,7 +44,7 @@ concretizes a spec, like:
==> Installing "clingo-bootstrap@spack%apple-clang@12.0.0~docs~ipo+python build_type=Release arch=darwin-catalina-x86_64" from a buildcache
[ ... ]
automatically triggers the bootstrapping of clingo from pre-built binaries as expected.
triggers the bootstrapping of clingo from pre-built binaries as expected.
Users can also bootstrap all the dependencies needed by Spack in a single command, which
might be useful to setup containers or other similar environments:


@@ -9,32 +9,9 @@
Bundle
------
``BundlePackage`` represents a set of packages that are expected to work
well together, such as a collection of commonly used software libraries.
The associated software is specified as dependencies.
If it makes sense, variants, conflicts, and requirements can be added to
the package. :ref:`Variants <variants>` ensure that common build options
are consistent across the packages supporting them. :ref:`Conflicts
and requirements <packaging_conflicts>` prevent attempts to build with known
bugs or limitations.
For example, if ``MyBundlePackage`` is known to only build on ``linux``,
it could use the ``require`` directive as follows:
.. code-block:: python
require("platform=linux", msg="MyBundlePackage only builds on linux")
Spack has a number of built-in bundle packages, such as:
* `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/amd-aocl/package.py>`_
* `EcpProxyApps <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-proxy-apps/package.py>`_
* `Libc <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/libc/package.py>`_
* `Xsdk <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/xsdk/package.py>`_
where ``Xsdk`` also inherits from ``CudaPackage`` and ``RocmPackage`` and
``Libc`` is a virtual bundle package for the C standard library.
``BundlePackage`` represents a set of packages that are expected to work well
together, such as a collection of commonly used software libraries. The
associated software is specified as bundle dependencies.
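
As a minimal illustration, a hypothetical bundle recipe could look like the following sketch (``MyBundle`` and its dependency list are invented for this example):

.. code-block:: python

   from spack.package import *


   class MyBundle(BundlePackage):
       """Hypothetical bundle of libraries that are commonly used together."""

       homepage = "https://example.com/my-bundle"

       # Bundle packages have no source code of their own, so version
       # directives consist solely of a version name.
       version("1.0")

       # The bundled software is expressed purely as dependencies.
       depends_on("zlib")
       depends_on("bzip2")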
^^^^^^^^


@@ -76,55 +76,6 @@ To build with ``icx``, do ::
spack install patchelf%oneapi
Using oneAPI Spack environment
-------------------------------
In this example, we build lammps with ``icx`` using the Spack environment for oneAPI packages created by Intel. The
compilers are installed with Spack as in the example above.
Install the oneAPI compilers::
spack install intel-oneapi-compilers
Add the compilers to your ``compilers.yaml`` so Spack can use them::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
Verify that the compilers are available::
spack compiler list
Clone `spack-configs <https://github.com/spack/spack-configs>`_ repo and activate Intel oneAPI CPU environment::
git clone https://github.com/spack/spack-configs
spack env activate spack-configs/INTEL/CPU
spack concretize -f
`Intel oneAPI CPU environment <https://github.com/spack/spack-configs/blob/main/INTEL/CPU/spack.yaml>`_ contains applications tested and validated by Intel; this list is constantly extended. It currently supports:
- `Devito <https://www.devitoproject.org/>`_
- `GROMACS <https://www.gromacs.org/>`_
- `HPCG <https://www.hpcg-benchmark.org/>`_
- `HPL <https://netlib.org/benchmark/hpl/>`_
- `LAMMPS <https://www.lammps.org/#gsc.tab=0>`_
- `OpenFOAM <https://www.openfoam.com/>`_
- `Quantum Espresso <https://www.quantum-espresso.org/>`_
- `STREAM <https://www.cs.virginia.edu/stream/>`_
- `WRF <https://github.com/wrf-model/WRF>`_
To build lammps with the oneAPI compiler from this environment, just run::
spack install lammps
Compiled binaries can be found using::
spack cd -i lammps
You can do the same for all other applications from this environment.
Using oneAPI MPI to Satisfy a Virtual Dependence
------------------------------------------------------


@@ -32,7 +32,7 @@ By default, these phases run:
.. code-block:: console
$ sip-build --verbose --target-dir ...
$ python configure.py --bindir ... --destdir ...
$ make
$ make install
@@ -41,30 +41,30 @@ By default, these phases run:
Important files
^^^^^^^^^^^^^^^
Each SIP package comes with a custom configuration file written in Python.
For newer packages, this is called ``project.py``, while in older packages,
it may be called ``configure.py``. This script contains instructions to build
the project.
Each SIP package comes with a custom ``configure.py`` build script,
written in Python. This script contains instructions to build the project.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
``SIPPackage`` requires several dependencies. Python and SIP are needed at build-time
to run the aforementioned configure script. Python is also needed at run-time to
actually use the installed Python library. And as we are building Python bindings
for C/C++ libraries, Python is also needed as a link dependency. All of these
dependencies are automatically added via the base class.
``SIPPackage`` requires several dependencies. Python is needed to run
the ``configure.py`` build script, and to run the resulting Python
libraries. Qt is needed to provide the ``qmake`` command. SIP is also
needed to build the package. All of these dependencies are automatically
added via the base class
.. code-block:: python
extends("python", type=("build", "link", "run"))
depends_on("py-sip", type="build")
extends('python')
depends_on('qt', type='build')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to ``sip-build``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
depends_on('py-sip', type='build')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to ``configure.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Each phase comes with a ``<phase_args>`` function that can be used to pass
arguments to that particular phase. For example, if you need to pass
@@ -72,11 +72,11 @@ arguments to the configure phase, you can use:
.. code-block:: python
def configure_args(self):
return ["--no-python-dbus"]
def configure_args(self, spec, prefix):
return ['--no-python-dbus']
A list of valid options can be found by running ``sip-build --help``.
A list of valid options can be found by running ``python configure.py --help``.
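
For instance, a minimal recipe using the newer ``sip-build`` signature might look like this hypothetical sketch (``PyFoo`` and the ``--no-docs`` option are invented for illustration):

.. code-block:: python

   from spack.package import *


   class PyFoo(SIPPackage):
       """Hypothetical Python bindings built with SIP."""

       homepage = "https://example.com/foo"
       url = "https://example.com/foo-1.0.tar.gz"

       version("1.0", md5="00000000000000000000000000000000")

       def configure_args(self):
           # Extra arguments passed to the configure phase.
           return ["--no-docs"]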
^^^^^^^
Testing


@@ -97,7 +97,9 @@ class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if "refspecific" in node:
del node["refspecific"]
return super().resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode
)
#
@@ -214,8 +216,6 @@ def setup(sphinx):
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.StandardVersion"),
("py:class", "spack.spec.DependencySpec"),
("py:class", "spack.spec.InstallStatus"),
("py:class", "spack.spec.SpecfileReaderBase"),
("py:class", "spack.install_test.Pb"),
]


@@ -292,13 +292,12 @@ It is also worth noting that:
non_bindable_shared_objects = ["libinterface.so"]
----------------------
``install_status``
``terminal_title``
----------------------
When set to ``true``, Spack will show information about its current progress
as well as the current and total package numbers. Progress is shown both
in the terminal title and inline. Setting it to ``false`` will not show any
progress information.
By setting this option to ``true``, Spack will update the terminal's title to
provide information about its current progress as well as the current and
total package numbers.
To work properly, this requires your terminal to reset its title after
Spack has finished its work, otherwise Spack's status information will


@@ -916,9 +916,9 @@ function, as shown in the example below:
.. code-block:: yaml
projections:
zlib: "{name}-{version}"
^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
all: "{name}-{version}/{compiler.name}-{compiler.version}"
zlib: {name}-{version}
^mpi: {name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}
all: {name}-{version}/{compiler.name}-{compiler.version}
The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will


@@ -1,113 +0,0 @@
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
==========================
Using External GPU Support
==========================
Many packages come with a ``+cuda`` or ``+rocm`` variant. With no added
configuration Spack will download and install the needed components.
It may be preferable to use existing system support: the following sections
help with using a system installation of GPU libraries.
-----------------------------------
Using an External ROCm Installation
-----------------------------------
Spack breaks down ROCm into many separate component packages. The following
is an example ``packages.yaml`` that organizes a consistent set of ROCm
components for use by dependent packages:
.. code-block:: yaml
packages:
all:
compiler: [rocmcc@=5.3.0]
variants: amdgpu_target=gfx90a
hip:
buildable: false
externals:
- spec: hip@5.3.0
prefix: /opt/rocm-5.3.0/hip
hsa-rocr-dev:
buildable: false
externals:
- spec: hsa-rocr-dev@5.3.0
prefix: /opt/rocm-5.3.0/
llvm-amdgpu:
buildable: false
externals:
- spec: llvm-amdgpu@5.3.0
prefix: /opt/rocm-5.3.0/llvm/
comgr:
buildable: false
externals:
- spec: comgr@5.3.0
prefix: /opt/rocm-5.3.0/
hipsparse:
buildable: false
externals:
- spec: hipsparse@5.3.0
prefix: /opt/rocm-5.3.0/
hipblas:
buildable: false
externals:
- spec: hipblas@5.3.0
prefix: /opt/rocm-5.3.0/
rocblas:
buildable: false
externals:
- spec: rocblas@5.3.0
prefix: /opt/rocm-5.3.0/
rocprim:
buildable: false
externals:
- spec: rocprim@5.3.0
prefix: /opt/rocm-5.3.0/rocprim/
This is in combination with the following compiler definition:
.. code-block:: yaml
compilers:
- compiler:
spec: rocmcc@=5.3.0
paths:
cc: /opt/rocm-5.3.0/bin/amdclang
cxx: /opt/rocm-5.3.0/bin/amdclang++
f77: null
fc: /opt/rocm-5.3.0/bin/amdflang
operating_system: rhel8
target: x86_64
This includes the following considerations:
- Each of the listed externals specifies ``buildable: false`` to force Spack
to use only the externals we defined.
- ``spack external find`` can automatically locate some of the ``hip``/``rocm``
packages, but not all of them, and furthermore not in a manner that
guarantees a complementary set if multiple ROCm installations are available.
- The ``prefix`` is the same for several components, but note that others
require listing one of the subdirectories as a prefix.
-----------------------------------
Using an External CUDA Installation
-----------------------------------
CUDA is split into fewer components and is simpler to specify:
.. code-block:: yaml
packages:
all:
variants:
- cuda_arch=70
cuda:
buildable: false
externals:
- spec: cuda@11.0.2
prefix: /opt/cuda/cuda-11.0.2/
where ``/opt/cuda/cuda-11.0.2/lib/`` contains ``libcudart.so``.


@@ -77,7 +77,6 @@ or refer to the full manual below.
extensions
pipelines
signing
gpu_configuration
.. toctree::
:maxdepth: 2


@@ -275,12 +275,10 @@ of the installed software. For instance, in the snippet below:
set:
BAR: 'bar'
# This anonymous spec selects any package that
# depends on mpi. The double colon at the
# depends on openmpi. The double colon at the
# end clears the set of rules that matched so far.
^mpi::
^openmpi::
environment:
prepend_path:
PATH: '{^mpi.prefix}/bin'
set:
BAR: 'baz'
# Selects any zlib package
@@ -295,9 +293,7 @@ of the installed software. For instance, in the snippet below:
- FOOBAR
you are instructing Spack to set the environment variable ``BAR=bar`` for every module,
unless the associated spec satisfies the abstract dependency ``^mpi`` in which case
``BAR=baz``, and the directory containing the respective MPI executables is prepended
to the ``PATH`` variable.
unless the associated spec satisfies ``^openmpi`` in which case ``BAR=baz``.
In addition in any spec that satisfies ``zlib`` the value ``foo`` will be
prepended to ``LD_LIBRARY_PATH`` and in any spec that satisfies ``zlib%gcc@4.8``
the variable ``FOOBAR`` will be unset.
@@ -400,30 +396,28 @@ that are already in the Lmod hierarchy.
.. note::
Tcl and Lua modules also allow for explicit conflicts between modulefiles.
Tcl modules
Tcl modules also allow for explicit conflicts between modulefiles.
.. code-block:: yaml
.. code-block:: yaml
modules:
default:
enable:
- tcl
tcl:
projections:
all: '{name}/{version}-{compiler.name}-{compiler.version}'
all:
conflict:
- '{name}'
- 'intel/14.0.1'
modules:
default:
enable:
- tcl
tcl:
projections:
all: '{name}/{version}-{compiler.name}-{compiler.version}'
all:
conflict:
- '{name}'
- 'intel/14.0.1'
will create module files that will conflict with ``intel/14.0.1`` and with the
base directory of the same module, effectively preventing the possibility to
load two or more versions of the same software at the same time. The tokens
that are available for use in this directive are the same understood by the
:meth:`~spack.spec.Spec.format` method.
For Lmod and Environment Modules versions prior 4.2, it is important to
express the conflict on both modulefiles conflicting with each other.
will create module files that will conflict with ``intel/14.0.1`` and with the
base directory of the same module, effectively preventing the possibility to
load two or more versions of the same software at the same time. The tokens
that are available for use in this directive are the same understood by
the :meth:`~spack.spec.Spec.format` method.
.. note::


@@ -121,7 +121,7 @@ Since v0.19, Spack supports two ways of writing a package recipe. The most comm
def url_for_version(self, version):
if version >= Version("2.1.1"):
return super().url_for_version(version)
return super(Openjpeg, self).url_for_version(version)
url_fmt = "https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz"
return url_fmt.format(version)
@@ -155,7 +155,7 @@ builder class explicitly. Using the same example as above, this reads:
def url_for_version(self, version):
if version >= Version("2.1.1"):
return super().url_for_version(version)
return super(Openjpeg, self).url_for_version(version)
url_fmt = "https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz"
return url_fmt.format(version)
@@ -363,42 +363,6 @@ one of these::
If Spack finds none of these variables set, it will look for ``vim``, ``vi``, ``emacs``,
``nano``, and ``notepad``, in that order.
^^^^^^^^^^^^^^^^^
Bundling software
^^^^^^^^^^^^^^^^^
If you have a collection of software expected to work well together with
no source code of its own, you can create a :ref:`BundlePackage <bundlepackage>`.
Examples where bundle packages can be useful include defining suites of
applications (e.g, `EcpProxyApps
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-proxy-apps/package.py>`_), commonly used libraries
(e.g., `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/amd-aocl/package.py>`_),
and software development kits (e.g., `EcpDataVisSdk <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-data-vis-sdk/package.py>`_).
These versioned packages primarily consist of dependencies on the associated
software packages. They can include :ref:`variants <variants>` to ensure
common build options are consistently applied to dependencies. Known build
failures, such as not building on a platform or when certain compilers or
variants are used, can be flagged with :ref:`conflicts <packaging_conflicts>`.
Build requirements, such as only building with specific compilers, can similarly
be flagged with :ref:`requires <packaging_conflicts>`.
The ``spack create --template bundle`` command will create a skeleton
``BundlePackage`` ``package.py`` for you:
.. code-block:: console
$ spack create --template bundle --name coolsdk
Now you can fill in the basic package documentation, version(s), and software
package dependencies along with any other relevant customizations.
.. note::
Remember that bundle packages have no software of their own so there
is nothing to download.
^^^^^^^^^^^^^^^^^^^^^^^^^
Non-downloadable software
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -646,16 +610,7 @@ add a line like this in the package class:
version("8.2.0", md5="1c9f62f0778697a09d36121ead88e08e")
version("8.1.2", md5="d47dd09ed7ae6e7fd6f9a816d7f5fdf6")
.. note::
By convention, we list versions in descending order, from newest to oldest.
.. note::
:ref:`Bundle packages <bundlepackage>` do not have source code so
there is nothing to fetch. Consequently, their version directives
consist solely of the version name (e.g., ``version("202309")``).
Versions should be listed in descending order, from newest to oldest.
^^^^^^^^^^^^^
Date Versions
@@ -2288,7 +2243,7 @@ looks like this:
url = "http://www.openssl.org/source/openssl-1.0.1h.tar.gz"
version("1.0.1h", md5="8d6d684a9430d5cc98a62a5d8fbda8cf")
depends_on("zlib-api")
depends_on("zlib")
parallel = False
@@ -2723,7 +2678,7 @@ Conflicts and requirements
--------------------------
Sometimes packages have known bugs, or limitations, that would prevent them
from building e.g. against other dependencies or with certain compilers. Spack
to build e.g. against other dependencies or with certain compilers. Spack
makes it possible to express such constraints with the ``conflicts`` directive.
Adding the following to a package:
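
For instance (a hypothetical sketch; the package and constraint are invented for illustration, not the guide's own example):

.. code-block:: python

   class Foo(Package):
       ...
       # Known upstream bug: foo up to 1.2 does not build with GCC 11.
       conflicts("%gcc@11", when="@:1.2", msg="foo <= 1.2 does not build with GCC 11")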
@@ -4818,17 +4773,17 @@ For example, running:
results in spack checking that the installation created the following **file**:
* ``self.prefix.bin.reframe``
* ``self.prefix/bin/reframe``
and the following **directories**:
* ``self.prefix.bin``
* ``self.prefix.config``
* ``self.prefix.docs``
* ``self.prefix.reframe``
* ``self.prefix.tutorials``
* ``self.prefix.unittests``
* ``self.prefix.cscs-checks``
* ``self.prefix/bin``
* ``self.prefix/config``
* ``self.prefix/docs``
* ``self.prefix/reframe``
* ``self.prefix/tutorials``
* ``self.prefix/unittests``
* ``self.prefix/cscs-checks``
If **any** of these paths are missing, then Spack considers the installation
to have failed.
@@ -4972,7 +4927,7 @@ installed executable. The check is implemented as follows:
@on_package_attributes(run_tests=True)
def check_list(self):
with working_dir(self.stage.source_path):
reframe = Executable(self.prefix.bin.reframe)
reframe = Executable(join_path(self.prefix, "bin", "reframe"))
reframe("-l")
.. warning::
@@ -5192,8 +5147,8 @@ embedded test parts.
for example in ["ex1", "ex2"]:
with test_part(
self,
f"test_example_{example}",
purpose=f"run installed {example}",
"test_example_{0}".format(example),
purpose="run installed {0}".format(example),
):
exe = which(join_path(self.prefix.bin, example))
exe()
@@ -5271,10 +5226,11 @@ Below illustrates using this feature to compile an example.
...
cxx = which(os.environ["CXX"])
cxx(
f"-L{self.prefix.lib}",
f"-I{self.prefix.include}",
f"{exe}.cpp",
"-o", exe
"-L{0}".format(self.prefix.lib),
"-I{0}".format(self.prefix.include),
"{0}.cpp".format(exe),
"-o",
exe
)
cxx_example = which(exe)
cxx_example()
@@ -5291,14 +5247,14 @@ Saving build-time files
We highly recommend re-using build-time test sources and pared down
input files for testing installed software. These files are easier
to keep synchronized with software capabilities since they reside
within the software's repository.
If that is not possible, you can add test-related files to the package
repository (see :ref:`adding custom files <cache_custom_files>`). It
will be important to maintain them so they work across listed or supported
versions of the package.
You can use the ``cache_extra_test_sources`` helper to copy directories
You can use the ``cache_extra_test_sources`` method to copy directories
and or files from the source build stage directory to the package's
installation directory.
@@ -5306,15 +5262,10 @@ The signature for ``cache_extra_test_sources`` is:
.. code-block:: python
def cache_extra_test_sources(pkg, srcs):
where each argument has the following meaning:
* ``pkg`` is an instance of the package for the spec under test.
* ``srcs`` is a string *or* a list of strings corresponding to the
paths of subdirectories and or files needed for stand-alone testing.
def cache_extra_test_sources(self, srcs):
where ``srcs`` is a string *or* a list of strings corresponding to the
paths of subdirectories and or files needed for stand-alone testing.
The paths must be relative to the staged source directory. Contents of
subdirectories and files are copied to a special test cache subdirectory
of the installation prefix. They are automatically copied to the appropriate
@@ -5335,18 +5286,21 @@ and using ``foo.c`` in a test method is illustrated below.
srcs = ["tests",
join_path("examples", "foo.c"),
join_path("examples", "bar.c")]
cache_extra_test_sources(self, srcs)
self.cache_extra_test_sources(srcs)
def test_foo(self):
exe = "foo"
src_dir = self.test_suite.current_test_cache_dir.examples
src_dir = join_path(
self.test_suite.current_test_cache_dir, "examples"
)
with working_dir(src_dir):
cc = which(os.environ["CC"])
cc(
f"-L{self.prefix.lib}",
f"-I{self.prefix.include}",
f"{exe}.c",
"-o", exe
"-L{0}".format(self.prefix.lib),
"-I{0}".format(self.prefix.include),
"{0}.c".format(exe),
"-o",
exe
)
foo = which(exe)
foo()
@@ -5372,9 +5326,9 @@ the files using the ``self.test_suite.current_test_cache_dir`` property.
In our example above, test methods can use the following paths to reference
the copy of each entry listed in ``srcs``, respectively:
* ``self.test_suite.current_test_cache_dir.tests``
* ``join_path(self.test_suite.current_test_cache_dir.examples, "foo.c")``
* ``join_path(self.test_suite.current_test_cache_dir.examples, "bar.c")``
* ``join_path(self.test_suite.current_test_cache_dir, "tests")``
* ``join_path(self.test_suite.current_test_cache_dir, "examples", "foo.c")``
* ``join_path(self.test_suite.current_test_cache_dir, "examples", "bar.c")``
.. admonition:: Library packages should build stand-alone tests
@@ -5393,7 +5347,7 @@ the copy of each entry listed in ``srcs``, respectively:
If one or more of the copied files needs to be modified to reference
the installed software, it is recommended that those changes be made
to the cached files **once** in the ``copy_test_sources`` method and
**after** the call to ``cache_extra_test_sources()``. This will
**after** the call to ``self.cache_extra_test_sources()``. This will
reduce the amount of unnecessary work in the test method **and** avoid
problems testing in shared instances and facility deployments.
@@ -5440,7 +5394,7 @@ property as shown below.
"""build and run custom-example"""
data_dir = self.test_suite.current_test_data_dir
exe = "custom-example"
src = datadir.join(f"{exe}.cpp")
src = datadir.join("{0}.cpp".format(exe))
...
# TODO: Build custom-example using src and exe
...
@@ -5456,7 +5410,7 @@ Reading expected output from a file
The helper function ``get_escaped_text_output`` is available for packages
to retrieve and properly format the text from a file that contains the
expected output from running an executable that may contain special
characters.
The signature for ``get_escaped_text_output`` is:
@@ -5490,7 +5444,7 @@ added to the package's ``test`` subdirectory.
db_filename, ".dump", output=str.split, error=str.split
)
for exp in expected:
assert re.search(exp, out), f"Expected '{exp}' in output"
assert re.search(exp, out), "Expected '{0}' in output".format(exp)
If the file was instead copied from the ``tests`` subdirectory of the staged
source code, the path would be obtained as shown below.
@@ -5503,7 +5457,7 @@ source code, the path would be obtained as shown below.
db_filename = test_cache_dir.join("packages.db")
Alternatively, if the file was copied to the ``share/tests`` subdirectory
as part of the installation process, the test could access the path as
follows:
.. code-block:: python
@@ -5540,12 +5494,9 @@ Invoking the method is the equivalent of:
.. code-block:: python
errors = []
for check in expected:
if not re.search(check, actual):
errors.append(f"Expected '{check}' in output '{actual}'")
if errors:
raise RuntimeError("\n ".join(errors))
raise RuntimeError("Expected '{0}' in output '{1}'".format(check, actual))
.. _accessing-files:
@@ -5585,7 +5536,7 @@ repository, and installation.
- ``self.test_suite.test_dir_for_spec(self.spec)``
* - Current Spec's Build-time Files
- ``self.test_suite.current_test_cache_dir``
- ``join_path(self.test_suite.current_test_cache_dir.examples, "foo.c")``
- ``join_path(self.test_suite.current_test_cache_dir, "examples", "foo.c")``
* - Current Spec's Custom Test Files
- ``self.test_suite.current_test_data_dir``
- ``join_path(self.test_suite.current_test_data_dir, "hello.f90")``
@@ -5600,7 +5551,7 @@ Inheriting stand-alone tests
Stand-alone tests defined in parent (.e.g., :ref:`build-systems`) and
virtual (e.g., :ref:`virtual-dependencies`) packages are executed by
packages that inherit from or provide interface implementations for those
packages, respectively.
The table below summarizes the stand-alone tests that will be executed along
with those implemented in the package itself.
@@ -5670,7 +5621,7 @@ for ``openmpi``:
SKIPPED: test_version_oshcc: oshcc is not installed
...
==> [2023-03-10-16:04:02.215227] Completed testing
==> [2023-03-10-16:04:02.215597]
======================== SUMMARY: openmpi-4.1.4-ubmrigj ========================
Openmpi::test_bin_mpirun .. PASSED
Openmpi::test_bin_ompi_info .. PASSED
@@ -6120,7 +6071,7 @@ in the extra attributes can implement this method like this:
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
"""Check that "compilers" is in the extra attributes."""
msg = ("the extra attribute 'compilers' must be set for "
msg = ("the extra attribute "compilers" must be set for "
"the detected spec '{0}'".format(spec))
assert "compilers" in extra_attributes, msg


@@ -1,13 +1,8 @@
sphinx==7.2.6
sphinx==6.2.1
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==1.3.0
sphinx_design==0.4.1
sphinx-rtd-theme==1.2.2
python-levenshtein==0.21.1
docutils==0.18.1
pygments==2.16.1
urllib3==2.0.4
pytest==7.4.2
isort==5.12.0
black==23.9.1
flake8==6.1.0
mypy==1.5.1
pygments==2.15.1
urllib3==2.0.3


@@ -217,7 +217,13 @@ file would live in the ``build_cache`` directory of a binary mirror::
"binary_cache_checksum": {
"hash_algorithm": "sha256",
"hash": "4f1e46452c35a5e61bcacca205bae1bfcd60a83a399af201a29c95b7cc3e1423"
}
},
"buildinfo": {
"relative_prefix":
"linux-ubuntu18.04-haswell/gcc-7.5.0/zlib-1.2.12-llv2ysfdxnppzjrt5ldybb5c52qbmoow",
"relative_rpaths": false
}
}
-----BEGIN PGP SIGNATURE-----

lib/spack/env/cc (vendored, 428 changed lines)

@@ -416,14 +416,30 @@ input_command="$*"
# The lists are all bell-separated to be as flexible as possible, as their
# contents may come from the command line, from ' '-separated lists,
# ':'-separated lists, etc.
include_dirs_list=""
lib_dirs_list=""
rpath_dirs_list=""
system_include_dirs_list=""
system_lib_dirs_list=""
system_rpath_dirs_list=""
isystem_system_include_dirs_list=""
isystem_include_dirs_list=""
libs_list=""
other_args_list=""
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
# Same, but for -Xlinker -rpath -Xlinker /path
xlinker_expect_rpath=no
parse_Wl() {
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
append system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
append rpath_dirs_list "$1"
fi
wl_expect_rpath=no
else
@@ -433,9 +449,9 @@ parse_Wl() {
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
append system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
append rpath_dirs_list "$arg"
fi
;;
--rpath=*)
@@ -443,9 +459,9 @@ parse_Wl() {
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
append system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
append rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
@@ -459,7 +475,7 @@ parse_Wl() {
return 1
;;
*)
append return_other_args_list "-Wl,$1"
append other_args_list "-Wl,$1"
;;
esac
fi
@@ -467,210 +483,177 @@ parse_Wl() {
done
}
categorize_arguments() {
unset IFS
while [ $# -ne 0 ]; do
return_other_args_list=""
return_isystem_was_used=""
return_isystem_system_include_dirs_list=""
return_isystem_include_dirs_list=""
return_system_include_dirs_list=""
return_include_dirs_list=""
return_system_lib_dirs_list=""
return_lib_dirs_list=""
return_system_rpath_dirs_list=""
return_rpath_dirs_list=""
# an RPATH to be added after the case statement.
rp=""
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
# Multiple consecutive spaces in the command line can
# result in blank arguments
if [ -z "$1" ]; then
shift
continue
fi
# Same, but for -Xlinker -rpath -Xlinker /path
xlinker_expect_rpath=no
while [ $# -ne 0 ]; do
# an RPATH to be added after the case statement.
rp=""
# Multiple consecutive spaces in the command line can
# result in blank arguments
if [ -z "$1" ]; then
shift
continue
fi
if [ -n "${SPACK_COMPILER_FLAGS_KEEP}" ] ; then
# NOTE: the eval is required to allow `|` alternatives inside the variable
eval "\
case \"\$1\" in
$SPACK_COMPILER_FLAGS_KEEP)
append return_other_args_list \"\$1\"
shift
continue
;;
esac
"
fi
# the replace list is a space-separated list of pipe-separated pairs,
# the first in each pair is the original prefix to be matched, the
# second is the replacement prefix
if [ -n "${SPACK_COMPILER_FLAGS_REPLACE}" ] ; then
for rep in ${SPACK_COMPILER_FLAGS_REPLACE} ; do
before=${rep%|*}
after=${rep#*|}
eval "\
stripped=\"\${1##$before}\"
"
if [ "$stripped" = "$1" ] ; then
continue
fi
replaced="$after$stripped"
# it matched, remove it
if [ -n "${SPACK_COMPILER_FLAGS_KEEP}" ] ; then
# NOTE: the eval is required to allow `|` alternatives inside the variable
eval "\
case \"\$1\" in
$SPACK_COMPILER_FLAGS_KEEP)
append other_args_list \"\$1\"
shift
if [ -z "$replaced" ] ; then
# completely removed, continue OUTER loop
continue 2
fi
# re-build argument list with replacement
set -- "$replaced" "$@"
done
fi
case "$1" in
-isystem*)
arg="${1#-isystem}"
return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append return_isystem_system_include_dirs_list "$arg"
else
append return_isystem_include_dirs_list "$arg"
fi
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append return_system_include_dirs_list "$arg"
else
append return_include_dirs_list "$arg"
fi
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append return_system_lib_dirs_list "$arg"
else
append return_lib_dirs_list "$arg"
fi
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
# and passed by ifx to the linker, which confuses it with a
# library. Filter it out.
# TODO: generalize filtering of args with an env var, so that
# TODO: we do not have to special case this here.
if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
&& [ "$1" != "${1#-loopopt}" ]; then
shift
continue
fi
arg="${1#-l}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append return_other_args_list "-l$arg"
;;
-Wl,*)
IFS=,
if ! parse_Wl ${1#-Wl,}; then
append return_other_args_list "$1"
fi
unset IFS
;;
-Xlinker)
shift
if [ $# -eq 0 ]; then
# -Xlinker without value: let the compiler error about it.
append return_other_args_list -Xlinker
xlinker_expect_rpath=no
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
;;
"$dtags_to_strip")
;;
*)
append return_other_args_list -Xlinker
append return_other_args_list "$1"
;;
esac
fi
;;
"$dtags_to_strip")
;;
*)
append return_other_args_list "$1"
continue
;;
esac
shift
done
"
fi
# the replace list is a space-separated list of pipe-separated pairs,
# the first in each pair is the original prefix to be matched, the
# second is the replacement prefix
if [ -n "${SPACK_COMPILER_FLAGS_REPLACE}" ] ; then
for rep in ${SPACK_COMPILER_FLAGS_REPLACE} ; do
before=${rep%|*}
after=${rep#*|}
eval "\
stripped=\"\${1##$before}\"
"
if [ "$stripped" = "$1" ] ; then
continue
fi
# We found `-Xlinker -rpath` but no matching value `-Xlinker /path`. Just append
# `-Xlinker -rpath` again and let the compiler or linker handle the error during arg
# parsing.
if [ "$xlinker_expect_rpath" = yes ]; then
append return_other_args_list -Xlinker
append return_other_args_list -rpath
replaced="$after$stripped"
# it matched, remove it
shift
if [ -z "$replaced" ] ; then
# completely removed, continue OUTER loop
continue 2
fi
# re-build argument list with replacement
set -- "$replaced" "$@"
done
fi
# Same, but for -Wl flags.
if [ "$wl_expect_rpath" = yes ]; then
append return_other_args_list -Wl,-rpath
fi
}
case "$1" in
-isystem*)
arg="${1#-isystem}"
isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append isystem_system_include_dirs_list "$arg"
else
append isystem_include_dirs_list "$arg"
fi
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append system_include_dirs_list "$arg"
else
append include_dirs_list "$arg"
fi
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append system_lib_dirs_list "$arg"
else
append lib_dirs_list "$arg"
fi
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
# and passed by ifx to the linker, which confuses it with a
# library. Filter it out.
# TODO: generalize filtering of args with an env var, so that
# TODO: we do not have to special case this here.
if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
&& [ "$1" != "${1#-loopopt}" ]; then
shift
continue
fi
arg="${1#-l}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append other_args_list "-l$arg"
;;
-Wl,*)
IFS=,
if ! parse_Wl ${1#-Wl,}; then
append other_args_list "$1"
fi
unset IFS
;;
-Xlinker)
shift
if [ $# -eq 0 ]; then
# -Xlinker without value: let the compiler error about it.
append other_args_list -Xlinker
xlinker_expect_rpath=no
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
if system_dir "$1"; then
append system_rpath_dirs_list "$1"
else
append rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
if system_dir "$arg"; then
append system_rpath_dirs_list "$arg"
else
append rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
if system_dir "$arg"; then
append system_rpath_dirs_list "$arg"
else
append rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
;;
"$dtags_to_strip")
;;
*)
append other_args_list -Xlinker
append other_args_list "$1"
;;
esac
fi
;;
"$dtags_to_strip")
;;
*)
append other_args_list "$1"
;;
esac
shift
done
categorize_arguments "$@"
include_dirs_list="$return_include_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
isystem_was_used="$return_isystem_was_used"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
other_args_list="$return_other_args_list"
# We found `-Xlinker -rpath` but no matching value `-Xlinker /path`. Just append
# `-Xlinker -rpath` again and let the compiler or linker handle the error during arg
# parsing.
if [ "$xlinker_expect_rpath" = yes ]; then
append other_args_list -Xlinker
append other_args_list -rpath
fi
# Same, but for -Wl flags.
if [ "$wl_expect_rpath" = yes ]; then
append other_args_list -Wl,-rpath
fi
#
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
@@ -690,14 +673,12 @@ elif [ "$SPACK_ADD_DEBUG_FLAGS" = "custom" ]; then
extend flags_list SPACK_DEBUG_FLAGS
fi
spack_flags_list=""
# Fortran flags come before CPPFLAGS
case "$mode" in
cc|ccld)
case $lang_flags in
F)
extend spack_flags_list SPACK_FFLAGS
extend flags_list SPACK_FFLAGS
;;
esac
;;
@@ -706,7 +687,7 @@ esac
# C preprocessor flags come before any C/CXX flags
case "$mode" in
cpp|as|cc|ccld)
extend spack_flags_list SPACK_CPPFLAGS
extend flags_list SPACK_CPPFLAGS
;;
esac
@@ -716,10 +697,10 @@ case "$mode" in
cc|ccld)
case $lang_flags in
C)
extend spack_flags_list SPACK_CFLAGS
extend flags_list SPACK_CFLAGS
;;
CXX)
extend spack_flags_list SPACK_CXXFLAGS
extend flags_list SPACK_CXXFLAGS
;;
esac
@@ -731,25 +712,10 @@ esac
# Linker flags
case "$mode" in
ld|ccld)
extend spack_flags_list SPACK_LDFLAGS
extend flags_list SPACK_LDFLAGS
;;
esac
IFS="$lsep"
categorize_arguments $spack_flags_list
unset IFS
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag
if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
if [ "${SPACK_SHORT_SPEC#*darwin}" != "${SPACK_SHORT_SPEC}" ]; then
@@ -775,8 +741,6 @@ if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend lib_dirs_list SPACK_LINK_DIRS
fi
libs_list=""
# add RPATHs if we're in in any linking mode
case "$mode" in
ld|ccld)
@@ -805,16 +769,12 @@ args_list="$flags_list"
# Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_flags_include_dirs_list "-I"
extend args_list include_dirs_list "-I"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
elif [ "$isystem_was_used" = "true" ]; then
if [ "$isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
else
extend args_list SPACK_INCLUDE_DIRS "-I"
@@ -822,15 +782,11 @@ case "$mode" in
;;
esac
extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
# Library search paths
extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
# RPATHs arguments
@@ -839,25 +795,20 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add"
fi
extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath"
;;
ld)
if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add"
fi
extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}"
;;
esac
# Other arguments from the input command
extend args_list other_args_list
extend args_list spack_flags_other_args_list
# Inject SPACK_LDLIBS, if supplied
extend args_list libs_list "-l"
@@ -913,4 +864,3 @@ fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.1 (commit df43a1834460bf94516136951c4729a3100603ec)
* Version: 0.2.1 (commit 9e1117bd8a2f0581bced161f2a2e8d6294d0300b)
astunparse
----------------

View File

@@ -1,2 +1,2 @@
"""Init file to avoid namespace packages"""
__version__ = "0.2.1"
__version__ = "0.2.0"

View File

@@ -79,18 +79,14 @@ def __init__(self, name, parents, vendor, features, compilers, generation=0):
self.features = features
self.compilers = compilers
self.generation = generation
# Cache the ancestor computation
self._ancestors = None
@property
def ancestors(self):
"""All the ancestors of this microarchitecture."""
if self._ancestors is None:
value = self.parents[:]
for parent in self.parents:
value.extend(a for a in parent.ancestors if a not in value)
self._ancestors = value
return self._ancestors
value = self.parents[:]
for parent in self.parents:
value.extend(a for a in parent.ancestors if a not in value)
return value
def _to_set(self):
"""Returns a set of the nodes in this microarchitecture DAG."""

View File

@@ -145,13 +145,6 @@
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
}
],
"intel": [
{
"versions": "16.0:",
"name": "corei7",
"flags": "-march={name} -mtune=generic -mpopcnt"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
@@ -224,13 +217,6 @@
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
}
],
"intel": [
{
"versions": "16.0:",
"name": "core-avx2",
"flags": "-march={name} -mtune={name} -fma -mf16c"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
@@ -314,13 +300,6 @@
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
}
],
"intel": [
{
"versions": "16.0:",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
@@ -1433,92 +1412,6 @@
]
}
},
"sapphirerapids": {
"from": [
"icelake"
],
"vendor": "GenuineIntel",
"features": [
"mmx",
"sse",
"sse2",
"ssse3",
"sse4_1",
"sse4_2",
"popcnt",
"aes",
"pclmulqdq",
"avx",
"rdrand",
"f16c",
"movbe",
"fma",
"avx2",
"bmi1",
"bmi2",
"rdseed",
"adx",
"clflushopt",
"xsavec",
"xsaveopt",
"avx512f",
"avx512vl",
"avx512bw",
"avx512dq",
"avx512cd",
"avx512vbmi",
"avx512ifma",
"sha_ni",
"clwb",
"rdpid",
"gfni",
"avx512_vbmi2",
"avx512_vpopcntdq",
"avx512_bitalg",
"avx512_vnni",
"vpclmulqdq",
"vaes",
"avx512_bf16",
"cldemote",
"movdir64b",
"movdiri",
"pdcm",
"serialize",
"waitpkg"
],
"compilers": {
"gcc": [
{
"versions": "11.0:",
"flags": "-march={name} -mtune={name}"
}
],
"clang": [
{
"versions": "12.0:",
"flags": "-march={name} -mtune={name}"
}
],
"intel": [
{
"versions": "2021.2:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": "2021.2:",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": "2021.2:",
"flags": "-march={name} -mtune={name}"
}
]
}
},
"k10": {
"from": ["x86_64"],
"vendor": "AuthenticAMD",
@@ -2172,6 +2065,8 @@
"pku",
"gfni",
"flush_l1d",
"erms",
"avic",
"avx512f",
"avx512dq",
"avx512ifma",
@@ -2188,12 +2083,12 @@
"compilers": {
"gcc": [
{
"versions": "10.3:12.2",
"versions": "10.3:13.0",
"name": "znver3",
"flags": "-march={name} -mtune={name} -mavx512f -mavx512dq -mavx512ifma -mavx512cd -mavx512bw -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512vnni -mavx512bitalg"
},
{
"versions": "12.3:",
"versions": "13.1:",
"name": "znver4",
"flags": "-march={name} -mtune={name}"
}

View File

@@ -65,6 +65,9 @@
up to date with CTest, just make sure the ``*_matches`` and
``*_exceptions`` lists are kept up to date with CTest's build handler.
"""
from __future__ import print_function
from __future__ import division
import re
import math
import multiprocessing
@@ -208,7 +211,7 @@
]
class LogEvent:
class LogEvent(object):
"""Class representing interesting events (e.g., errors) in a build log."""
def __init__(self, text, line_no,
source_file=None, source_line_no=None,
@@ -345,7 +348,7 @@ def _parse_unpack(args):
return _parse(*args)
class CTestLogParser:
class CTestLogParser(object):
"""Log file parser that extracts errors and warnings."""
def __init__(self, profile=False):
# whether to record timing information

View File

@@ -1,459 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""URL primitives that just require Python standard library."""
import itertools
import os.path
import re
from typing import Optional, Set, Tuple
from urllib.parse import urlsplit, urlunsplit
# Archive extensions allowed in Spack
PREFIX_EXTENSIONS = ("tar", "TAR")
EXTENSIONS = ("gz", "bz2", "xz", "Z")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz")
# Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = (
tuple(".".join(ext) for ext in itertools.product(PREFIX_EXTENSIONS, EXTENSIONS))
+ PREFIX_EXTENSIONS
+ EXTENSIONS
+ NO_TAR_EXTENSIONS
)
CONTRACTION_MAP = {"tgz": "tar.gz", "txz": "tar.xz", "tbz": "tar.bz2", "tbz2": "tar.bz2"}
def find_list_urls(url: str) -> Set[str]:
r"""Find good list URLs for the supplied URL.
By default, returns the dirname of the archive path.
Provides special treatment for the following websites, which have a
unique list URL different from the dirname of the download URL:
========= =======================================================
GitHub https://github.com/<repo>/<name>/releases
GitLab https://gitlab.\*/<repo>/<name>/tags
BitBucket https://bitbucket.org/<repo>/<name>/downloads/?tab=tags
CRAN https://\*.r-project.org/src/contrib/Archive/<name>
PyPI https://pypi.org/simple/<name>/
LuaRocks https://luarocks.org/modules/<repo>/<name>
========= =======================================================
Note: this function is called by `spack versions`, `spack checksum`,
and `spack create`, but not by `spack fetch` or `spack install`.
Parameters:
url (str): The download URL for the package
Returns:
set: One or more list URLs for the package
"""
url_types = [
# GitHub
# e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
(r"(.*github\.com/[^/]+/[^/]+)", lambda m: m.group(1) + "/releases"),
# GitLab API endpoint
# e.g. https://gitlab.dkrz.de/api/v4/projects/k202009%2Flibaec/repository/archive.tar.gz?sha=v1.0.2
(
r"(.*gitlab[^/]+)/api/v4/projects/([^/]+)%2F([^/]+)",
lambda m: m.group(1) + "/" + m.group(2) + "/" + m.group(3) + "/tags",
),
# GitLab non-API endpoint
# e.g. https://gitlab.dkrz.de/k202009/libaec/uploads/631e85bcf877c2dcaca9b2e6d6526339/libaec-1.0.0.tar.gz
(r"(.*gitlab[^/]+/(?!api/v4/projects)[^/]+/[^/]+)", lambda m: m.group(1) + "/tags"),
# BitBucket
# e.g. https://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2
(r"(.*bitbucket.org/[^/]+/[^/]+)", lambda m: m.group(1) + "/downloads/?tab=tags"),
# CRAN
# e.g. https://cran.r-project.org/src/contrib/Rcpp_0.12.9.tar.gz
# e.g. https://cloud.r-project.org/src/contrib/rgl_0.98.1.tar.gz
(
r"(.*\.r-project\.org/src/contrib)/([^_]+)",
lambda m: m.group(1) + "/Archive/" + m.group(2),
),
# PyPI
# e.g. https://pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://www.pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.python.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.io/packages/py2.py3/o/opencensus-context/opencensus_context-0.1.1-py2.py3-none-any.whl
(
r"(?:pypi|pythonhosted)[^/]+/packages/[^/]+/./([^/]+)",
lambda m: "https://pypi.org/simple/" + m.group(1) + "/",
),
# LuaRocks
# e.g. https://luarocks.org/manifests/gvvaughan/lpeg-1.0.2-1.src.rock
# e.g. https://luarocks.org/manifests/openresty/lua-cjson-2.1.0-1.src.rock
(
r"luarocks[^/]+/(?:modules|manifests)/(?P<org>[^/]+)/"
+ r"(?P<name>.+?)-[0-9.-]*\.src\.rock",
lambda m: "https://luarocks.org/modules/"
+ m.group("org")
+ "/"
+ m.group("name")
+ "/",
),
]
list_urls = {os.path.dirname(url)}
for pattern, fun in url_types:
match = re.search(pattern, url)
if match:
list_urls.add(fun(match))
return list_urls
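For instance, with the GitHub pattern from the table above, the example URL from the comment maps to both the archive's dirname and the releases page (illustrative output, traced by hand):
# find_list_urls("https://github.com/llnl/callpath/archive/v1.0.1.tar.gz")
# -> {"https://github.com/llnl/callpath/archive",
#     "https://github.com/llnl/callpath/releases"}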
def strip_query_and_fragment(url: str) -> Tuple[str, str]:
"""Strips query and fragment from a url, then returns the base url and the suffix.
Args:
url: URL to be stripped
Raises:
ValueError: when there is any error parsing the URL
"""
components = urlsplit(url)
stripped = components[:3] + (None, None)
query, frag = components[3:5]
suffix = ""
if query:
suffix += "?" + query
if frag:
suffix += "#" + frag
return urlunsplit(stripped), suffix
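A quick illustration with a made-up URL; the query and fragment are split off and returned together as the suffix:
# strip_query_and_fragment("https://example.com/pkg-1.0.tar.gz?raw=true#frag")
# -> ("https://example.com/pkg-1.0.tar.gz", "?raw=true#frag")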
SOURCEFORGE_RE = re.compile(r"(.*(?:sourceforge\.net|sf\.net)/.*)(/download)$")
def split_url_on_sourceforge_suffix(url: str) -> Tuple[str, ...]:
"""If the input is a sourceforge URL, returns base URL and "/download" suffix. Otherwise,
returns the input URL and an empty string.
"""
match = SOURCEFORGE_RE.search(url)
if match is not None:
return match.groups()
return url, ""
def has_extension(path_or_url: str, ext: str) -> bool:
"""Returns true if the extension in input is present in path, false otherwise."""
prefix, _ = split_url_on_sourceforge_suffix(path_or_url)
if not ext.startswith(r"\."):
ext = rf"\.{ext}$"
if re.search(ext, prefix):
return True
return False
def extension_from_path(path_or_url: Optional[str]) -> Optional[str]:
"""Tries to match an allowed archive extension to the input. Returns the first match,
or None if no match was found.
Raises:
ValueError: if the input is None
"""
if path_or_url is None:
raise ValueError("Can't call extension() on None")
for t in ALLOWED_ARCHIVE_TYPES:
if has_extension(path_or_url, t):
return t
return None
def remove_extension(path_or_url: str, *, extension: str) -> str:
"""Returns the input with the extension removed"""
suffix = rf"\.{extension}$"
return re.sub(suffix, "", path_or_url)
def check_and_remove_ext(path: str, *, extension: str) -> str:
"""Returns the input path with the extension removed, if the extension is present in path.
Otherwise, returns the input unchanged.
"""
if not has_extension(path, extension):
return path
path, _ = split_url_on_sourceforge_suffix(path)
return remove_extension(path, extension=extension)
def strip_extension(path_or_url: str, *, extension: Optional[str] = None) -> str:
"""If a path contains the extension in input, returns the path stripped of the extension.
Otherwise, returns the input path.
If extension is None, attempts to strip any allowed extension from path.
"""
if extension is None:
for t in ALLOWED_ARCHIVE_TYPES:
if has_extension(path_or_url, ext=t):
extension = t
break
else:
return path_or_url
return check_and_remove_ext(path_or_url, extension=extension)
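Illustrative behavior on a few made-up names, following the extension tables above (traced by hand, not verbatim test output):
# strip_extension("v1.0.1.tar.gz")   -> "v1.0.1"
# strip_extension("glew-2.0.0.tgz")  -> "glew-2.0.0"
# strip_extension("README")          -> "README"  (no allowed extension found)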
def split_url_extension(url: str) -> Tuple[str, ...]:
"""Some URLs have a query string, e.g.:
1. https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true
2. http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz
3. https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0
In (1), the query string needs to be stripped to get at the
extension, but in (2) & (3), the filename is IN a single final query
argument.
This strips the URL into three pieces: ``prefix``, ``ext``, and ``suffix``.
The suffix contains anything that was stripped off the URL to
get at the file extension. In (1), it will be ``'?raw=true'``, but
in (2), it will be empty. In (3) the suffix is a parameter that follows
after the file extension, e.g.:
1. ``('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7', '.tgz', '?raw=true')``
2. ``('http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin', '.tar.gz', None)``
3. ``('https://gitlab.kitware.com/vtk/vtk/repository/archive', '.tar.bz2', '?ref=v7.0.0')``
"""
# Strip off sourceforge download suffix.
# e.g. https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download
prefix, suffix = split_url_on_sourceforge_suffix(url)
ext = extension_from_path(prefix)
if ext is not None:
prefix = strip_extension(prefix)
return prefix, ext, suffix
try:
prefix, suf = strip_query_and_fragment(prefix)
except ValueError:
# FIXME: tty.debug("Got error parsing path %s" % path)
# Ignore URL parse errors here
return url, ""
ext = extension_from_path(prefix)
prefix = strip_extension(prefix)
suffix = suf + suffix
if ext is None:
ext = ""
return prefix, ext, suffix
def strip_version_suffixes(path_or_url: str) -> str:
"""Some tarballs contain extraneous information after the version:
* ``bowtie2-2.2.5-source``
* ``libevent-2.0.21-stable``
* ``cuda_8.0.44_linux.run``
These strings are not part of the version number and should be ignored.
This function strips those suffixes off and returns the remaining string.
The goal is that the version is always the last thing in ``path``:
* ``bowtie2-2.2.5``
* ``libevent-2.0.21``
* ``cuda_8.0.44``
Args:
path_or_url: The filename or URL for the package
Returns:
The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_version_offset
# NOTE: The problem is that we would have to add these regexes to the end
# NOTE: of every single version regex. Easier to just strip them off
# NOTE: permanently
suffix_regexes = [
# Download type
r"[Ii]nstall",
r"all",
r"code",
r"[Ss]ources?",
r"file",
r"full",
r"single",
r"with[a-zA-Z_-]+",
r"rock",
r"src(_0)?",
r"public",
r"bin",
r"binary",
r"run",
r"[Uu]niversal",
r"jar",
r"complete",
r"dynamic",
r"oss",
r"gem",
r"tar",
r"sh",
# Download version
r"release",
r"bin",
r"stable",
r"[Ff]inal",
r"rel",
r"orig",
r"dist",
r"\+",
# License
r"gpl",
# Arch
# Needs to come before and after OS, appears in both orders
r"ia32",
r"intel",
r"amd64",
r"linux64",
r"x64",
r"64bit",
r"x86[_-]64",
r"i586_64",
r"x86",
r"i[36]86",
r"ppc64(le)?",
r"armv?(7l|6l|64)",
# Other
r"cpp",
r"gtk",
r"incubating",
# OS
r"[Ll]inux(_64)?",
r"LINUX",
r"[Uu]ni?x",
r"[Ss]un[Oo][Ss]",
r"[Mm]ac[Oo][Ss][Xx]?",
r"[Oo][Ss][Xx]",
r"[Dd]arwin(64)?",
r"[Aa]pple",
r"[Ww]indows",
r"[Ww]in(64|32)?",
r"[Cc]ygwin(64|32)?",
r"[Mm]ingw",
r"centos",
# Arch
# Needs to come before and after OS, appears in both orders
r"ia32",
r"intel",
r"amd64",
r"linux64",
r"x64",
r"64bit",
r"x86[_-]64",
r"i586_64",
r"x86",
r"i[36]86",
r"ppc64(le)?",
r"armv?(7l|6l|64)?",
# PyPI
r"[._-]py[23].*\.whl",
r"[._-]cp[23].*\.whl",
r"[._-]win.*\.exe",
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path_or_url = re.sub(r"[._-]?" + regex + "$", "", path_or_url)
return path_or_url
def expand_contracted_extension(extension: str) -> str:
"""Returns the expanded version of a known contracted extension.
This function maps extensions like ".tgz" to ".tar.gz". On unknown extensions,
return the input unmodified.
"""
extension = extension.strip(".")
return CONTRACTION_MAP.get(extension, extension)
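Illustrative lookups (leading dots are stripped first; unknown extensions pass through unchanged):
# expand_contracted_extension("tgz")   -> "tar.gz"
# expand_contracted_extension(".tbz2") -> "tar.bz2"
# expand_contracted_extension("zip")   -> "zip"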
def expand_contracted_extension_in_path(
path_or_url: str, *, extension: Optional[str] = None
) -> str:
"""Returns the input path or URL with any contraction extension expanded.
Args:
path_or_url: path or URL to be expanded
extension: if specified, only attempt to expand that extension
"""
extension = extension or extension_from_path(path_or_url)
if extension is None:
return path_or_url
expanded = expand_contracted_extension(extension)
if expanded != extension:
return re.sub(rf"{extension}", rf"{expanded}", path_or_url)
return path_or_url
def compression_ext_from_compressed_archive(extension: str) -> Optional[str]:
"""Returns compression extension for a compressed archive"""
extension = expand_contracted_extension(extension)
for ext in [*EXTENSIONS]:
if ext in extension:
return ext
return None
def strip_compression_extension(path_or_url: str, ext: Optional[str] = None) -> str:
"""Strips the compression extension from the input, and returns it. For instance,
"foo.tgz" becomes "foo.tar".
If no extension is given, try a default list of extensions.
Args:
path_or_url: input to be stripped
ext: if given, extension to be stripped
"""
if not extension_from_path(path_or_url):
return path_or_url
expanded_path = expand_contracted_extension_in_path(path_or_url)
candidates = [ext] if ext is not None else EXTENSIONS
for current_extension in candidates:
modified_path = check_and_remove_ext(expanded_path, extension=current_extension)
if modified_path != expanded_path:
return modified_path
return expanded_path
def allowed_archive(path_or_url: str) -> bool:
"""Returns true if the input is a valid archive, False otherwise."""
return (
False if not path_or_url else any(path_or_url.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)
)
def determine_url_file_extension(path: str) -> str:
"""This returns the type of archive a URL refers to. This is
sometimes confusing because of URLs like:
(1) https://github.com/petdance/ack/tarball/1.93_02
Where the URL doesn't actually contain the filename. We need
to know what type it is so that we can appropriately name files
in mirrors.
"""
match = re.search(r"github.com/.+/(zip|tar)ball/", path)
if match:
if match.group(1) == "zip":
return "zip"
elif match.group(1) == "tar":
return "tar.gz"
prefix, ext, suffix = split_url_extension(path)
return ext
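Two illustrative calls: the GitHub tarball case from the docstring, where the type is inferred from the URL shape, and an ordinary URL whose extension is read from the path (the second URL is made up):
# determine_url_file_extension("https://github.com/petdance/ack/tarball/1.93_02")
# -> "tar.gz"
# determine_url_file_extension("https://example.com/foo-1.0.zip")
# -> "zip"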

View File

@@ -3,42 +3,33 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import abc
from __future__ import print_function
import argparse
import errno
import io
import re
import sys
from argparse import ArgumentParser
from typing import IO, Any, Iterable, List, Optional, Sequence, Tuple, Union
class Command:
class Command(object):
"""Parsed representation of a command from argparse.
This is a single command from an argparse parser. ``ArgparseWriter`` creates these and returns
them from ``parse()``, and it passes one of these to each call to ``format()`` so that we can
take an action for a single command.
This is a single command from an argparse parser. ``ArgparseWriter``
creates these and returns them from ``parse()``, and it passes one of
these to each call to ``format()`` so that we can take an action for
a single command.
Parts of a Command:
- prog: command name (str)
- description: command description (str)
- usage: command usage (str)
- positionals: list of positional arguments (list)
- optionals: list of optional arguments (list)
- subcommands: list of subcommand parsers (list)
"""
def __init__(
self,
prog: str,
description: Optional[str],
usage: str,
positionals: List[Tuple[str, Optional[Iterable[Any]], Union[int, str, None], str]],
optionals: List[Tuple[Sequence[str], List[str], str, Union[int, str, None], str]],
subcommands: List[Tuple[ArgumentParser, str, str]],
) -> None:
"""Initialize a new Command instance.
Args:
prog: Program name.
description: Command description.
usage: Command usage.
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
"""
def __init__(self, prog, description, usage, positionals, optionals, subcommands):
self.prog = prog
self.description = description
self.usage = usage
@@ -47,34 +38,35 @@ def __init__(
self.subcommands = subcommands
# NOTE: The only reason we subclass argparse.HelpFormatter is to get access to self._expand_help(),
# ArgparseWriter is not intended to be used as a formatter_class.
class ArgparseWriter(argparse.HelpFormatter, abc.ABC):
"""Analyze an argparse ArgumentParser for easy generation of help."""
# NOTE: The only reason we subclass argparse.HelpFormatter is to get access
# to self._expand_help(), ArgparseWriter is not intended to be used as a
# formatter_class.
class ArgparseWriter(argparse.HelpFormatter):
"""Analyzes an argparse ArgumentParser for easy generation of help."""
def __init__(self, prog: str, out: IO = sys.stdout, aliases: bool = False) -> None:
"""Initialize a new ArgparseWriter instance.
def __init__(self, prog, out=None, aliases=False):
"""Initializes a new ArgparseWriter instance.
Args:
prog: Program name.
out: File object to write to.
aliases: Whether or not to include subparsers for aliases.
Parameters:
prog (str): the program name
out (file object): the file to write to (default sys.stdout)
aliases (bool): whether or not to include subparsers for aliases
"""
super().__init__(prog)
super(ArgparseWriter, self).__init__(prog)
self.level = 0
self.prog = prog
self.out = out
self.out = sys.stdout if out is None else out
self.aliases = aliases
def parse(self, parser: ArgumentParser, prog: str) -> Command:
"""Parse the parser object and return the relavent components.
def parse(self, parser, prog):
"""Parses the parser object and returns the relavent components.
Args:
parser: Command parser.
prog: Program name.
Parameters:
parser (argparse.ArgumentParser): the parser
prog (str): the command name
Returns:
Information about the command from the parser.
(Command) information about the command from the parser
"""
self.parser = parser
@@ -88,7 +80,8 @@ def parse(self, parser: ArgumentParser, prog: str) -> Command:
groups = parser._mutually_exclusive_groups
usage = fmt._format_usage(None, actions, groups, "").strip()
# Go through actions and split them into optionals, positionals, and subcommands
# Go through actions and split them into optionals, positionals,
# and subcommands
optionals = []
positionals = []
subcommands = []
@@ -96,97 +89,74 @@ def parse(self, parser: ArgumentParser, prog: str) -> Command:
if action.option_strings:
flags = action.option_strings
dest_flags = fmt._format_action_invocation(action)
nargs = action.nargs
help = (
self._expand_help(action)
if action.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
if action.choices is not None:
dest = [str(choice) for choice in action.choices]
else:
dest = [action.dest]
optionals.append((flags, dest, dest_flags, nargs, help))
help = self._expand_help(action) if action.help else ""
help = help.replace("\n", " ")
optionals.append((flags, dest_flags, help))
elif isinstance(action, argparse._SubParsersAction):
for subaction in action._choices_actions:
subparser = action._name_parser_map[subaction.dest]
help = (
self._expand_help(subaction)
if subaction.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
subcommands.append((subparser, subaction.dest, help))
subcommands.append((subparser, subaction.dest))
# Look for aliases of the form 'name (alias, ...)'
if self.aliases and isinstance(subaction.metavar, str):
if self.aliases:
match = re.match(r"(.*) \((.*)\)", subaction.metavar)
if match:
aliases = match.group(2).split(", ")
for alias in aliases:
subparser = action._name_parser_map[alias]
help = (
self._expand_help(subaction)
if subaction.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
subcommands.append((subparser, alias, help))
subcommands.append((subparser, alias))
else:
args = fmt._format_action_invocation(action)
help = (
self._expand_help(action)
if action.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
positionals.append((args, action.choices, action.nargs, help))
help = self._expand_help(action) if action.help else ""
help = help.replace("\n", " ")
positionals.append((args, help))
return Command(prog, description, usage, positionals, optionals, subcommands)
@abc.abstractmethod
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
def format(self, cmd):
"""Returns the string representation of a single node in the
parser tree.
Override this in subclasses to define how each subcommand should be displayed.
Override this in subclasses to define how each subcommand
should be displayed.
Args:
cmd: Parsed information about a command or subcommand.
Parameters:
(Command): parsed information about a command or subcommand
Returns:
String representation of this subcommand.
str: the string representation of this subcommand
"""
raise NotImplementedError
def _write(self, parser: ArgumentParser, prog: str, level: int = 0) -> None:
"""Recursively write a parser.
def _write(self, parser, prog, level=0):
"""Recursively writes a parser.
Args:
parser: Command parser.
prog: Program name.
level: Current level.
Parameters:
parser (argparse.ArgumentParser): the parser
prog (str): the command name
level (int): the current level
"""
self.level = level
cmd = self.parse(parser, prog)
self.out.write(self.format(cmd))
for subparser, prog, help in cmd.subcommands:
for subparser, prog in cmd.subcommands:
self._write(subparser, prog, level=level + 1)
def write(self, parser: ArgumentParser) -> None:
def write(self, parser):
"""Write out details about an ArgumentParser.
Args:
parser: Command parser.
parser (argparse.ArgumentParser): the parser
"""
try:
self._write(parser, self.prog)
except BrokenPipeError:
except IOError as e:
# Swallow pipe errors
pass
# Raises IOError in Python 2 and BrokenPipeError in Python 3
if e.errno != errno.EPIPE:
raise
_rst_levels = ["=", "-", "^", "~", ":", "`"]
@@ -195,33 +165,21 @@ def write(self, parser: ArgumentParser) -> None:
class ArgparseRstWriter(ArgparseWriter):
"""Write argparse output as rst sections."""
def __init__(
self,
prog: str,
out: IO = sys.stdout,
aliases: bool = False,
rst_levels: Sequence[str] = _rst_levels,
) -> None:
"""Initialize a new ArgparseRstWriter instance.
def __init__(self, prog, out=None, aliases=False, rst_levels=_rst_levels):
"""Create a new ArgparseRstWriter.
Args:
prog: Program name.
out: File object to write to.
aliases: Whether or not to include subparsers for aliases.
rst_levels: List of characters for rst section headings.
Parameters:
prog (str): program name
out (file object): file to write to
aliases (bool): whether or not to include subparsers for aliases
rst_levels (list of str): list of characters
for rst section headings
"""
super().__init__(prog, out, aliases)
out = sys.stdout if out is None else out
super(ArgparseRstWriter, self).__init__(prog, out, aliases)
self.rst_levels = rst_levels
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of a node.
"""
def format(self, cmd):
string = io.StringIO()
string.write(self.begin_command(cmd.prog))
@@ -232,13 +190,13 @@ def format(self, cmd: Command) -> str:
if cmd.positionals:
string.write(self.begin_positionals())
for args, choices, nargs, help in cmd.positionals:
for args, help in cmd.positionals:
string.write(self.positional(args, help))
string.write(self.end_positionals())
if cmd.optionals:
string.write(self.begin_optionals())
for flags, dest, dest_flags, nargs, help in cmd.optionals:
for flags, dest_flags, help in cmd.optionals:
string.write(self.optional(dest_flags, help))
string.write(self.end_optionals())
@@ -247,15 +205,7 @@ def format(self, cmd: Command) -> str:
return string.getvalue()
def begin_command(self, prog: str) -> str:
"""Text to print before a command.
Args:
prog: Program name.
Returns:
Text before a command.
"""
def begin_command(self, prog):
return """
----
@@ -268,26 +218,10 @@ def begin_command(self, prog: str) -> str:
prog.replace(" ", "-"), prog, self.rst_levels[self.level] * len(prog)
)
def description(self, description: str) -> str:
"""Description of a command.
Args:
description: Command description.
Returns:
Description of a command.
"""
def description(self, description):
return description + "\n\n"
def usage(self, usage: str) -> str:
"""Example usage of a command.
Args:
usage: Command usage.
Returns:
Usage of a command.
"""
def usage(self, usage):
return """\
.. code-block:: console
@@ -297,24 +231,10 @@ def usage(self, usage: str) -> str:
usage
)
def begin_positionals(self) -> str:
"""Text to print before positional arguments.
Returns:
Positional arguments header.
"""
def begin_positionals(self):
return "\n**Positional arguments**\n\n"
def positional(self, name: str, help: str) -> str:
"""Description of a positional argument.
Args:
name: Argument name.
help: Help text.
Returns:
Positional argument description.
"""
def positional(self, name, help):
return """\
{0}
{1}
@@ -323,32 +243,13 @@ def positional(self, name: str, help: str) -> str:
name, help
)
def end_positionals(self) -> str:
"""Text to print after positional arguments.
Returns:
Positional arguments footer.
"""
def end_positionals(self):
return ""
def begin_optionals(self) -> str:
"""Text to print before optional arguments.
Returns:
Optional arguments header.
"""
def begin_optionals(self):
return "\n**Optional arguments**\n\n"
def optional(self, opts: str, help: str) -> str:
"""Description of an optional argument.
Args:
opts: Optional argument.
help: Help text.
Returns:
Optional argument description.
"""
def optional(self, opts, help):
return """\
``{0}``
{1}
@@ -357,23 +258,10 @@ def optional(self, opts: str, help: str) -> str:
opts, help
)
def end_optionals(self) -> str:
"""Text to print after optional arguments.
Returns:
Optional arguments footer.
"""
def end_optionals(self):
return ""
def begin_subcommands(self, subcommands: List[Tuple[ArgumentParser, str, str]]) -> str:
"""Table with links to other subcommands.
Arguments:
subcommands: List of subcommands.
Returns:
Subcommand linking text.
"""
def begin_subcommands(self, subcommands):
string = """
**Subcommands**
@@ -382,8 +270,116 @@ def begin_subcommands(self, subcommands: List[Tuple[ArgumentParser, str, str]])
"""
for cmd, _, _ in subcommands:
for cmd, _ in subcommands:
prog = re.sub(r"^[^ ]* ", "", cmd.prog)
string += " * :ref:`{0} <{1}>`\n".format(prog, cmd.prog.replace(" ", "-"))
return string + "\n"
class ArgparseCompletionWriter(ArgparseWriter):
"""Write argparse output as shell programmable tab completion functions."""
def format(self, cmd):
"""Returns the string representation of a single node in the
parser tree.
Override this in subclasses to define how each subcommand
should be displayed.
Parameters:
(Command): parsed information about a command or subcommand
Returns:
str: the string representation of this subcommand
"""
assert cmd.optionals # we should always at least have -h, --help
assert not (cmd.positionals and cmd.subcommands) # one or the other
# We only care about the arguments/flags, not the help messages
positionals = []
if cmd.positionals:
positionals, _ = zip(*cmd.positionals)
optionals, _, _ = zip(*cmd.optionals)
subcommands = []
if cmd.subcommands:
_, subcommands = zip(*cmd.subcommands)
# Flatten lists of lists
optionals = [x for xx in optionals for x in xx]
return (
self.start_function(cmd.prog)
+ self.body(positionals, optionals, subcommands)
+ self.end_function(cmd.prog)
)
def start_function(self, prog):
"""Returns the syntax needed to begin a function definition.
Parameters:
prog (str): the command name
Returns:
str: the function definition beginning
"""
name = prog.replace("-", "_").replace(" ", "_")
return "\n_{0}() {{".format(name)
def end_function(self, prog=None):
"""Returns the syntax needed to end a function definition.
Parameters:
prog (str or None): the command name
Returns:
str: the function definition ending
"""
return "}\n"
def body(self, positionals, optionals, subcommands):
"""Returns the body of the function.
Parameters:
positionals (list): list of positional arguments
optionals (list): list of optional arguments
subcommands (list): list of subcommand parsers
Returns:
str: the function body
"""
return ""
def positionals(self, positionals):
"""Returns the syntax for reporting positional arguments.
Parameters:
positionals (list): list of positional arguments
Returns:
str: the syntax for positional arguments
"""
return ""
def optionals(self, optionals):
"""Returns the syntax for reporting optional flags.
Parameters:
optionals (list): list of optional arguments
Returns:
str: the syntax for optional flags
"""
return ""
def subcommands(self, subcommands):
"""Returns the syntax for reporting subcommands.
Parameters:
subcommands (list): list of subcommand parsers
Returns:
str: the syntax for subcommand parsers
"""
return ""

View File

@@ -11,7 +11,6 @@
import itertools
import numbers
import os
import pathlib
import posixpath
import re
import shutil
@@ -19,13 +18,11 @@
import sys
import tempfile
from contextlib import contextmanager
from itertools import accumulate
from typing import Callable, Iterable, List, Match, Optional, Tuple, Union
import llnl.util.symlink
from llnl.util import tty
from llnl.util.lang import dedupe, memoized
from llnl.util.symlink import islink, readlink, resolve_link_target_relative_to_the_link, symlink
from llnl.util.symlink import islink, symlink
from spack.util.executable import Executable, which
from spack.util.path import path_to_os_path, system_path_filter
@@ -104,7 +101,7 @@ def _nop(args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (islink(src) and islink(dst))
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
@@ -172,7 +169,7 @@ def rename(src, dst):
if sys.platform == "win32":
# Windows path existence checks will sometimes fail on junctions/links/symlinks
# so check for that case
if os.path.exists(dst) or islink(dst):
if os.path.exists(dst) or os.path.islink(dst):
os.remove(dst)
os.rename(src, dst)
@@ -405,7 +402,7 @@ def groupid_to_group(x):
os.remove(backup_filename)
class FileFilter:
class FileFilter(object):
"""Convenience class for calling ``filter_file`` a lot."""
def __init__(self, *filenames):
@@ -569,7 +566,7 @@ def set_install_permissions(path):
# If this points to a file maintained in a Spack prefix, it is assumed that
# this function will be invoked on the target. If the file is outside a
# Spack-maintained prefix, the permissions should not be modified.
if islink(path):
if os.path.islink(path):
return
if os.path.isdir(path):
os.chmod(path, 0o755)
@@ -613,8 +610,6 @@ def chgrp(path, group, follow_symlinks=True):
gid = grp.getgrnam(group).gr_gid
else:
gid = group
if os.stat(path).st_gid == gid:
return
if follow_symlinks:
os.chown(path, -1, gid)
else:
@@ -638,7 +633,7 @@ def chmod_x(entry, perms):
@system_path_filter
def copy_mode(src, dest):
"""Set the mode of dest to that of src unless it is a link."""
if islink(dest):
if os.path.islink(dest):
return
src_mode = os.stat(src).st_mode
dest_mode = os.stat(dest).st_mode
@@ -724,12 +719,26 @@ def install(src, dest):
copy(src, dest, _permissions=True)
@system_path_filter
def resolve_link_target_relative_to_the_link(link):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
target = os.readlink(link)
if os.path.isabs(target):
return target
link_dir = os.path.dirname(os.path.abspath(link))
return os.path.join(link_dir, target)
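To make the docstring concrete: a relative target is re-rooted at the link's own directory rather than the caller's cwd (illustrative paths):
# Given a symlink /opt/pkg/bin/tool whose target is "../libexec/tool":
# os.readlink("/opt/pkg/bin/tool")                              -> "../libexec/tool"
# resolve_link_target_relative_to_the_link("/opt/pkg/bin/tool") -> "/opt/pkg/bin/../libexec/tool"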
@system_path_filter
def copy_tree(
src: str,
dest: str,
symlinks: bool = True,
allow_broken_symlinks: bool = sys.platform != "win32",
ignore: Optional[Callable[[str], bool]] = None,
_permissions: bool = False,
):
@@ -752,8 +761,6 @@ def copy_tree(
src (str): the directory to copy
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
allow_broken_symlinks (bool): whether or not to allow broken (dangling) symlinks,
On Windows, setting this to True will raise an exception. Defaults to true on unix.
ignore (typing.Callable): function indicating which files to ignore
_permissions (bool): for internal use only
@@ -761,8 +768,6 @@ def copy_tree(
IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest*
"""
if allow_broken_symlinks and sys.platform == "win32":
raise llnl.util.symlink.SymlinkError("Cannot allow broken symlinks on Windows!")
if _permissions:
tty.debug("Installing {0} to {1}".format(src, dest))
else:
@@ -776,11 +781,6 @@ def copy_tree(
if not files:
raise IOError("No such file or directory: '{0}'".format(src))
# For Windows hard-links and junctions, the source path must exist to make a symlink. Add
# all symlinks to this list while traversing the tree, then when finished, make all
# symlinks at the end.
links = []
for src in files:
abs_src = os.path.abspath(src)
if not abs_src.endswith(os.path.sep):
@@ -803,7 +803,7 @@ def copy_tree(
ignore=ignore,
follow_nonexisting=True,
):
if islink(s):
if os.path.islink(s):
link_target = resolve_link_target_relative_to_the_link(s)
if symlinks:
target = os.readlink(s)
@@ -817,9 +817,7 @@ def escaped_path(path):
tty.debug("Redirecting link {0} to {1}".format(target, new_target))
target = new_target
links.append((target, d, s))
continue
symlink(target, d)
elif os.path.isdir(link_target):
mkdirp(d)
else:
@@ -834,17 +832,9 @@ def escaped_path(path):
set_install_permissions(d)
copy_mode(s, d)
for target, d, s in links:
symlink(target, d, allow_broken_symlinks=allow_broken_symlinks)
if _permissions:
set_install_permissions(d)
copy_mode(s, d)
@system_path_filter
def install_tree(
src, dest, symlinks=True, ignore=None, allow_broken_symlinks=sys.platform != "win32"
):
def install_tree(src, dest, symlinks=True, ignore=None):
"""Recursively install an entire directory tree rooted at *src*.
Same as :py:func:`copy_tree` with the addition of setting proper
@@ -855,21 +845,12 @@ def install_tree(
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
ignore (typing.Callable): function indicating which files to ignore
allow_broken_symlinks (bool): whether or not to allow broken (dangling) symlinks,
On Windows, setting this to True will raise an exception.
Raises:
IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest*
"""
copy_tree(
src,
dest,
symlinks=symlinks,
allow_broken_symlinks=allow_broken_symlinks,
ignore=ignore,
_permissions=True,
)
copy_tree(src, dest, symlinks=symlinks, ignore=ignore, _permissions=True)
@system_path_filter
@@ -1273,12 +1254,7 @@ def traverse_tree(
Keyword Arguments:
order (str): Whether to do pre- or post-order traversal. Accepted
values are 'pre' and 'post'
ignore (typing.Callable): function indicating which files to ignore. This will also
ignore symlinks if they point to an ignored file (regardless of whether the symlink
is explicitly ignored); note this only supports one layer of indirection (i.e. if
you have x -> y -> z, and z is ignored but x/y are not, then y would be ignored
but not x). To avoid this, make sure the ignore function also ignores the symlink
paths.
ignore (typing.Callable): function indicating which files to ignore
follow_nonexisting (bool): Whether to descend into directories in
``src`` that do not exist in ``dest``. Default is True
follow_links (bool): Whether to descend into symlinks in ``src``
@@ -1305,24 +1281,11 @@ def traverse_tree(
dest_child = os.path.join(dest_path, f)
rel_child = os.path.join(rel_path, f)
# If the source path is a link and the link's source is ignored, then ignore the link too,
# but only do this if the ignore is defined.
if ignore is not None:
if islink(source_child) and not follow_links:
target = readlink(source_child)
all_parents = accumulate(target.split(os.sep), lambda x, y: os.path.join(x, y))
if any(map(ignore, all_parents)):
tty.warn(
f"Skipping {source_path} because the source or a part of the source's "
f"path is included in the ignores."
)
continue
# Treat as a directory
# TODO: for symlinks, os.path.isdir looks for the link target. If the
# target is relative to the link, then that may not resolve properly
# relative to our cwd - see resolve_link_target_relative_to_the_link
if os.path.isdir(source_child) and (follow_links or not islink(source_child)):
if os.path.isdir(source_child) and (follow_links or not os.path.islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
@@ -1348,11 +1311,7 @@ def traverse_tree(
def lexists_islink_isdir(path):
"""Computes the tuple (lexists(path), islink(path), isdir(path)) in a minimal
number of stat calls on unix. Use os.path and symlink.islink methods for windows."""
if sys.platform == "win32":
if not os.path.lexists(path):
return False, False, False
return os.path.lexists(path), islink(path), os.path.isdir(path)
number of stat calls."""
# First try to lstat, so we know if it's a link or not.
try:
lst = os.lstat(path)
@@ -1377,7 +1336,7 @@ def lexists_islink_isdir(path):
return True, is_link, is_dir
class BaseDirectoryVisitor:
class BaseDirectoryVisitor(object):
"""Base class and interface for :py:func:`visit_directory_tree`."""
def visit_file(self, root, rel_path, depth):
@@ -1567,7 +1526,7 @@ def remove_if_dead_link(path):
Parameters:
path (str): The potential dead link
"""
if islink(path) and not os.path.exists(path):
if os.path.islink(path) and not os.path.exists(path):
os.unlink(path)
@@ -1626,7 +1585,7 @@ def remove_linked_tree(path):
kwargs["onerror"] = readonly_file_handler(ignore_errors=True)
if os.path.exists(path):
if islink(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), **kwargs)
os.unlink(path)
else:
@@ -1793,14 +1752,9 @@ def find(root, files, recursive=True):
files = [files]
if recursive:
tty.debug(f"Find (recursive): {root} {str(files)}")
result = _find_recursive(root, files)
return _find_recursive(root, files)
else:
tty.debug(f"Find (not recursive): {root} {str(files)}")
result = _find_non_recursive(root, files)
tty.debug(f"Find complete: {root} {str(files)}")
return result
return _find_non_recursive(root, files)
@system_path_filter
@@ -1936,7 +1890,7 @@ class HeaderList(FileList):
include_regex = re.compile(r"(.*?)(\binclude\b)(.*)")
def __init__(self, files):
super().__init__(files)
super(HeaderList, self).__init__(files)
self._macro_definitions = []
self._directories = None
@@ -1962,7 +1916,7 @@ def _default_directories(self):
"""Default computation of directories based on the list of
header files.
"""
dir_list = super().directories
dir_list = super(HeaderList, self).directories
values = []
for d in dir_list:
# If the path contains a subdirectory named 'include' then stop
@@ -2398,7 +2352,7 @@ def find_all_libraries(root, recursive=False):
)
class WindowsSimulatedRPath:
class WindowsSimulatedRPath(object):
"""Class representing Windows filesystem rpath analog
One instance of this class is associated with a package (only on Windows)
@@ -2427,7 +2381,7 @@ def library_dependents(self):
"""
Set of directories where package binaries/libraries are located.
"""
return set([pathlib.Path(self.pkg.prefix.bin)]) | self._additional_library_dependents
return set([self.pkg.prefix.bin]) | self._additional_library_dependents
def add_library_dependent(self, *dest):
"""
@@ -2440,9 +2394,9 @@ def add_library_dependent(self, *dest):
"""
for pth in dest:
if os.path.isfile(pth):
self._additional_library_dependents.add(pathlib.Path(pth).parent)
self._additional_library_dependents.add(os.path.dirname)
else:
self._additional_library_dependents.add(pathlib.Path(pth))
self._additional_library_dependents.add(pth)
@property
def rpaths(self):
@@ -2455,7 +2409,7 @@ def rpaths(self):
dependent_libs.extend(list(find_all_shared_libraries(path, recursive=True)))
for extra_path in self._addl_rpaths:
dependent_libs.extend(list(find_all_shared_libraries(extra_path, recursive=True)))
return set([pathlib.Path(x) for x in dependent_libs])
return set(dependent_libs)
def add_rpath(self, *paths):
"""
@@ -2471,7 +2425,7 @@ def add_rpath(self, *paths):
"""
self._addl_rpaths = self._addl_rpaths | set(paths)
def _link(self, path: pathlib.Path, dest_dir: pathlib.Path):
def _link(self, path, dest_dir):
"""Perform link step of simulated rpathing, installing
symlinks of file in path to the dest_dir
location. This method deliberately prevents
@@ -2479,35 +2433,27 @@ def _link(self, path: pathlib.Path, dest_dir: pathlib.Path):
This is because it is both meaningless from an rpath
perspective, and will cause an error when Developer
mode is not enabled"""
def report_already_linked():
# We have either already symlinked or we are encountering a naming clash
# either way, we don't want to overwrite existing libraries
already_linked = islink(str(dest_file))
tty.debug(
"Linking library %s to %s failed, " % (str(path), str(dest_file))
+ "already linked."
if already_linked
else "library with name %s already exists at location %s."
% (str(file_name), str(dest_dir))
)
file_name = path.name
dest_file = dest_dir / file_name
if not dest_file.exists() and dest_dir.exists() and not dest_file == path:
file_name = os.path.basename(path)
dest_file = os.path.join(dest_dir, file_name)
if os.path.exists(dest_dir) and not dest_file == path:
try:
symlink(str(path), str(dest_file))
symlink(path, dest_file)
# For py2 compatibility, we have to catch the specific Windows error code
# associated with trying to create a file that already exists (winerror 183)
# Catch OSErrors missed by the SymlinkError checks
except OSError as e:
if sys.platform == "win32" and (e.winerror == 183 or e.errno == errno.EEXIST):
report_already_linked()
# We have either already symlinked or we are encountering a naming clash
# either way, we don't want to overwrite existing libraries
already_linked = islink(dest_file)
tty.debug(
"Linking library %s to %s failed, " % (path, dest_file) + "already linked."
if already_linked
else "library with name %s already exists at location %s."
% (file_name, dest_dir)
)
pass
else:
raise e
# catch errors we raise ourselves from Spack
except llnl.util.symlink.AlreadyExistsError:
report_already_linked()
def establish_link(self):
"""
@@ -2740,7 +2686,7 @@ def remove_directory_contents(dir):
"""Remove all contents of a directory."""
if os.path.exists(dir):
for entry in [os.path.join(dir, entry) for entry in os.listdir(dir)]:
if os.path.isfile(entry) or islink(entry):
if os.path.isfile(entry) or os.path.islink(entry):
os.unlink(entry)
else:
shutil.rmtree(entry)
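Stepping back from the diff for a moment: since Windows has no rpath, the WindowsSimulatedRPath machinery above amounts to symlinking each dependency library next to every binary that needs it. A minimal sketch of that idea with hypothetical names (this is not Spack's API):

import os

def simulate_rpath(library_paths, dependent_dirs):
    # Hypothetical sketch: link each library into every dependent directory,
    # never overwriting anything already there (the job of
    # report_already_linked above).
    for lib in library_paths:
        name = os.path.basename(lib)
        for directory in dependent_dirs:
            dest = os.path.join(directory, name)
            if os.path.lexists(dest) or dest == lib:
                continue  # already linked or a self-link: skip
            os.symlink(lib, dest)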

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import division
import collections.abc
import contextlib
import functools
@@ -766,10 +768,10 @@ def pretty_seconds(seconds):
class RequiredAttributeError(ValueError):
def __init__(self, message):
super().__init__(message)
super(RequiredAttributeError, self).__init__(message)
class ObjectWrapper:
class ObjectWrapper(object):
"""Base class that wraps an object. Derived classes can add new behavior
while staying undercover.
@@ -796,7 +798,7 @@ def __init__(self, wrapped_object):
self.__dict__ = wrapped_object.__dict__
class Singleton:
class Singleton(object):
"""Simple wrapper for lazily initialized singleton objects."""
def __init__(self, factory):
@@ -821,7 +823,7 @@ def __getattr__(self, name):
# 'instance'/'_instance' to be defined or it will enter an infinite
# loop, so protect against that here.
if name in ["_instance", "instance"]:
raise AttributeError(f"cannot create {name}")
raise AttributeError()
return getattr(self.instance, name)
def __getitem__(self, name):
@@ -843,6 +845,27 @@ def __repr__(self):
return repr(self.instance)
class LazyReference(object):
"""Lazily evaluated reference to part of a singleton."""
def __init__(self, ref_function):
self.ref_function = ref_function
def __getattr__(self, name):
if name == "ref_function":
raise AttributeError()
return getattr(self.ref_function(), name)
def __getitem__(self, name):
return self.ref_function()[name]
def __str__(self):
return str(self.ref_function())
def __repr__(self):
return repr(self.ref_function())
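A short usage sketch of the two wrappers above; ``expensive_config`` is a hypothetical factory, and the behavior follows from the ``__getattr__``/``__getitem__`` forwarding shown in the excerpt:

def expensive_config():
    # Hypothetical factory: runs only on first access through the Singleton.
    print("building config...")
    return {"verbose": True}

config = Singleton(expensive_config)  # the factory is not called yet
print(config["verbose"])              # prints "building config..." then True
print(config["verbose"])              # factory is not called again

ref = LazyReference(lambda: config["verbose"])
print(ref)                            # re-evaluates the reference: True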
def load_module_from_file(module_name, module_path):
"""Loads a python module from the path of the corresponding file.
@@ -920,7 +943,7 @@ def _wrapper(args):
return _wrapper
class Devnull:
class Devnull(object):
"""Null stream with less overhead than ``os.devnull``.
See https://stackoverflow.com/a/2929954.
@@ -1037,7 +1060,7 @@ def __str__(self):
return str(self.data)
class GroupedExceptionHandler:
class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self):
@@ -1068,7 +1091,7 @@ def grouped_message(self, with_tracebacks: bool = True) -> str:
return "due to the following failures:\n{0}".format("\n".join(each_exception_message))
class GroupedExceptionForwarder:
class GroupedExceptionForwarder(object):
"""A contextmanager to capture exceptions and forward them to a
GroupedExceptionHandler."""
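A hedged usage sketch of the pair above; the ``forward`` helper and the truthiness of the handler are assumptions based on how the two classes are described here:

handler = GroupedExceptionHandler()

for name in ("a", "b", "c"):
    # Assumed API: handler.forward(name) returns a GroupedExceptionForwarder
    # that records any exception raised in the block instead of propagating.
    with handler.forward(name):
        if name == "b":
            raise ValueError("b failed")

if handler:  # assumed truthy when at least one exception was captured
    print(handler.grouped_message(with_tracebacks=False))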
@@ -1088,7 +1111,7 @@ def __exit__(self, exc_type, exc_value, tb):
return True
class classproperty:
class classproperty(object):
"""Non-data descriptor to evaluate a class-level property. The function that performs
the evaluation is injected at creation time and take an instance (could be None) and
an owner (i.e. the class that originated the instance)

View File

@@ -5,6 +5,8 @@
"""LinkTree class for setting up trees of symbolic links."""
from __future__ import print_function
import filecmp
import os
import shutil
@@ -285,7 +287,7 @@ def visit_symlinked_file(self, root, rel_path, depth):
self.visit_file(root, rel_path, depth)
class LinkTree:
class LinkTree(object):
"""Class to create trees of symbolic links from a source directory.
LinkTree objects are constructed with a source root. Their
@@ -430,12 +432,12 @@ class MergeConflictError(Exception):
class ConflictingSpecsError(MergeConflictError):
def __init__(self, spec_1, spec_2):
super().__init__(spec_1, spec_2)
super(MergeConflictError, self).__init__(spec_1, spec_2)
class SingleMergeConflictError(MergeConflictError):
def __init__(self, path):
super().__init__("Package merge blocked by file: %s" % path)
super(MergeConflictError, self).__init__("Package merge blocked by file: %s" % path)
class MergeConflictSummary(MergeConflictError):
@@ -450,4 +452,4 @@ def __init__(self, conflicts):
msg += "\n `{0}` and `{1}` both project to `{2}`".format(
conflict.src_a, conflict.src_b, conflict.dst
)
super().__init__(msg)
super(MergeConflictSummary, self).__init__(msg)

View File

@@ -9,10 +9,9 @@
import sys
import time
from datetime import datetime
from types import TracebackType
from typing import IO, Any, Callable, ContextManager, Dict, Generator, Optional, Tuple, Type, Union
from llnl.util import lang, tty
import llnl.util.tty as tty
from llnl.util.lang import pretty_seconds
import spack.util.string
@@ -35,15 +34,12 @@
]
ReleaseFnType = Optional[Callable[[], bool]]
#: A useful replacement for functions that should return True when not provided
#: for example.
true_fn = lambda: True
def true_fn() -> bool:
"""A function that always returns True."""
return True
class OpenFile:
class OpenFile(object):
"""Record for keeping track of open lockfiles (with reference counting).
There's really only one ``OpenFile`` per inode, per process, but we record the
@@ -52,12 +48,12 @@ class OpenFile:
file descriptors as well in the future.
"""
def __init__(self, fh: IO) -> None:
def __init__(self, fh):
self.fh = fh
self.refs = 0
class OpenFileTracker:
class OpenFileTracker(object):
"""Track open lockfiles, to minimize number of open file descriptors.
The ``fcntl`` locks that Spack uses are associated with an inode and a process.
@@ -82,11 +78,11 @@ class OpenFileTracker:
work in Python and assume the GIL.
"""
def __init__(self) -> None:
def __init__(self):
"""Create a new ``OpenFileTracker``."""
self._descriptors: Dict[Any, OpenFile] = {}
self._descriptors = {}
def get_fh(self, path: str) -> IO:
def get_fh(self, path):
"""Get a filehandle for a lockfile.
This routine will open writable files for read/write even if you're asking
@@ -94,7 +90,7 @@ def get_fh(self, path: str) -> IO:
(write) lock later if requested.
Arguments:
path: path to lock file we want a filehandle for
path (str): path to lock file we want a filehandle for
"""
# Open writable files as 'r+' so we can upgrade to write later
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), "r+"
@@ -143,7 +139,7 @@ def get_fh(self, path: str) -> IO:
def release_by_stat(self, stat):
key = (stat.st_dev, stat.st_ino, os.getpid())
open_file = self._descriptors.get(key)
assert open_file, "Attempted to close non-existing inode: %s" % stat.st_ino
assert open_file, "Attempted to close non-existing inode: %s" % stat.st_inode
open_file.refs -= 1
if not open_file.refs:
@@ -161,7 +157,7 @@ def purge(self):
#: Open file descriptors for locks in this process. Used to prevent one process
#: from opening the same file many times for different byte range locks
FILE_TRACKER = OpenFileTracker()
file_tracker = OpenFileTracker()
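Why reference counting at all: POSIX ``fcntl`` locks belong to the process, so closing any descriptor on a file drops every lock the process holds on that file. A usage sketch, assuming ``get_fh`` caches one handle per inode as described (the lock path is illustrative):

fh1 = FILE_TRACKER.get_fh("/tmp/example.lock")
fh2 = FILE_TRACKER.get_fh("/tmp/example.lock")
assert fh1 is fh2  # same inode -> one shared handle, refcount is now 2

FILE_TRACKER.release_by_fh(fh1)  # refcount 2 -> 1; the file must stay open
FILE_TRACKER.release_by_fh(fh2)  # refcount 1 -> 0; now safe to close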
def _attempts_str(wait_time, nattempts):
@@ -170,10 +166,10 @@ def _attempts_str(wait_time, nattempts):
return ""
attempts = spack.util.string.plural(nattempts, "attempt")
return " after {} and {}".format(lang.pretty_seconds(wait_time), attempts)
return " after {} and {}".format(pretty_seconds(wait_time), attempts)
class LockType:
class LockType(object):
READ = 0
WRITE = 1
@@ -192,11 +188,11 @@ def to_module(tid):
return lock
@staticmethod
def is_valid(op: int) -> bool:
def is_valid(op):
return op == LockType.READ or op == LockType.WRITE
class Lock:
class Lock(object):
"""This is an implementation of a filesystem lock using Python's lockf.
In Python, ``lockf`` actually calls ``fcntl``, so this should work with
@@ -211,16 +207,7 @@ class Lock:
overlapping byte ranges in the same file).
"""
def __init__(
self,
path: str,
*,
start: int = 0,
length: int = 0,
default_timeout: Optional[float] = None,
debug: bool = False,
desc: str = "",
) -> None:
def __init__(self, path, start=0, length=0, default_timeout=None, debug=False, desc=""):
"""Construct a new lock on the file at ``path``.
By default, the lock applies to the whole file. Optionally,
@@ -233,17 +220,17 @@ def __init__(
beginning of the file.
Args:
path: path to the lock
start: optional byte offset at which the lock starts
length: optional number of bytes to lock
default_timeout: seconds to wait for lock attempts,
path (str): path to the lock
start (int): optional byte offset at which the lock starts
length (int): optional number of bytes to lock
default_timeout (int): number of seconds to wait for lock attempts,
where None means to wait indefinitely
debug: debug mode specific to locking
desc: optional debug message lock description, which is
debug (bool): debug mode specific to locking
desc (str): optional debug message lock description, which is
helpful for distinguishing between different Spack locks.
"""
self.path = path
self._file: Optional[IO] = None
self._file = None
self._reads = 0
self._writes = 0
@@ -255,7 +242,7 @@ def __init__(
self.debug = debug
# optional debug description
self.desc = f" ({desc})" if desc else ""
self.desc = " ({0})".format(desc) if desc else ""
# If the user doesn't set a default timeout, or if they choose
# None, 0, etc. then lock attempts will not time out (unless the
@@ -263,15 +250,11 @@ def __init__(
self.default_timeout = default_timeout or None
# PID and host of lock holder (only used in debug mode)
self.pid: Optional[int] = None
self.old_pid: Optional[int] = None
self.host: Optional[str] = None
self.old_host: Optional[str] = None
self.pid = self.old_pid = None
self.host = self.old_host = None
@staticmethod
def _poll_interval_generator(
_wait_times: Optional[Tuple[float, float, float]] = None
) -> Generator[float, None, None]:
def _poll_interval_generator(_wait_times=None):
"""This implements a backoff scheme for polling a contended resource
by suggesting a succession of wait times between polls.
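A self-contained sketch of such a backoff generator; the wait times and ramp length here are illustrative, not the values Spack uses:

def poll_intervals(wait_times=(0.01, 0.1, 0.5), ramp=10):
    # Illustrative backoff: yield the short wait `ramp` times, then the
    # medium wait `ramp` times, then the long wait forever.
    short, medium, long_wait = wait_times
    for wait in (short, medium):
        for _ in range(ramp):
            yield wait
    while True:
        yield long_wait

# Typical consumption while spinning on a contended resource:
#   for wait in poll_intervals():
#       if try_lock():
#           break
#       time.sleep(wait)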
@@ -294,21 +277,21 @@ def _poll_interval_generator(
num_requests += 1
yield wait_time
def __repr__(self) -> str:
def __repr__(self):
"""Formal representation of the lock."""
rep = "{0}(".format(self.__class__.__name__)
for attr, value in self.__dict__.items():
rep += "{0}={1}, ".format(attr, value.__repr__())
return "{0})".format(rep.strip(", "))
def __str__(self) -> str:
def __str__(self):
"""Readable string (with key fields) of the lock."""
location = "{0}[{1}:{2}]".format(self.path, self._start, self._length)
timeout = "timeout={0}".format(self.default_timeout)
activity = "#reads={0}, #writes={1}".format(self._reads, self._writes)
return "({0}, {1}, {2})".format(location, timeout, activity)
def _lock(self, op: int, timeout: Optional[float] = None) -> Tuple[float, int]:
def _lock(self, op, timeout=None):
"""This takes a lock using POSIX locks (``fcntl.lockf``).
The lock is implemented as a spin lock using a nonblocking call
@@ -327,7 +310,7 @@ def _lock(self, op: int, timeout: Optional[float] = None) -> Tuple[float, int]:
# Create file and parent directories if they don't exist.
if self._file is None:
self._ensure_parent_directory()
self._file = FILE_TRACKER.get_fh(self.path)
self._file = file_tracker.get_fh(self.path)
if LockType.to_module(op) == fcntl.LOCK_EX and self._file.mode == "r":
# Attempt to upgrade to write lock w/a read-only file.
@@ -336,7 +319,7 @@ def _lock(self, op: int, timeout: Optional[float] = None) -> Tuple[float, int]:
self._log_debug(
"{} locking [{}:{}]: timeout {}".format(
op_str.lower(), self._start, self._length, lang.pretty_seconds(timeout or 0)
op_str.lower(), self._start, self._length, pretty_seconds(timeout or 0)
)
)
@@ -360,20 +343,15 @@ def _lock(self, op: int, timeout: Optional[float] = None) -> Tuple[float, int]:
total_wait_time = time.time() - start_time
raise LockTimeoutError(op_str.lower(), self.path, total_wait_time, num_attempts)
def _poll_lock(self, op: int) -> bool:
def _poll_lock(self, op):
"""Attempt to acquire the lock in a non-blocking manner. Return whether
the locking attempt succeeds.
"""
assert self._file is not None, "cannot poll a lock without the file being set"
module_op = LockType.to_module(op)
try:
# Try to get the lock (will raise if not available.)
fcntl.lockf(
self._file.fileno(),
module_op | fcntl.LOCK_NB,
self._length,
self._start,
os.SEEK_SET,
self._file, module_op | fcntl.LOCK_NB, self._length, self._start, os.SEEK_SET
)
# help for debugging distributed locking
@@ -399,7 +377,7 @@ def _poll_lock(self, op: int) -> bool:
return False
def _ensure_parent_directory(self) -> str:
def _ensure_parent_directory(self):
parent = os.path.dirname(self.path)
# relative paths to lockfiles in the current directory have no parent
@@ -418,22 +396,20 @@ def _ensure_parent_directory(self) -> str:
raise
return parent
def _read_log_debug_data(self) -> None:
def _read_log_debug_data(self):
"""Read PID and host data out of the file if it is there."""
assert self._file is not None, "cannot read debug log without the file being set"
self.old_pid = self.pid
self.old_host = self.host
line = self._file.read()
if line:
pid, host = line.strip().split(",")
_, _, pid = pid.rpartition("=")
_, _, self.pid = pid.rpartition("=")
_, _, self.host = host.rpartition("=")
self.pid = int(pid)
self.pid = int(self.pid)
def _write_log_debug_data(self) -> None:
def _write_log_debug_data(self):
"""Write PID and host data to the file, recording old values."""
assert self._file is not None, "cannot write debug log without the file being set"
self.old_pid = self.pid
self.old_host = self.host
@@ -447,21 +423,20 @@ def _write_log_debug_data(self) -> None:
self._file.flush()
os.fsync(self._file.fileno())
def _unlock(self) -> None:
def _unlock(self):
"""Releases a lock using POSIX locks (``fcntl.lockf``)
Releases the lock regardless of mode. Note that read locks may
be masquerading as write locks, but this removes either.
"""
assert self._file is not None, "cannot unlock without the file being set"
fcntl.lockf(self._file.fileno(), fcntl.LOCK_UN, self._length, self._start, os.SEEK_SET)
FILE_TRACKER.release_by_fh(self._file)
fcntl.lockf(self._file, fcntl.LOCK_UN, self._length, self._start, os.SEEK_SET)
file_tracker.release_by_fh(self._file)
self._file = None
self._reads = 0
self._writes = 0
def acquire_read(self, timeout: Optional[float] = None) -> bool:
def acquire_read(self, timeout=None):
"""Acquires a recursive, shared lock for reading.
Read and write locks can be acquired and released in arbitrary
@@ -486,7 +461,7 @@ def acquire_read(self, timeout: Optional[float] = None) -> bool:
self._reads += 1
return False
def acquire_write(self, timeout: Optional[float] = None) -> bool:
def acquire_write(self, timeout=None):
"""Acquires a recursive, exclusive lock for writing.
Read and write locks can be acquired and released in arbitrary
@@ -516,7 +491,7 @@ def acquire_write(self, timeout: Optional[float] = None) -> bool:
self._writes += 1
return False
def is_write_locked(self) -> bool:
def is_write_locked(self):
"""Check if the file is write locked
Return:
@@ -533,7 +508,7 @@ def is_write_locked(self) -> bool:
return False
def downgrade_write_to_read(self, timeout: Optional[float] = None) -> None:
def downgrade_write_to_read(self, timeout=None):
"""
Downgrade from an exclusive write lock to a shared read.
@@ -552,7 +527,7 @@ def downgrade_write_to_read(self, timeout: Optional[float] = None) -> None:
else:
raise LockDowngradeError(self.path)
def upgrade_read_to_write(self, timeout: Optional[float] = None) -> None:
def upgrade_read_to_write(self, timeout=None):
"""
Attempts to upgrade from a shared read lock to an exclusive write.
@@ -571,7 +546,7 @@ def upgrade_read_to_write(self, timeout: Optional[float] = None) -> None:
else:
raise LockUpgradeError(self.path)
def release_read(self, release_fn: ReleaseFnType = None) -> bool:
def release_read(self, release_fn=None):
"""Releases a read lock.
Arguments:
@@ -607,7 +582,7 @@ def release_read(self, release_fn: ReleaseFnType = None) -> bool:
self._reads -= 1
return False
def release_write(self, release_fn: ReleaseFnType = None) -> bool:
def release_write(self, release_fn=None):
"""Releases a write lock.
Arguments:
@@ -648,65 +623,65 @@ def release_write(self, release_fn: ReleaseFnType = None) -> bool:
else:
return False
def cleanup(self) -> None:
def cleanup(self):
if self._reads == 0 and self._writes == 0:
os.unlink(self.path)
else:
raise LockError("Attempting to cleanup active lock.")
def _get_counts_desc(self) -> str:
def _get_counts_desc(self):
return (
"(reads {0}, writes {1})".format(self._reads, self._writes) if tty.is_verbose() else ""
)
def _log_acquired(self, locktype, wait_time, nattempts) -> None:
def _log_acquired(self, locktype, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Acquired at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, "{0}{1}".format(desc, attempts_part)))
def _log_acquiring(self, locktype) -> None:
def _log_acquiring(self, locktype):
self._log_debug(self._status_msg(locktype, "Acquiring"), level=3)
def _log_debug(self, *args, **kwargs) -> None:
def _log_debug(self, *args, **kwargs):
"""Output lock debug messages."""
kwargs["level"] = kwargs.get("level", 2)
tty.debug(*args, **kwargs)
def _log_downgraded(self, wait_time, nattempts) -> None:
def _log_downgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Downgraded at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("READ LOCK", "{0}{1}".format(desc, attempts_part)))
def _log_downgrading(self) -> None:
def _log_downgrading(self):
self._log_debug(self._status_msg("WRITE LOCK", "Downgrading"), level=3)
def _log_released(self, locktype) -> None:
def _log_released(self, locktype):
now = datetime.now()
desc = "Released at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, desc))
def _log_releasing(self, locktype) -> None:
def _log_releasing(self, locktype):
self._log_debug(self._status_msg(locktype, "Releasing"), level=3)
def _log_upgraded(self, wait_time, nattempts) -> None:
def _log_upgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Upgraded at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("WRITE LOCK", "{0}{1}".format(desc, attempts_part)))
def _log_upgrading(self) -> None:
def _log_upgrading(self):
self._log_debug(self._status_msg("READ LOCK", "Upgrading"), level=3)
def _status_msg(self, locktype: str, status: str) -> str:
def _status_msg(self, locktype, status):
status_desc = "[{0}] {1}".format(status, self._get_counts_desc())
return "{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}".format(
locktype, self, status_desc
)
class LockTransaction:
class LockTransaction(object):
"""Simple nested transaction context manager that uses a file lock.
Arguments:
@@ -734,13 +709,7 @@ class LockTransaction:
"""
def __init__(
self,
lock: Lock,
acquire: Union[ReleaseFnType, ContextManager] = None,
release: Union[ReleaseFnType, ContextManager] = None,
timeout: Optional[float] = None,
) -> None:
def __init__(self, lock, acquire=None, release=None, timeout=None):
self._lock = lock
self._timeout = timeout
self._acquire_fn = acquire
@@ -755,20 +724,15 @@ def __enter__(self):
else:
return self._as
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> bool:
def __exit__(self, type, value, traceback):
suppress = False
def release_fn():
if self._release_fn is not None:
return self._release_fn(exc_type, exc_value, traceback)
return self._release_fn(type, value, traceback)
if self._as and hasattr(self._as, "__exit__"):
if self._as.__exit__(exc_type, exc_value, traceback):
if self._as.__exit__(type, value, traceback):
suppress = True
if self._exit(release_fn):
@@ -776,12 +740,6 @@ def release_fn():
return suppress
def _enter(self) -> bool:
return NotImplemented
def _exit(self, release_fn: ReleaseFnType) -> bool:
return NotImplemented
class ReadTransaction(LockTransaction):
"""LockTransaction context manager that does a read and releases it."""
@@ -812,7 +770,7 @@ class LockDowngradeError(LockError):
def __init__(self, path):
msg = "Cannot downgrade lock from write to read on file: %s" % path
super().__init__(msg)
super(LockDowngradeError, self).__init__(msg)
class LockLimitError(LockError):
@@ -824,10 +782,10 @@ class LockTimeoutError(LockError):
def __init__(self, lock_type, path, time, attempts):
fmt = "Timed out waiting for a {} lock after {}.\n Made {} {} on file: {}"
super().__init__(
super(LockTimeoutError, self).__init__(
fmt.format(
lock_type,
lang.pretty_seconds(time),
pretty_seconds(time),
attempts,
"attempt" if attempts == 1 else "attempts",
path,
@@ -840,7 +798,7 @@ class LockUpgradeError(LockError):
def __init__(self, path):
msg = "Cannot upgrade lock from read to write on file: %s" % path
super().__init__(msg)
super(LockUpgradeError, self).__init__(msg)
class LockPermissionError(LockError):
@@ -852,7 +810,7 @@ class LockROFileError(LockPermissionError):
def __init__(self, path):
msg = "Can't take write lock on read-only file: %s" % path
super().__init__(msg)
super(LockROFileError, self).__init__(msg)
class CantCreateLockError(LockPermissionError):
@@ -861,4 +819,4 @@ class CantCreateLockError(LockPermissionError):
def __init__(self, path):
msg = "cannot create lock '%s': " % path
msg += "file does not exist and location is not writable"
super().__init__(msg)
super(LockError, self).__init__(msg)

View File

@@ -2,190 +2,77 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import errno
import os
import re
import shutil
import subprocess
import sys
import tempfile
from os.path import exists, join
from llnl.util import lang, tty
from spack.error import SpackError
from spack.util.path import system_path_filter
from llnl.util import lang
if sys.platform == "win32":
from win32file import CreateHardLink
is_windows = sys.platform == "win32"
def symlink(source_path: str, link_path: str, allow_broken_symlinks: bool = not is_windows):
def symlink(real_path, link_path):
"""
Create a link.
Create a symbolic link.
On non-Windows and Windows with System Administrator
privileges this will be a normal symbolic link via
os.symlink.
On Windows without privileges the link will be a
junction for a directory and a hardlink for a file.
On Windows the various link types are:
Symbolic Link: A link to a file or directory on the
same or different volume (drive letter) or even to
a remote file or directory (using UNC in its path).
Need System Administrator privileges to make these.
Hard Link: A link to a file on the same volume (drive
letter) only. Every file (file's data) has at least 1
hard link (file's name). But when this method creates
a new hard link there will be 2. Deleting all hard
links effectively deletes the file. Don't need System
Administrator privileges.
Junction: A link to a directory on the same or different
volume (drive letter) but not to a remote directory. Don't
need System Administrator privileges.
Parameters:
source_path (str): The real file or directory that the link points to.
Must be absolute OR relative to the link.
link_path (str): The path where the link will exist.
allow_broken_symlinks (bool): On Linux or Mac, don't raise an exception if the source_path
doesn't exist. This will still raise an exception on Windows.
On Windows, use junctions if os.symlink fails.
"""
source_path = os.path.normpath(source_path)
win_source_path = source_path
link_path = os.path.normpath(link_path)
# Never allow broken links on Windows.
if sys.platform == "win32" and allow_broken_symlinks:
raise ValueError("allow_broken_symlinks parameter cannot be True on Windows.")
if not allow_broken_symlinks:
# Perform basic checks to make sure symlinking will succeed
if os.path.lexists(link_path):
raise AlreadyExistsError(
f"Link path ({link_path}) already exists. Cannot create link."
)
if not os.path.exists(source_path):
if os.path.isabs(source_path) and not allow_broken_symlinks:
# An absolute source path that does not exist will result in a broken link.
raise SymlinkError(
f"Source path ({source_path}) is absolute but does not exist. Resulting "
f"link would be broken so not making link."
)
else:
# os.symlink can create a link when the given source path is relative to
# the link path. Emulate this behavior and check to see if the source exists
# relative to the link path ahead of link creation to prevent broken
# links from being made.
link_parent_dir = os.path.dirname(link_path)
relative_path = os.path.join(link_parent_dir, source_path)
if os.path.exists(relative_path):
# In order to work on Windows, the source path needs to be modified to be
# relative because hardlink/junction don't resolve relative paths the same
# way as os.symlink. This is ignored on other operating systems.
win_source_path = relative_path
elif not allow_broken_symlinks:
raise SymlinkError(
f"The source path ({source_path}) is not relative to the link path "
f"({link_path}). Resulting link would be broken so not making link."
)
# Create the symlink
if sys.platform == "win32" and not _windows_can_symlink():
_windows_create_link(win_source_path, link_path)
if sys.platform != "win32":
os.symlink(real_path, link_path)
elif _win32_can_symlink():
# Windows requires target_is_directory=True when the target is a dir.
os.symlink(real_path, link_path, target_is_directory=os.path.isdir(real_path))
else:
os.symlink(source_path, link_path, target_is_directory=os.path.isdir(source_path))
try:
# Try to use junctions
_win32_junction(real_path, link_path)
except OSError as e:
if e.errno == errno.EEXIST:
# EEXIST error indicates that the file we're trying to "link"
# is already present; don't bother trying to copy, which will
# also fail, just raise
raise
else:
# If all else fails, fall back to copying files
shutil.copyfile(real_path, link_path)
def islink(path: str) -> bool:
"""Override os.islink to give correct answer for spack logic.
For non-Windows: a link can be determined with the os.path.islink method.
Windows-only methods will return False for other operating systems.
For Windows: Spack considers symlinks, hard links, and junctions to
all be links, so if any of those are True, return True.
Args:
path (str): path to check if it is a link.
Returns:
bool - whether the path is any kind of link or not.
"""
return any([os.path.islink(path), _windows_is_junction(path), _windows_is_hardlink(path)])
def islink(path):
return os.path.islink(path) or _win32_is_junction(path)
def _windows_is_hardlink(path: str) -> bool:
"""Determines if a path is a windows hard link. This is accomplished
by looking at the number of links using os.stat. A non-hard-linked file
will have a st_nlink value of 1, whereas a hard link will have a value
larger than 1. Note that both the original and hard-linked file will
return True because they share the same inode.
# '_win32' functions based on
# https://github.com/Erotemic/ubelt/blob/master/ubelt/util_links.py
def _win32_junction(path, link):
# junctions require absolute paths
if not os.path.isabs(link):
link = os.path.abspath(link)
Args:
path (str): Windows path to check for a hard link
# os.symlink will fail if link exists, emulate the behavior here
if exists(link):
raise OSError(errno.EEXIST, "File exists: %s -> %s" % (link, path))
Returns:
bool - Whether the path is a hard link or not.
"""
if sys.platform != "win32" or os.path.islink(path) or not os.path.exists(path):
return False
if not os.path.isabs(path):
parent = os.path.join(link, os.pardir)
path = os.path.join(parent, path)
path = os.path.abspath(path)
return os.stat(path).st_nlink > 1
def _windows_is_junction(path: str) -> bool:
"""Determines if a path is a windows junction. A junction can be
determined using a bitwise AND operation between the file's
attribute bitmask and the known junction bitmask (0x400).
Args:
path (str): A non-file path
Returns:
bool - whether the path is a junction or not.
"""
if sys.platform != "win32" or os.path.islink(path) or os.path.isfile(path):
return False
import ctypes.wintypes
get_file_attributes = ctypes.windll.kernel32.GetFileAttributesW # type: ignore[attr-defined]
get_file_attributes.argtypes = (ctypes.wintypes.LPWSTR,)
get_file_attributes.restype = ctypes.wintypes.DWORD
invalid_file_attributes = 0xFFFFFFFF
reparse_point = 0x400
file_attr = get_file_attributes(str(path))
if file_attr == invalid_file_attributes:
return False
return file_attr & reparse_point > 0
CreateHardLink(link, path)
@lang.memoized
def _windows_can_symlink() -> bool:
"""
Determines if windows is able to make a symlink depending on
the system configuration and the level of the user's permissions.
"""
if sys.platform != "win32":
tty.warn("windows_can_symlink method can't be used on non-Windows OS.")
return False
def _win32_can_symlink():
tempdir = tempfile.mkdtemp()
dpath = os.path.join(tempdir, "dpath")
fpath = os.path.join(tempdir, "fpath.txt")
dpath = join(tempdir, "dpath")
fpath = join(tempdir, "fpath.txt")
dlink = os.path.join(tempdir, "dlink")
flink = os.path.join(tempdir, "flink.txt")
dlink = join(tempdir, "dlink")
flink = join(tempdir, "flink.txt")
import llnl.util.filesystem as fs
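The capability probe that this tempdir setup feeds into can be sketched standalone; the hypothetical ``can_symlink`` below only tries a single file link, where the real check also probes directories:

import os
import tempfile

def can_symlink():
    # Hypothetical probe: try to create one file symlink in a temp dir.
    tmpdir = tempfile.mkdtemp()
    target = os.path.join(tmpdir, "target.txt")
    link = os.path.join(tmpdir, "link.txt")
    open(target, "w").close()
    try:
        os.symlink(target, link)
        return True
    except OSError:
        return False  # e.g. unprivileged Windows without Developer Mode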
@@ -209,140 +96,24 @@ def _windows_can_symlink() -> bool:
return can_symlink_directories and can_symlink_files
def _windows_create_link(source: str, link: str):
def _win32_is_junction(path):
"""
Attempts to create a Hard Link or Junction as an alternative
to a symbolic link. This is called when symbolic links cannot
be created.
Determines if a path is a win32 junction
"""
if sys.platform != "win32":
raise SymlinkError("windows_create_link method can't be used on non-Windows OS.")
elif os.path.isdir(source):
_windows_create_junction(source=source, link=link)
elif os.path.isfile(source):
_windows_create_hard_link(path=source, link=link)
else:
raise SymlinkError(
f"Cannot create link from {source}. It is neither a file nor a directory."
)
if os.path.islink(path):
return False
if sys.platform == "win32":
import ctypes.wintypes
def _windows_create_junction(source: str, link: str):
"""Duly verify that the path and link are eligible to create a junction,
then create the junction.
"""
if sys.platform != "win32":
raise SymlinkError("windows_create_junction method can't be used on non-Windows OS.")
elif not os.path.exists(source):
raise SymlinkError("Source path does not exist, cannot create a junction.")
elif os.path.lexists(link):
raise AlreadyExistsError("Link path already exists, cannot create a junction.")
elif not os.path.isdir(source):
raise SymlinkError("Source path is not a directory, cannot create a junction.")
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
GetFileAttributes.restype = ctypes.wintypes.DWORD
import subprocess
INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
FILE_ATTRIBUTE_REPARSE_POINT = 0x400
cmd = ["cmd", "/C", "mklink", "/J", link, source]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
tty.debug(out.decode())
if proc.returncode != 0:
err = err.decode()
tty.error(err)
raise SymlinkError("Make junction command returned a non-zero return code.", err)
res = GetFileAttributes(path)
return res != INVALID_FILE_ATTRIBUTES and bool(res & FILE_ATTRIBUTE_REPARSE_POINT)
def _windows_create_hard_link(path: str, link: str):
"""Duly verify that the path and link are eligible to create a hard
link, then create the hard link.
"""
if sys.platform != "win32":
raise SymlinkError("windows_create_hard_link method can't be used on non-Windows OS.")
elif not os.path.exists(path):
raise SymlinkError(f"File path {path} does not exist. Cannot create hard link.")
elif os.path.lexists(link):
raise AlreadyExistsError(f"Link path ({link}) already exists. Cannot create hard link.")
elif not os.path.isfile(path):
raise SymlinkError(f"File path ({link}) is not a file. Cannot create hard link.")
else:
tty.debug(f"Creating hard link {link} pointing to {path}")
CreateHardLink(link, path)
def readlink(path: str):
"""Spack utility to override of os.readlink method to work cross platform"""
if _windows_is_hardlink(path):
return _windows_read_hard_link(path)
elif _windows_is_junction(path):
return _windows_read_junction(path)
else:
return os.readlink(path)
def _windows_read_hard_link(link: str) -> str:
"""Find all of the files that point to the same inode as the link"""
if sys.platform != "win32":
raise SymlinkError("Can't read hard link on non-Windows OS.")
link = os.path.abspath(link)
fsutil_cmd = ["fsutil", "hardlink", "list", link]
proc = subprocess.Popen(fsutil_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise SymlinkError(f"An error occurred while reading hard link: {err.decode()}")
# fsutil response does not include the drive name, so append it back to each linked file.
drive, link_tail = os.path.splitdrive(os.path.abspath(link))
links = set([os.path.join(drive, p) for p in out.decode().splitlines()])
links.remove(link)
if len(links) == 1:
return links.pop()
elif len(links) > 1:
# TODO: How best to handle the case where 3 or more paths point to a single inode?
raise SymlinkError(f"Found multiple paths pointing to the same inode {links}")
else:
raise SymlinkError("Cannot determine hard link source path.")
def _windows_read_junction(link: str):
"""Find the path that a junction points to."""
if sys.platform != "win32":
raise SymlinkError("Can't read junction on non-Windows OS.")
link = os.path.abspath(link)
link_basename = os.path.basename(link)
link_parent = os.path.dirname(link)
fsutil_cmd = ["dir", "/a:l", link_parent]
proc = subprocess.Popen(fsutil_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise SymlinkError(f"An error occurred while reading junction: {err.decode()}")
matches = re.search(rf"<JUNCTION>\s+{link_basename} \[(.*)]", out.decode())
if matches:
return matches.group(1)
else:
raise SymlinkError("Could not find junction path.")
@system_path_filter
def resolve_link_target_relative_to_the_link(link):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
target = readlink(link)
if os.path.isabs(target):
return target
link_dir = os.path.dirname(os.path.abspath(link))
return os.path.join(link_dir, target)
class SymlinkError(SpackError):
"""Exception class for errors raised while creating symlinks,
junctions and hard links
"""
class AlreadyExistsError(SymlinkError):
"""Link path already exists."""
return False
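Pulling this file's detection logic together, a toy classifier in the spirit of the functions above (illustrative only; the junction check is left as a comment because it needs the ctypes attribute query shown earlier):

import os
import sys

def classify_link(path):
    # Illustrative: report which kind of "link" a path is, in the broad
    # sense used by islink() above.
    if os.path.islink(path):
        return "symlink"
    if sys.platform == "win32":
        if os.path.isfile(path) and os.stat(path).st_nlink > 1:
            return "hardlink"  # shared inode, as in the hard-link check
        # A junction would be detected via the 0x400 reparse-point
        # attribute, as in the junction check above.
    return "not a link"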

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import unicode_literals
import contextlib
import io
import os
@@ -12,7 +14,6 @@
import traceback
from datetime import datetime
from sys import platform as _platform
from typing import NoReturn
if _platform != "win32":
import fcntl
@@ -245,7 +246,7 @@ def warn(message, *args, **kwargs):
info("Warning: " + str(message), *args, **kwargs)
def die(message, *args, **kwargs) -> NoReturn:
def die(message, *args, **kwargs):
kwargs.setdefault("countback", 4)
error(message, *args, **kwargs)
sys.exit(1)

View File

@@ -6,6 +6,8 @@
"""
Routines for printing columnar output. See ``colify()`` for more information.
"""
from __future__ import division, unicode_literals
import io
import os
import sys

View File

@@ -59,6 +59,8 @@
To output an @, use '@@'. To output a } inside braces, use '}}'.
"""
from __future__ import unicode_literals
import re
import sys
from contextlib import contextmanager
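A couple of illustrative calls showing the escaping rules quoted above, assuming the module's ``colorize`` entry point and its usual markup (treat the exact codes as assumptions):

from llnl.util.tty.color import colorize

print(colorize("@*r{Error:} build failed"))    # assumed bold-red "Error:"
print(colorize("contact admin@@example.com"))  # '@@' renders a literal '@'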
@@ -68,7 +70,7 @@ class ColorParseError(Exception):
"""Raised when a color format fails to parse."""
def __init__(self, message):
super().__init__(message)
super(ColorParseError, self).__init__(message)
# Text styles for ansi codes
@@ -203,7 +205,7 @@ def color_when(value):
set_color_when(old_value)
class match_to_ansi:
class match_to_ansi(object):
def __init__(self, color=True, enclose=False):
self.color = _color_when_value(color)
self.enclose = enclose
@@ -319,7 +321,7 @@ def cescape(string):
return string
class ColorStream:
class ColorStream(object):
def __init__(self, stream, color=None):
self._stream = stream
self._color = color

View File

@@ -5,6 +5,8 @@
"""Utility classes for logging the output of blocks of code.
"""
from __future__ import unicode_literals
import atexit
import ctypes
import errno
@@ -65,7 +67,7 @@ def _strip(line):
return _escape.sub("", line)
class keyboard_input:
class keyboard_input(object):
"""Context manager to disable line editing and echoing.
Use this with ``sys.stdin`` for keyboard input, e.g.::
@@ -242,7 +244,7 @@ def __exit__(self, exc_type, exception, traceback):
signal.signal(signum, old_handler)
class Unbuffered:
class Unbuffered(object):
"""Wrapper for Python streams that forces them to be unbuffered.
This is implemented by forcing a flush after each write.
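A minimal sketch of that flush-after-write idea, independent of the class above (``AutoFlush`` is hypothetical):

import sys

class AutoFlush:
    # Hypothetical wrapper: flush the underlying stream after every write.
    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        self.stream.write(data)
        self.stream.flush()

    def __getattr__(self, name):
        return getattr(self.stream, name)  # delegate everything else

sys.stdout = AutoFlush(sys.stdout)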
@@ -287,7 +289,7 @@ def _file_descriptors_work(*streams):
return False
class FileWrapper:
class FileWrapper(object):
"""Represents a file. Can be an open stream, a path to a file (not opened
yet), or neither. When unwrapped, it returns an open file (or file-like)
object.
@@ -329,7 +331,7 @@ def close(self):
self.file.close()
class MultiProcessFd:
class MultiProcessFd(object):
"""Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
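The problem being solved, in toy form: with the ``fork`` start method (the default on Linux) a raw descriptor number is inherited and usable in the child, which is what lets a sketch like this work; ``MultiProcessFd`` wraps the descriptor so the pattern holds up beyond that case:

import multiprocessing
import os

def child(write_fd):
    # With fork, the child inherits the open descriptor and can use it.
    os.write(write_fd, b"hello from child\n")
    os.close(write_fd)

if __name__ == "__main__":
    read_fd, write_fd = os.pipe()
    proc = multiprocessing.Process(target=child, args=(write_fd,))
    proc.start()
    os.close(write_fd)  # close the parent's copy; the child keeps its own
    print(os.read(read_fd, 1024).decode(), end="")
    proc.join()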
@@ -429,7 +431,7 @@ def log_output(*args, **kwargs):
return nixlog(*args, **kwargs)
class nixlog:
class nixlog(object):
"""
Under the hood, we spawn a daemon and set up a pipe between this
process and the daemon. The daemon writes our output to both the
@@ -750,7 +752,7 @@ def close(self):
os.close(self.saved_stream)
class winlog:
class winlog(object):
"""
Similar to nixlog, with underlying
functionality ported to support Windows.
@@ -780,7 +782,7 @@ def __enter__(self):
raise RuntimeError("file argument must be set by __init__ ")
# Open both write and reading on logfile
if isinstance(self.logfile, io.StringIO):
if type(self.logfile) == io.StringIO:
self._ioflag = True
# cannot have two streams on tempfile, so we must make our own
sys.stdout = self.logfile

View File

@@ -13,6 +13,8 @@
Note: The functionality in this module is unsupported on Windows
"""
from __future__ import print_function
import multiprocessing
import os
import re
@@ -34,7 +36,7 @@
pass
class ProcessController:
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
This allows one process (the controller) to drive another (the
@@ -155,7 +157,7 @@ def wait_running(self):
self.wait(lambda: "T" not in self.proc_status())
class PseudoShell:
class PseudoShell(object):
"""Sets up controller and minion processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some

View File

@@ -13,7 +13,7 @@
from spack.util.executable import Executable, ProcessError
class ABI:
class ABI(object):
"""This class provides methods to test ABI compatibility between specs.
The current implementation is rather rough and could be improved."""

View File

@@ -60,7 +60,7 @@ def _search_duplicate_compilers(error_cls):
GROUPS = collections.defaultdict(list)
class Error:
class Error(object):
"""Information on an error reported in a test."""
def __init__(self, summary, details):
@@ -286,7 +286,7 @@ def _check_build_test_callbacks(pkgs, error_cls):
"""Ensure stand-alone test method is not included in build-time callbacks"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
test_callbacks = getattr(pkg_cls, "build_time_test_callbacks", None)
# TODO (post-34236): "test*"->"test_*" once remove deprecated methods
@@ -312,7 +312,7 @@ def _check_patch_urls(pkgs, error_cls):
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for condition, patches in pkg_cls.patches.items():
for patch in patches:
if not isinstance(patch, spack.patch.UrlPatch):
@@ -342,7 +342,7 @@ def _search_for_reserved_attributes_names_in_packages(pkgs, error_cls):
errors = []
for pkg_name in pkgs:
name_definitions = collections.defaultdict(list)
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for cls_item in inspect.getmro(pkg_cls):
for name in RESERVED_NAMES:
@@ -383,7 +383,7 @@ def _ensure_packages_are_pickeleable(pkgs, error_cls):
"""Ensure that package objects are pickleable"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = pkg_cls(spack.spec.Spec(pkg_name))
try:
pickle.dumps(pkg)
@@ -424,7 +424,7 @@ def _ensure_all_versions_can_produce_a_fetcher(pkgs, error_cls):
"""Ensure all versions in a package can produce a fetcher"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = pkg_cls(spack.spec.Spec(pkg_name))
try:
spack.fetch_strategy.check_pkg_attributes(pkg)
@@ -449,7 +449,7 @@ def _ensure_docstring_and_no_fixme(pkgs, error_cls):
]
for pkg_name in pkgs:
details = []
filename = spack.repo.PATH.filename_for_package_name(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
with open(filename, "r") as package_file:
for i, line in enumerate(package_file):
pattern = next((r for r in fixme_regexes if r.search(line)), None)
@@ -461,7 +461,7 @@ def _ensure_docstring_and_no_fixme(pkgs, error_cls):
error_msg = "Package '{}' contains boilerplate that need to be removed"
errors.append(error_cls(error_msg.format(pkg_name), details))
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
if not pkg_cls.__doc__:
error_msg = "Package '{}' miss a docstring"
errors.append(error_cls(error_msg.format(pkg_name), []))
@@ -474,7 +474,7 @@ def _ensure_all_packages_use_sha256_checksums(pkgs, error_cls):
"""Ensure no packages use md5 checksums"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
if pkg_cls.manual_download:
continue
@@ -511,7 +511,7 @@ def _ensure_env_methods_are_ported_to_builders(pkgs, error_cls):
"""Ensure that methods modifying the build environment are ported to builder classes."""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
buildsystem_variant, _ = pkg_cls.variants["build_system"]
buildsystem_names = [getattr(x, "value", x) for x in buildsystem_variant.values]
builder_cls_names = [spack.builder.BUILDER_CLS[x].__name__ for x in buildsystem_names]
@@ -538,7 +538,7 @@ def _linting_package_file(pkgs, error_cls):
"""Check for correctness of links"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
# Does the homepage have http, and if so, does https work?
if pkg_cls.homepage.startswith("http://"):
@@ -562,7 +562,7 @@ def _unknown_variants_in_directives(pkgs, error_cls):
"""Report unknown or wrong variants in directives for this package"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
# Check "conflicts" directive
for conflict, triggers in pkg_cls.conflicts.items():
@@ -628,15 +628,15 @@ def _unknown_variants_in_dependencies(pkgs, error_cls):
"""Report unknown dependencies and wrong variants for dependencies"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
filename = spack.repo.PATH.filename_for_package_name(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
for dependency_name, dependency_data in pkg_cls.dependencies.items():
# No need to analyze virtual packages
if spack.repo.PATH.is_virtual(dependency_name):
if spack.repo.path.is_virtual(dependency_name):
continue
try:
dependency_pkg_cls = spack.repo.PATH.get_pkg_class(dependency_name)
dependency_pkg_cls = spack.repo.path.get_pkg_class(dependency_name)
except spack.repo.UnknownPackageError:
# This dependency is completely missing, so report
# and continue the analysis
@@ -675,7 +675,7 @@ def _ensure_variant_defaults_are_parsable(pkgs, error_cls):
"""Ensures that variant defaults are present and parsable from cli"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for variant_name, entry in pkg_cls.variants.items():
variant, _ = entry
default_is_parsable = (
@@ -709,53 +709,27 @@ def _ensure_variant_defaults_are_parsable(pkgs, error_cls):
return errors
@package_directives
def _ensure_variants_have_descriptions(pkgs, error_cls):
"""Ensures that all variants have a description."""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
for variant_name, entry in pkg_cls.variants.items():
variant, _ = entry
if not variant.description:
error_msg = "Variant '{}' in package '{}' is missing a description"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), []))
return errors
@package_directives
def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls):
"""Report if version constraints used in directives are not satisfiable"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
filename = spack.repo.PATH.filename_for_package_name(pkg_name)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
dependencies_to_check = []
for dependency_name, dependency_data in pkg_cls.dependencies.items():
# Skip virtual dependencies for the time being, check on
# their versions can be added later
if spack.repo.PATH.is_virtual(dependency_name):
if spack.repo.path.is_virtual(dependency_name):
continue
dependencies_to_check.extend([edge.spec for edge in dependency_data.values()])
host_architecture = spack.spec.ArchSpec.default_arch()
for s in dependencies_to_check:
dependency_pkg_cls = None
try:
dependency_pkg_cls = spack.repo.PATH.get_pkg_class(s.name)
# Some packages have hacks that might cause failures on some platform
# Allow to explicitly set conditions to skip version checks in that case
skip_conditions = getattr(dependency_pkg_cls, "skip_version_audit", [])
skip_version_check = False
for condition in skip_conditions:
if host_architecture.satisfies(spack.spec.Spec(condition).architecture):
skip_version_check = True
break
assert skip_version_check or any(
v.intersects(s.versions) for v in list(dependency_pkg_cls.versions)
)
dependency_pkg_cls = spack.repo.path.get_pkg_class(s.name)
assert any(v.intersects(s.versions) for v in list(dependency_pkg_cls.versions))
except Exception:
summary = (
"{0}: dependency on {1} cannot be satisfied " "by known versions of {1.name}"
@@ -787,7 +761,7 @@ def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
except variant_exceptions as e:
summary = pkg.name + ': wrong variant in "{0}" directive'
summary = summary.format(directive)
filename = spack.repo.PATH.filename_for_package_name(pkg.name)
filename = spack.repo.path.filename_for_package_name(pkg.name)
error_msg = str(e).strip()
if isinstance(e, KeyError):

View File

@@ -9,6 +9,7 @@
import io
import itertools
import json
import multiprocessing.pool
import os
import re
import shutil
@@ -34,7 +35,6 @@
import spack.cmd
import spack.config as config
import spack.database as spack_db
import spack.error
import spack.hooks
import spack.hooks.sbang
import spack.mirror
@@ -49,11 +49,9 @@
import spack.util.gpg
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
import spack.util.timer as timer
import spack.util.url as url_util
import spack.util.web as web_util
from spack.caches import misc_cache_location
from spack.package_prefs import get_package_dir_permissions, get_package_group
from spack.relocate_text import utf8_paths_to_single_binary_regex
from spack.spec import Spec
from spack.stage import Stage
@@ -63,22 +61,6 @@
_build_cache_keys_relative_path = "_pgp"
class BuildCacheDatabase(spack_db.Database):
"""A database for binary buildcaches.
A database supports writing buildcache index files, in which case certain fields are not
needed in each install record, and no locking is required. To use this feature, it provides
``lock_cfg=NO_LOCK``, and override the list of ``record_fields``.
"""
record_fields = ("spec", "ref_count", "in_buildcache")
def __init__(self, root):
super().__init__(root, lock_cfg=spack_db.NO_LOCK)
self._write_transaction_impl = llnl.util.lang.nullcontext
self._read_transaction_impl = llnl.util.lang.nullcontext
class FetchCacheError(Exception):
"""Error thrown when fetching the cache failed, usually a composite error list."""
@@ -98,14 +80,14 @@ def __init__(self, errors):
else:
err = errors[0]
self.message = "{0}: {1}".format(err.__class__.__name__, str(err))
super().__init__(self.message)
super(FetchCacheError, self).__init__(self.message)
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class BinaryCacheIndex:
class BinaryCacheIndex(object):
"""
The BinaryCacheIndex tracks what specs are available on (usually remote)
binary caches.
@@ -208,7 +190,8 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):
tmpdir = tempfile.mkdtemp()
try:
db = BuildCacheDatabase(tmpdir)
db_root_dir = os.path.join(tmpdir, "db_root")
db = spack_db.Database(None, db_dir=db_root_dir, enable_transaction_locking=False)
try:
self._index_file_cache.init_entry(cache_key)
@@ -334,9 +317,9 @@ def update(self, with_cooldown=False):
from each configured mirror and stored locally (both in memory and
on disk under ``_index_cache_root``)."""
self._init_local_index_cache()
configured_mirror_urls = [
m.fetch_url for m in spack.mirror.MirrorCollection(binary=True).values()
]
mirrors = spack.mirror.MirrorCollection()
configured_mirror_urls = [m.fetch_url for m in mirrors.values()]
items_to_remove = []
spec_cache_clear_needed = False
spec_cache_regenerate_needed = not self._mirrors_for_spec
@@ -534,7 +517,9 @@ class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
super(NoOverwriteException, self).__init__(
f"Refusing to overwrite the following file: {file_path}"
)
class NoGpgException(spack.error.SpackError):
@@ -543,7 +528,7 @@ class NoGpgException(spack.error.SpackError):
"""
def __init__(self, msg):
super().__init__(msg)
super(NoGpgException, self).__init__(msg)
class NoKeyException(spack.error.SpackError):
@@ -552,7 +537,7 @@ class NoKeyException(spack.error.SpackError):
"""
def __init__(self, msg):
super().__init__(msg)
super(NoKeyException, self).__init__(msg)
class PickKeyException(spack.error.SpackError):
@@ -563,7 +548,7 @@ class PickKeyException(spack.error.SpackError):
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
super(PickKeyException, self).__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
@@ -580,7 +565,7 @@ class NoChecksumException(spack.error.SpackError):
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
super(NoChecksumException, self).__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
@@ -593,7 +578,7 @@ class NewLayoutException(spack.error.SpackError):
"""
def __init__(self, msg):
super().__init__(msg)
super(NewLayoutException, self).__init__(msg)
class UnsignedPackageException(spack.error.SpackError):
@@ -720,7 +705,7 @@ def get_buildfile_manifest(spec):
# look for them to decide if a text file needs to be relocated or not
prefixes = [d.prefix for d in spec.traverse(root=True, deptype="all") if not d.external]
prefixes.append(spack.hooks.sbang.sbang_install_path())
prefixes.append(str(spack.store.STORE.layout.root))
prefixes.append(str(spack.store.layout.root))
# Create a giant regex that matches all prefixes
regex = utf8_paths_to_single_binary_regex(prefixes)
@@ -733,7 +718,7 @@ def get_buildfile_manifest(spec):
for rel_path in visitor.symlinks:
abs_path = os.path.join(root, rel_path)
link = os.readlink(abs_path)
if os.path.isabs(link) and link.startswith(spack.store.STORE.layout.root):
if os.path.isabs(link) and link.startswith(spack.store.layout.root):
data["link_to_relocate"].append(rel_path)
# Non-symlinks.
@@ -781,9 +766,9 @@ def get_buildinfo_dict(spec):
return {
"sbang_install_path": spack.hooks.sbang.sbang_install_path(),
"buildpath": spack.store.STORE.layout.root,
"buildpath": spack.store.layout.root,
"spackprefix": spack.paths.prefix,
"relative_prefix": os.path.relpath(spec.prefix, spack.store.STORE.layout.root),
"relative_prefix": os.path.relpath(spec.prefix, spack.store.layout.root),
"relocate_textfiles": manifest["text_to_relocate"],
"relocate_binaries": manifest["binary_to_relocate"],
"relocate_links": manifest["link_to_relocate"],
@@ -877,18 +862,32 @@ def _read_specs_and_push_index(file_list, read_method, cache_prefix, db, temp_di
db: A spack database used for adding specs and then writing the index.
temp_dir (str): Location to write index.json and hash for pushing
concurrency (int): Number of parallel processes to use when fetching
"""
for file in file_list:
contents = read_method(file)
# Need full spec.json name or this gets confused with index.json.
if file.endswith(".json.sig"):
specfile_json = Spec.extract_json_from_clearsig(contents)
fetched_spec = Spec.from_dict(specfile_json)
elif file.endswith(".json"):
fetched_spec = Spec.from_json(contents)
else:
continue
Return:
None
"""
def _fetch_spec_from_mirror(spec_url):
spec_file_contents = read_method(spec_url)
if spec_file_contents:
# Need full spec.json name or this gets confused with index.json.
if spec_url.endswith(".json.sig"):
specfile_json = Spec.extract_json_from_clearsig(spec_file_contents)
return Spec.from_dict(specfile_json)
if spec_url.endswith(".json"):
return Spec.from_json(spec_file_contents)
tp = multiprocessing.pool.ThreadPool(processes=concurrency)
try:
fetched_specs = tp.map(
llnl.util.lang.star(_fetch_spec_from_mirror), [(f,) for f in file_list]
)
finally:
tp.terminate()
tp.join()
for fetched_spec in fetched_specs:
db.add(fetched_spec, None)
db.mark(fetched_spec, "in_buildcache", True)
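The fetch pattern in the older branch above, reduced to a generic sketch (all names here are illustrative):

import multiprocessing.pool

def fetch_all(urls, read_fn, concurrency=32):
    # Illustrative: map read_fn over urls on a thread pool, tearing the
    # pool down even if a read raises.
    pool = multiprocessing.pool.ThreadPool(processes=concurrency)
    try:
        results = pool.map(read_fn, urls)
    finally:
        pool.terminate()
        pool.join()
    return [r for r in results if r is not None]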
@@ -1062,10 +1061,13 @@ def generate_package_index(cache_prefix, concurrency=32):
tty.debug("Retrieving spec descriptor files from {0} to build index".format(cache_prefix))
tmpdir = tempfile.mkdtemp()
db = BuildCacheDatabase(tmpdir)
db.root = None
db_root_dir = db.database_directory
db_root_dir = os.path.join(tmpdir, "db_root")
db = spack_db.Database(
None,
db_dir=db_root_dir,
enable_transaction_locking=False,
record_fields=["spec", "ref_count", "in_buildcache"],
)
try:
_read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency)
@@ -1196,17 +1198,9 @@ def tar_add_metadata(tar: tarfile.TarFile, path: str, data: dict):
tar.addfile(deterministic_tarinfo(tarinfo), io.BytesIO(bstring))
def deterministic_tarinfo_without_buildinfo(tarinfo: tarfile.TarInfo):
"""Skip buildinfo file when creating a tarball, and normalize other tarinfo fields."""
if tarinfo.name.endswith("/.spack/binary_distribution"):
return None
return deterministic_tarinfo(tarinfo)
def _do_create_tarball(tarfile_path: str, binaries_dir: str, pkg_dir: str, buildinfo: dict):
def _do_create_tarball(tarfile_path, binaries_dir, pkg_dir, buildinfo):
with gzip_compressed_tarfile(tarfile_path) as tar:
tar.add(name=binaries_dir, arcname=pkg_dir, filter=deterministic_tarinfo_without_buildinfo)
tar.add(name=binaries_dir, arcname=pkg_dir, filter=deterministic_tarinfo)
tar_add_metadata(tar, buildinfo_file_name(pkg_dir), buildinfo)
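The excerpt shows where a deterministic ``filter`` is applied but not what it normalizes; a sketch of the usual normalization (assumed details):

import tarfile

def deterministic(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:
    # Assumed normalization: zero out fields that vary between runs so
    # identical inputs produce byte-identical tarballs.
    tarinfo.uid = tarinfo.gid = 0
    tarinfo.uname = tarinfo.gname = ""
    tarinfo.mtime = 0
    return tarinfo

# with tarfile.open("pkg.tar.gz", "w:gz") as tar:
#     tar.add("prefix", arcname="pkg", filter=deterministic)

Note that gzip itself embeds a timestamp, which is presumably why the excerpt routes creation through a dedicated gzip_compressed_tarfile helper.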
@@ -1214,6 +1208,9 @@ class PushOptions(NamedTuple):
#: Overwrite existing tarball/metadata files in buildcache
force: bool = False
#: Allow absolute paths to package prefixes when creating a tarball
allow_root: bool = False
#: Regenerated indices after pushing
regenerate_index: bool = False
@@ -1258,7 +1255,7 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
# without concretizing with the current spack packages
# and preferences
spec_file = spack.store.STORE.layout.spec_file_path(spec)
spec_file = spack.store.layout.spec_file_path(spec)
specfile_name = tarball_name(spec, ".spec.json")
specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name))
signed_specfile_path = "{0}.sig".format(specfile_path)
@@ -1286,6 +1283,9 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
# create info for later relocation and create tar
buildinfo = get_buildinfo_dict(spec)
if not options.allow_root:
ensure_package_relocatable(buildinfo, binaries_dir)
_do_create_tarball(tarfile_path, binaries_dir, pkg_dir, buildinfo)
# get the sha256 checksum of the tarball
@@ -1300,7 +1300,15 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
else:
raise ValueError("{0} not a valid spec file type".format(spec_file))
spec_dict["buildcache_layout_version"] = 1
spec_dict["binary_cache_checksum"] = {"hash_algorithm": "sha256", "hash": checksum}
bchecksum = {}
bchecksum["hash_algorithm"] = "sha256"
bchecksum["hash"] = checksum
spec_dict["binary_cache_checksum"] = bchecksum
# Add original install prefix relative to layout root to spec.json.
# This will be used to determine if the directory layout has changed.
buildinfo = {}
buildinfo["relative_prefix"] = os.path.relpath(spec.prefix, spack.store.layout.root)
spec_dict["buildinfo"] = buildinfo
with open(specfile_path, "w") as outfile:
# Note: when using gpg clear sign, we need to avoid long lines (19995 chars).
@@ -1357,7 +1365,7 @@ def specs_to_be_packaged(
packageable = lambda n: not n.external and n.installed
# Mass install check
with spack.store.STORE.db.read_transaction():
with spack.store.db.read_transaction():
return list(filter(packageable, nodes))
@@ -1418,7 +1426,7 @@ def try_fetch(url_to_fetch):
try:
stage.fetch()
except spack.error.FetchError:
except web_util.FetchError:
stage.destroy()
return None
@@ -1459,9 +1467,8 @@ def download_tarball(spec, unsigned=False, mirrors_for_spec=None):
"signature_verified": "true-if-binary-pkg-was-already-verified"
}
"""
configured_mirrors = spack.mirror.MirrorCollection(binary=True).values()
if not configured_mirrors:
tty.die("Please add a spack mirror to allow download of pre-compiled packages.")
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " + "download of pre-compiled packages.")
tarball = tarball_path_name(spec, ".spack")
specfile_prefix = tarball_name(spec, ".spec")
@@ -1478,7 +1485,11 @@ def download_tarball(spec, unsigned=False, mirrors_for_spec=None):
# we need was in an un-indexed mirror. No need to check any
# mirror for the spec twice though.
try_first = [i["mirror_url"] for i in mirrors_for_spec] if mirrors_for_spec else []
try_next = [i.fetch_url for i in configured_mirrors if i.fetch_url not in try_first]
try_next = [
i.fetch_url
for i in spack.mirror.MirrorCollection().values()
if i.fetch_url not in try_first
]
for url in try_first + try_next:
mirrors_to_try.append(
@@ -1556,6 +1567,12 @@ def download_tarball(spec, unsigned=False, mirrors_for_spec=None):
return None
def ensure_package_relocatable(buildinfo, binaries_dir):
"""Check if package binaries are relocatable."""
binaries = [os.path.join(binaries_dir, f) for f in buildinfo["relocate_binaries"]]
relocate.ensure_binaries_are_relocatable(binaries)
def dedupe_hardlinks_if_necessary(root, buildinfo):
"""Updates a buildinfo dict for old archives that did
not dedupe hardlinks. De-duping hardlinks is necessary
@@ -1594,7 +1611,7 @@ def relocate_package(spec):
"""
workdir = str(spec.prefix)
buildinfo = read_buildinfo_file(workdir)
new_layout_root = str(spack.store.STORE.layout.root)
new_layout_root = str(spack.store.layout.root)
new_prefix = str(spec.prefix)
new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root))
new_spack_prefix = str(spack.paths.prefix)
@@ -1779,46 +1796,16 @@ def _extract_inner_tarball(spec, filename, extract_to, unsigned, remote_checksum
return tarfile_path
def _tar_strip_component(tar: tarfile.TarFile, prefix: str):
"""Strip the top-level directory `prefix` from the member names in a tarfile."""
# Including trailing /, otherwise we end up with absolute paths.
regex = re.compile(re.escape(prefix) + "/*")
# Remove the top-level directory from the member (link)names.
# Note: when a tarfile is created, relative in-prefix symlinks are
# expanded to matching member names of tarfile entries. So, we have
# to ensure that those are updated too.
# Absolute symlinks are copied verbatim -- relocation should take care of
# them.
for m in tar.getmembers():
result = regex.match(m.name)
assert result is not None
m.name = m.name[result.end() :]
if m.linkname:
result = regex.match(m.linkname)
if result:
m.linkname = m.linkname[result.end() :]
def extract_tarball(spec, download_result, unsigned=False, force=False, timer=timer.NULL_TIMER):
def extract_tarball(spec, download_result, unsigned=False, force=False):
"""
extract binary tarball for given package into install area
"""
timer.start("extract")
if os.path.exists(spec.prefix):
if force:
shutil.rmtree(spec.prefix)
else:
raise NoOverwriteException(str(spec.prefix))
# Create the install prefix
fsys.mkdirp(
spec.prefix,
mode=get_package_dir_permissions(spec),
group=get_package_group(spec),
default_perms="parents",
)
specfile_path = download_result["specfile_stage"].save_filename
with open(specfile_path, "r") as inputfile:
@@ -1872,59 +1859,56 @@ def extract_tarball(spec, download_result, unsigned=False, force=False, timer=ti
tarfile_path, size, contents, "sha256", expected, local_checksum
)
try:
with closing(tarfile.open(tarfile_path, "r")) as tar:
# Remove install prefix from the tarball to extract directly into spec.prefix
_tar_strip_component(tar, prefix=_ensure_common_prefix(tar))
tar.extractall(path=spec.prefix)
except Exception:
shutil.rmtree(spec.prefix, ignore_errors=True)
_delete_staged_downloads(download_result)
raise
new_relative_prefix = str(os.path.relpath(spec.prefix, spack.store.layout.root))
# if the original relative prefix is in the spec file use it
buildinfo = spec_dict.get("buildinfo", {})
old_relative_prefix = buildinfo.get("relative_prefix", new_relative_prefix)
rel = buildinfo.get("relative_rpaths")
info = "old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s"
tty.debug(info % (old_relative_prefix, new_relative_prefix, rel), level=2)
# Extract the tarball into the store root, presumably on the same filesystem.
# The directory created is the base directory name of the old prefix.
# Moving the old prefix name to the new prefix location should preserve
# hard links and symbolic links.
extract_tmp = os.path.join(spack.store.layout.root, ".tmp")
mkdirp(extract_tmp)
extracted_dir = os.path.join(extract_tmp, old_relative_prefix.split(os.path.sep)[-1])
with closing(tarfile.open(tarfile_path, "r")) as tar:
try:
tar.extractall(path=extract_tmp)
except Exception as e:
_delete_staged_downloads(download_result)
shutil.rmtree(extracted_dir)
raise e
try:
shutil.move(extracted_dir, spec.prefix)
except Exception as e:
_delete_staged_downloads(download_result)
shutil.rmtree(extracted_dir)
raise e
os.remove(tarfile_path)
os.remove(specfile_path)
timer.stop("extract")
timer.start("relocate")
try:
relocate_package(spec)
except Exception as e:
shutil.rmtree(spec.prefix, ignore_errors=True)
shutil.rmtree(spec.prefix)
raise e
else:
manifest_file = os.path.join(
spec.prefix,
spack.store.STORE.layout.metadata_dir,
spack.store.STORE.layout.manifest_file_name,
spec.prefix, spack.store.layout.metadata_dir, spack.store.layout.manifest_file_name
)
if not os.path.exists(manifest_file):
spec_id = spec.format("{name}/{hash:7}")
tty.warn("No manifest file in tarball for spec %s" % spec_id)
finally:
if tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
shutil.rmtree(tmpdir)
if os.path.exists(filename):
os.remove(filename)
_delete_staged_downloads(download_result)
timer.stop("relocate")
def _ensure_common_prefix(tar: tarfile.TarFile) -> str:
# Get the shortest length directory.
common_prefix = min((e.name for e in tar.getmembers() if e.isdir()), key=len, default=None)
if common_prefix is None:
raise ValueError("Tarball does not contain a common prefix")
# Validate that each file starts with the prefix
for member in tar.getmembers():
if not member.name.startswith(common_prefix):
raise ValueError(
f"Tarball contains file {member.name} outside of prefix {common_prefix}"
)
return common_prefix
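Taken together, the two helpers let `extract_tarball` unpack straight into the install prefix. A rough standalone sketch of the validate-then-strip sequence, run on an in-memory archive:

import io
import re
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    top = tarfile.TarInfo("pkg-1.0")
    top.type = tarfile.DIRTYPE
    tar.addfile(top)
    tar.addfile(tarfile.TarInfo("pkg-1.0/bin/tool"), io.BytesIO(b""))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r") as tar:
    # The shortest directory name is taken as the common prefix ...
    prefix = min((e.name for e in tar.getmembers() if e.isdir()), key=len)
    assert all(m.name.startswith(prefix) for m in tar.getmembers())
    # ... then stripped (with trailing slashes) from every member name.
    regex = re.compile(re.escape(prefix) + "/*")
    for m in tar.getmembers():
        m.name = m.name[regex.match(m.name).end():]
    print([m.name for m in tar.getmembers()])  # ['', 'bin/tool']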
def install_root_node(spec, unsigned=False, force=False, sha256=None):
@@ -1973,7 +1957,7 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
extract_tarball(spec, download_result, unsigned, force)
spack.hooks.post_install(spec, False)
spack.store.STORE.db.add(spec, spack.store.STORE.layout)
spack.store.db.add(spec, spack.store.layout)
def install_single_spec(spec, unsigned=False, force=False):
@@ -1998,9 +1982,7 @@ def try_direct_fetch(spec, mirrors=None):
specfile_is_signed = False
found_specs = []
binary_mirrors = spack.mirror.MirrorCollection(mirrors=mirrors, binary=True).values()
for mirror in binary_mirrors:
for mirror in spack.mirror.MirrorCollection(mirrors=mirrors).values():
buildcache_fetch_url_json = url_util.join(
mirror.fetch_url, _build_cache_relative_path, specfile_name
)
@@ -2063,7 +2045,7 @@ def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False):
if spec is None:
return []
if not spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True):
if not spack.mirror.MirrorCollection(mirrors=mirrors_to_check):
tty.debug("No Spack mirrors are currently configured")
return {}
@@ -2102,7 +2084,7 @@ def clear_spec_cache():
def get_keys(install=False, trust=False, force=False, mirrors=None):
"""Get pgp public keys available on mirror with suffix .pub"""
mirror_collection = mirrors or spack.mirror.MirrorCollection(binary=True)
mirror_collection = mirrors or spack.mirror.MirrorCollection()
if not mirror_collection:
tty.die("Please add a spack mirror to allow " + "download of build caches.")
@@ -2145,7 +2127,7 @@ def get_keys(install=False, trust=False, force=False, mirrors=None):
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except spack.error.FetchError:
except web_util.FetchError:
continue
tty.debug("Found key {0}".format(fingerprint))
@@ -2263,7 +2245,7 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None):
"""
rebuilds = {}
for mirror in spack.mirror.MirrorCollection(mirrors, binary=True).values():
for mirror in spack.mirror.MirrorCollection(mirrors).values():
tty.debug("Checking for built specs at {0}".format(mirror.fetch_url))
rebuild_list = []
@@ -2297,7 +2279,7 @@ def _download_buildcache_entry(mirror_root, descriptions):
try:
stage.fetch()
break
except spack.error.FetchError as e:
except web_util.FetchError as e:
tty.debug(e)
else:
if fail_if_missing:
@@ -2307,7 +2289,7 @@ def _download_buildcache_entry(mirror_root, descriptions):
def download_buildcache_entry(file_descriptions, mirror_url=None):
if not mirror_url and not spack.mirror.MirrorCollection(binary=True):
if not mirror_url and not spack.mirror.MirrorCollection():
tty.die(
"Please provide or add a spack mirror to allow " + "download of buildcache entries."
)
@@ -2316,7 +2298,7 @@ def download_buildcache_entry(file_descriptions, mirror_url=None):
mirror_root = os.path.join(mirror_url, _build_cache_relative_path)
return _download_buildcache_entry(mirror_root, file_descriptions)
for mirror in spack.mirror.MirrorCollection(binary=True).values():
for mirror in spack.mirror.MirrorCollection().values():
mirror_root = os.path.join(mirror.fetch_url, _build_cache_relative_path)
if _download_buildcache_entry(mirror_root, file_descriptions):
@@ -2355,7 +2337,7 @@ def download_single_spec(concrete_spec, destination, mirror_url=None):
return download_buildcache_entry(files_to_fetch, mirror_url)
class BinaryCacheQuery:
class BinaryCacheQuery(object):
"""Callable object to query if a spec is in a binary cache"""
def __init__(self, all_architectures):
@@ -2374,12 +2356,22 @@ def __init__(self, all_architectures):
self.possible_specs = specs
def __call__(self, spec: Spec, **kwargs):
def __call__(self, spec, **kwargs):
"""
Args:
spec: The spec being searched for
spec (str): The spec being searched for in its string representation or hash.
"""
return [s for s in self.possible_specs if s.satisfies(spec)]
matches = []
if spec.startswith("/"):
# Matching a DAG hash
query_hash = spec.replace("/", "")
for candidate_spec in self.possible_specs:
if candidate_spec.dag_hash().startswith(query_hash):
matches.append(candidate_spec)
else:
# Matching a spec constraint
matches = [s for s in self.possible_specs if s.satisfies(spec)]
return matches
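The removed `__call__` dispatched on the leading slash Spack uses for DAG-hash references. A hedged sketch of that dispatch, with plain dictionaries standing in for concrete specs:

def query(spec_string, possible_specs):
    if spec_string.startswith("/"):
        # A leading "/" means: match candidates by DAG-hash prefix.
        query_hash = spec_string.replace("/", "")
        return [s for s in possible_specs if s["hash"].startswith(query_hash)]
    # Otherwise treat the string as a constraint (name-only here).
    return [s for s in possible_specs if s["name"] == spec_string]

possible = [
    {"name": "zlib", "hash": "abc1234def"},
    {"name": "cmake", "hash": "9876543fed"},
]
print(query("/abc1", possible))  # hash-prefix match
print(query("cmake", possible))  # constraint match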
class FetchIndexError(Exception):

View File

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Function and classes needed to bootstrap Spack itself."""
from .config import ensure_bootstrap_configuration, is_bootstrapping, store_path
from .config import ensure_bootstrap_configuration, is_bootstrapping
from .core import all_core_root_specs, ensure_core_dependencies, ensure_patchelf_in_path_or_raise
from .environment import BootstrapEnvironment, ensure_environment_dependencies
from .status import status_message
@@ -18,5 +18,4 @@
"ensure_environment_dependencies",
"BootstrapEnvironment",
"status_message",
"store_path",
]

View File

@@ -50,7 +50,7 @@ def _try_import_from_store(
# We have to run as part of this python interpreter
query_spec += " ^" + spec_for_current_python()
installed_specs = spack.store.STORE.db.query(query_spec, installed=True)
installed_specs = spack.store.db.query(query_spec, installed=True)
for candidate_spec in installed_specs:
pkg = candidate_spec["python"].package
@@ -183,7 +183,7 @@ def _executables_in_store(
executables_str = ", ".join(executables)
msg = "[BOOTSTRAP EXECUTABLES {0}] Try installed specs with query '{1}'"
tty.debug(msg.format(executables_str, query_spec))
installed_specs = spack.store.STORE.db.query(query_spec, installed=True)
installed_specs = spack.store.db.query(query_spec, installed=True)
if installed_specs:
for concrete_spec in installed_specs:
bin_dir = concrete_spec.prefix.bin

View File

@@ -124,9 +124,9 @@ def _read_and_sanitize_configuration() -> Dict[str, Any]:
def _bootstrap_config_scopes() -> Sequence["spack.config.ConfigScope"]:
tty.debug("[BOOTSTRAP CONFIG SCOPE] name=_builtin")
config_scopes: MutableSequence["spack.config.ConfigScope"] = [
spack.config.InternalConfigScope("_builtin", spack.config.CONFIG_DEFAULTS)
spack.config.InternalConfigScope("_builtin", spack.config.config_defaults)
]
configuration_paths = (spack.config.CONFIGURATION_DEFAULTS_PATH, ("bootstrap", _config_path()))
configuration_paths = (spack.config.configuration_defaults_path, ("bootstrap", _config_path()))
for name, path in configuration_paths:
platform = spack.platforms.host().name
platform_scope = spack.config.ConfigScope(
@@ -150,19 +150,18 @@ def _add_compilers_if_missing() -> None:
@contextlib.contextmanager
def _ensure_bootstrap_configuration() -> Generator:
spack.store.ensure_singleton_created()
bootstrap_store_path = store_path()
user_configuration = _read_and_sanitize_configuration()
with spack.environment.no_active_environment():
with spack.platforms.prevent_cray_detection(), spack.platforms.use_platform(
spack.platforms.real_host()
), spack.repo.use_repositories(spack.paths.packages_path):
), spack.repo.use_repositories(spack.paths.packages_path), spack.store.use_store(
bootstrap_store_path
):
# Default configuration scopes excluding command line
# and builtin but accounting for platform specific scopes
config_scopes = _bootstrap_config_scopes()
with spack.config.use_configuration(*config_scopes), spack.store.use_store(
bootstrap_store_path, extra_data={"padded_length": 0}
):
with spack.config.use_configuration(*config_scopes):
# We may need to compile code from sources, so ensure we
# have compilers for the current platform
_add_compilers_if_missing()

View File

@@ -476,22 +476,15 @@ def ensure_executables_in_path_or_raise(
def _add_externals_if_missing() -> None:
search_list = [
# clingo
"cmake",
"bison",
spack.repo.path.get_pkg_class("cmake"),
spack.repo.path.get_pkg_class("bison"),
# GnuPG
"gawk",
# develop deps
"git",
spack.repo.path.get_pkg_class("gawk"),
]
if IS_WINDOWS:
search_list.append("winbison")
externals = spack.detection.by_path(search_list)
# System git is typically deprecated, so mark as non-buildable to force it as external
non_buildable_externals = {k: externals.pop(k) for k in ("git",) if k in externals}
spack.detection.update_configuration(externals, scope="bootstrap", buildable=True)
spack.detection.update_configuration(
non_buildable_externals, scope="bootstrap", buildable=False
)
search_list.append(spack.repo.path.get_pkg_class("winbison"))
detected_packages = spack.detection.by_executable(search_list)
spack.detection.update_configuration(detected_packages, scope="bootstrap")
def clingo_root_spec() -> str:

View File

@@ -15,15 +15,14 @@
from llnl.util import tty
import spack.build_environment
import spack.environment
import spack.tengine
import spack.util.cpus
import spack.util.executable
from spack.environment import depfile
from ._common import _root_spec
from .config import root_path, spec_for_current_python, store_path
from .core import _add_externals_if_missing
class BootstrapEnvironment(spack.environment.Environment):
@@ -137,7 +136,7 @@ def _install_with_depfile(self) -> None:
"-C",
str(self.environment_root()),
"-j",
str(spack.util.cpus.determine_number_of_jobs(parallel=True)),
str(spack.build_environment.determine_number_of_jobs(parallel=True)),
**kwargs,
)
@@ -186,7 +185,6 @@ def pytest_root_spec() -> str:
def ensure_environment_dependencies() -> None:
"""Ensure Spack dependencies from the bootstrap environment are installed and ready to use"""
_add_externals_if_missing()
with BootstrapEnvironment() as env:
env.update_installations()
env.update_syspath_and_environ()

View File

@@ -68,7 +68,7 @@
from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import spack_install_test_log
from spack.installer import InstallError
from spack.util.cpus import determine_number_of_jobs
from spack.util.cpus import cpus_available
from spack.util.environment import (
SYSTEM_DIRS,
EnvironmentModifications,
@@ -148,7 +148,7 @@ class MakeExecutable(Executable):
def __init__(self, name, jobs, **kwargs):
supports_jobserver = kwargs.pop("supports_jobserver", True)
super().__init__(name, **kwargs)
super(MakeExecutable, self).__init__(name, **kwargs)
self.supports_jobserver = supports_jobserver
self.jobs = jobs
@@ -175,7 +175,7 @@ def __call__(self, *args, **kwargs):
if jobs_env_jobs is not None:
kwargs["extra_env"] = {jobs_env: str(jobs_env_jobs)}
return super().__call__(*args, **kwargs)
return super(MakeExecutable, self).__call__(*args, **kwargs)
def _on_cray():
@@ -537,6 +537,39 @@ def update_compiler_args_for_dep(dep):
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))
def determine_number_of_jobs(
parallel=False, command_line=None, config_default=None, max_cpus=None
):
"""
Packages that require sequential builds need 1 job. Otherwise we use the
number of jobs set on the command line. If not set, then we use the config
defaults (which are usually set through the builtin config scope), but we
cap to the number of CPUs available to avoid oversubscription.
Parameters:
parallel (bool or None): true when package supports parallel builds
command_line (int or None): command line override
config_default (int or None): config default number of jobs
max_cpus (int or None): maximum number of CPUs available. When None, this
value is automatically determined.
"""
if not parallel:
return 1
if command_line is None and "command_line" in spack.config.scopes():
command_line = spack.config.get("config:build_jobs", scope="command_line")
if command_line is not None:
return command_line
max_cpus = max_cpus or cpus_available()
# in some rare cases _builtin config may not be set, so default to max 16
config_default = config_default or spack.config.get("config:build_jobs", 16)
return min(max_cpus, config_default)
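As a quick illustration, the same decision tree with the Spack config lookups stubbed out (the 16-job fallback matches the comment above):

import os

def determine_jobs(parallel, command_line=None, config_default=None, max_cpus=None):
    if not parallel:
        return 1                           # sequential builds get one job
    if command_line is not None:
        return command_line                # explicit command-line value wins
    max_cpus = max_cpus or os.cpu_count() or 1
    config_default = config_default or 16  # rare: _builtin scope unset
    return min(max_cpus, config_default)   # cap to avoid oversubscription

print(determine_jobs(parallel=True, max_cpus=4))       # 4
print(determine_jobs(parallel=True, command_line=32))  # 32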
def set_module_variables_for_package(pkg):
"""Populate the Python module of a package with some useful global names.
This makes things easier for package writers.
@@ -994,7 +1027,7 @@ def get_cmake_prefix_path(pkg):
def _setup_pkg_and_run(
serialized_pkg, function, kwargs, write_pipe, input_multiprocess_fd, jsfd1, jsfd2
serialized_pkg, function, kwargs, child_pipe, input_multiprocess_fd, jsfd1, jsfd2
):
context = kwargs.get("context", "build")
@@ -1015,12 +1048,12 @@ def _setup_pkg_and_run(
pkg, dirty=kwargs.get("dirty", False), context=context
)
return_value = function(pkg, kwargs)
write_pipe.send(return_value)
child_pipe.send(return_value)
except StopPhase as e:
# Do not create a full ChildError from this, it's not an error
# it's a control statement.
write_pipe.send(e)
child_pipe.send(e)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
@@ -1069,10 +1102,10 @@ def _setup_pkg_and_run(
context,
package_context,
)
write_pipe.send(ce)
child_pipe.send(ce)
finally:
write_pipe.close()
child_pipe.close()
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
@@ -1116,7 +1149,7 @@ def child_fun():
For more information on `multiprocessing` child process creation
mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
"""
read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
parent_pipe, child_pipe = multiprocessing.Pipe()
input_multiprocess_fd = None
jobserver_fd1 = None
jobserver_fd2 = None
@@ -1141,7 +1174,7 @@ def child_fun():
serialized_pkg,
function,
kwargs,
write_pipe,
child_pipe,
input_multiprocess_fd,
jobserver_fd1,
jobserver_fd2,
@@ -1150,12 +1183,6 @@ def child_fun():
p.start()
# We close the writable end of the pipe now to be sure that p is the
# only process which owns a handle for it. This ensures that when p
# closes its handle for the writable end, read_pipe.recv() will
# promptly report the readable end as being ready.
write_pipe.close()
except InstallError as e:
e.pkg = pkg
raise
@@ -1165,16 +1192,7 @@ def child_fun():
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
def exitcode_msg(p):
typ = "exit" if p.exitcode >= 0 else "signal"
return f"{typ} {abs(p.exitcode)}"
try:
child_result = read_pipe.recv()
except EOFError:
p.join()
raise InstallError(f"The process has stopped unexpectedly ({exitcode_msg(p)})")
child_result = parent_pipe.recv()
p.join()
# If returns a StopPhase, raise it
@@ -1194,10 +1212,6 @@ def exitcode_msg(p):
child_result.print_context()
raise child_result
# Fallback. Usually caught beforehand in EOFError above.
if p.exitcode != 0:
raise InstallError(f"The process failed unexpectedly ({exitcode_msg(p)})")
return child_result
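The shape of that parent/child handshake, reduced to a runnable sketch: a one-way pipe, the parent closing its copy of the writable end, and `EOFError` standing in for a crashed child:

import multiprocessing

def child(write_pipe):
    try:
        write_pipe.send("build ok")
    finally:
        write_pipe.close()

if __name__ == "__main__":
    read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
    p = multiprocessing.Process(target=child, args=(write_pipe,))
    p.start()
    # Close the parent's handle so that, once the child exits, the
    # readable end reports EOF instead of blocking forever.
    write_pipe.close()
    try:
        result = read_pipe.recv()
    except EOFError:
        p.join()
        raise RuntimeError(f"child stopped unexpectedly (exit {p.exitcode})")
    p.join()
    print(result)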
@@ -1242,8 +1256,9 @@ def make_stack(tb, stack=None):
func = getattr(obj, tb.tb_frame.f_code.co_name, "")
if func:
typename, *_ = func.__qualname__.partition(".")
if isinstance(obj, CONTEXT_BASES) and typename not in basenames:
break
if isinstance(obj, CONTEXT_BASES) and typename not in basenames:
break
else:
return None
@@ -1317,7 +1332,7 @@ class ChildError(InstallError):
build_errors = [("spack.util.executable", "ProcessError")]
def __init__(self, msg, module, classname, traceback_string, log_name, log_type, context):
super().__init__(msg)
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string

View File

@@ -39,7 +39,7 @@ def check_paths(path_list, filetype, predicate):
check_paths(pkg.sanity_check_is_file, "file", os.path.isfile)
check_paths(pkg.sanity_check_is_dir, "directory", os.path.isdir)
ignore_file = llnl.util.lang.match_predicate(spack.store.STORE.layout.hidden_file_regexes)
ignore_file = llnl.util.lang.match_predicate(spack.store.layout.hidden_file_regexes)
if all(map(ignore_file, os.listdir(pkg.prefix))):
msg = "Install failed for {0}. Nothing was installed!"
raise spack.installer.InstallError(msg.format(pkg.name))

View File

@@ -55,8 +55,7 @@ def flags_to_build_system_args(self, flags):
setattr(self, "configure_flag_args", [])
for flag, values in flags.items():
if values:
var_name = "LIBS" if flag == "ldlibs" else flag.upper()
values_str = "{0}={1}".format(var_name, " ".join(values))
values_str = "{0}={1}".format(flag.upper(), " ".join(values))
self.configure_flag_args.append(values_str)
# Spack's fflags are meant for both F77 and FC, therefore we
# additionally set FCFLAGS if required.

View File

@@ -162,6 +162,17 @@ def initconfig_compiler_entries(self):
libs_string = libs_format_string.format(lang)
entries.append(cmake_cache_string(libs_string, libs_flags))
# Set the generator in the cached config
if self.spec.satisfies("generator=make"):
entries.append(cmake_cache_string("CMAKE_GENERATOR", "Unix Makefiles"))
if self.spec.satisfies("generator=ninja"):
entries.append(cmake_cache_string("CMAKE_GENERATOR", "Ninja"))
entries.append(
cmake_cache_string(
"CMAKE_MAKE_PROGRAM", "{0}/ninja".format(spec["ninja"].prefix.bin)
)
)
return entries
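Presumably `cmake_cache_string` renders one `set(... CACHE STRING ...)` line per entry into the generated initconfig file; a guess at the expansion for the Ninja case, under that assumption:

def cmake_cache_string(name, value, comment=""):
    # Assumed shape of the helper: one cached set() line per entry.
    return 'set({0} "{1}" CACHE STRING "{2}")'.format(name, value, comment)

print(cmake_cache_string("CMAKE_GENERATOR", "Ninja"))
# set(CMAKE_GENERATOR "Ninja" CACHE STRING "")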
def initconfig_mpi_entries(self):
@@ -278,7 +289,6 @@ def std_initconfig_entries(self):
"# CMake executable path: {0}".format(self.pkg.spec["cmake"].command.path),
"#------------------{0}\n".format("-" * 60),
cmake_cache_path("CMAKE_PREFIX_PATH", cmake_prefix_path),
self.define_cmake_cache_from_variant("CMAKE_BUILD_TYPE", "build_type"),
]
def initconfig_package_entries(self):
@@ -301,7 +311,7 @@ def initconfig(self, pkg, spec, prefix):
@property
def std_cmake_args(self):
args = super().std_cmake_args
args = super(CachedCMakeBuilder, self).std_cmake_args
args.extend(["-C", self.cache_path])
return args

View File

@@ -248,8 +248,7 @@ def std_cmake_args(self):
@staticmethod
def std_args(pkg, generator=None):
"""Computes the standard cmake arguments for a generic package"""
default_generator = "Ninja" if sys.platform == "win32" else "Unix Makefiles"
generator = generator or default_generator
generator = generator or "Unix Makefiles"
valid_primary_generators = ["Unix Makefiles", "Ninja"]
primary_generator = _extract_primary_generator(generator)
if primary_generator not in valid_primary_generators:
@@ -274,6 +273,7 @@ def std_args(pkg, generator=None):
generator,
define("CMAKE_INSTALL_PREFIX", pathlib.Path(pkg.prefix).as_posix()),
define("CMAKE_BUILD_TYPE", build_type),
define("BUILD_TESTING", pkg.run_tests),
]
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
@@ -296,46 +296,8 @@ def std_args(pkg, generator=None):
define("CMAKE_PREFIX_PATH", spack.build_environment.get_cmake_prefix_path(pkg)),
]
)
return args
@staticmethod
def define_cuda_architectures(pkg):
"""Returns the str ``-DCMAKE_CUDA_ARCHITECTURES:STRING=(expanded cuda_arch)``.
``cuda_arch`` is variant composed of a list of target CUDA architectures and
it is declared in the cuda package.
This method is no-op for cmake<3.18 and when ``cuda_arch`` variant is not set.
"""
cmake_flag = str()
if "cuda_arch" in pkg.spec.variants and pkg.spec.satisfies("^cmake@3.18:"):
cmake_flag = CMakeBuilder.define(
"CMAKE_CUDA_ARCHITECTURES", pkg.spec.variants["cuda_arch"].value
)
return cmake_flag
@staticmethod
def define_hip_architectures(pkg):
"""Returns the str ``-DCMAKE_HIP_ARCHITECTURES:STRING=(expanded amdgpu_target)``.
``amdgpu_target`` is variant composed of a list of the target HIP
architectures and it is declared in the rocm package.
This method is no-op for cmake<3.18 and when ``amdgpu_target`` variant is
not set.
"""
cmake_flag = str()
if "amdgpu_target" in pkg.spec.variants and pkg.spec.satisfies("^cmake@3.21:"):
cmake_flag = CMakeBuilder.define(
"CMAKE_HIP_ARCHITECTURES", pkg.spec.variants["amdgpu_target"].value
)
return cmake_flag
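Both helpers return a single `-D` flag. Assuming Spack's `define()` semicolon-joins list-like values the way CMake expects, the expansion for a multi-valued variant would look like this (simplified: everything typed as STRING):

def define(cmake_var, value):
    # Simplified stand-in for CMakeBuilder.define.
    if isinstance(value, (list, tuple)):
        value = ";".join(str(v) for v in value)
    return "-D{0}:STRING={1}".format(cmake_var, value)

print(define("CMAKE_CUDA_ARCHITECTURES", ("70", "80")))
# -DCMAKE_CUDA_ARCHITECTURES:STRING=70;80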
@staticmethod
def define(cmake_var, value):
"""Return a CMake command line argument that defines a variable.
@@ -450,6 +412,7 @@ def cmake_args(self):
* CMAKE_INSTALL_PREFIX
* CMAKE_BUILD_TYPE
* BUILD_TESTING
which will be set automatically.
"""

View File

@@ -102,10 +102,11 @@ def cuda_flags(arch_list):
depends_on("cuda@11.0:", when="cuda_arch=80")
depends_on("cuda@11.1:", when="cuda_arch=86")
depends_on("cuda@11.4:", when="cuda_arch=87")
depends_on("cuda@11.8:", when="cuda_arch=89")
depends_on("cuda@12.0:", when="cuda_arch=90")
depends_on("cuda@11.4:", when="cuda_arch=87")
depends_on("cuda@11.8:", when="cuda_arch=89")
depends_on("cuda@11.8:", when="cuda_arch=90")
# From the NVIDIA install guide we know of conflicts for particular
# platforms (linux, darwin), architectures (x86, powerpc) and compilers
@@ -154,7 +155,7 @@ def cuda_flags(arch_list):
conflicts("%pgi@:15.3,15.5:", when="+cuda ^cuda@7.5 target=x86_64:")
conflicts("%pgi@:16.2,16.0:16.3", when="+cuda ^cuda@8 target=x86_64:")
conflicts("%pgi@:15,18:", when="+cuda ^cuda@9.0:9.1 target=x86_64:")
conflicts("%pgi@:16,19:", when="+cuda ^cuda@9.2.88:10.0 target=x86_64:")
conflicts("%pgi@:16,19:", when="+cuda ^cuda@9.2.88:10 target=x86_64:")
conflicts("%pgi@:17,20:", when="+cuda ^cuda@10.1.105:10.2.89 target=x86_64:")
conflicts("%pgi@:17,21:", when="+cuda ^cuda@11.0.2:11.1.0 target=x86_64:")
conflicts("%clang@:3.4", when="+cuda ^cuda@:7.5 target=x86_64:")

View File

@@ -209,5 +209,5 @@ def install(self, pkg, spec, prefix):
def check(self):
"""Search Meson-generated files for the target ``test`` and run it if found."""
with fs.working_dir(self.build_directory):
self.pkg._if_ninja_target_execute("test")
self.pkg._if_ninja_target_execute("check")
self._if_ninja_target_execute("test")
self._if_ninja_target_execute("check")

View File

@@ -95,7 +95,7 @@ def makefile_root(self):
return self.stage.source_path
@property
def makefile_name(self):
def nmakefile_name(self):
"""Name of the current makefile. This is currently an empty value.
If a project defines this value, it will be used with the /f argument
to provide nmake an explicit makefile. This is useful in scenarios where
@@ -126,8 +126,8 @@ def build(self, pkg, spec, prefix):
"""Run "nmake" on the build targets specified by the builder."""
opts = self.std_nmake_args
opts += self.nmake_args()
if self.makefile_name:
opts.append("/F{}".format(self.makefile_name))
if self.nmakefile_name:
opts.append("/f {}".format(self.nmakefile_name))
with fs.working_dir(self.build_directory):
inspect.getmodule(self.pkg).nmake(
*opts, *self.build_targets, ignore_quotes=self.ignore_quotes
@@ -139,8 +139,8 @@ def install(self, pkg, spec, prefix):
opts = self.std_nmake_args
opts += self.nmake_args()
opts += self.nmake_install_args()
if self.makefile_name:
opts.append("/F{}".format(self.makefile_name))
if self.nmakefile_name:
opts.append("/f {}".format(self.nmakefile_name))
opts.append(self.define("PREFIX", prefix))
with fs.working_dir(self.build_directory):
inspect.getmodule(self.pkg).nmake(

View File

@@ -121,7 +121,7 @@ def setup_run_environment(self, env):
$ source {prefix}/{component}/{version}/env/vars.sh
"""
# Only if environment modifications are desired (default is +envmods)
if "~envmods" not in self.spec:
if "+envmods" in self.spec:
env.extend(
EnvironmentModifications.from_sourcing_file(
join_path(self.component_prefix, "env", "vars.sh")
@@ -175,7 +175,7 @@ def libs(self):
return find_libraries("*", root=lib_path, shared=True, recursive=True)
class IntelOneApiStaticLibraryList:
class IntelOneApiStaticLibraryList(object):
"""Provides ld_flags when static linking is needed
Oneapi puts static and dynamic libraries in the same directory, so

View File

@@ -16,7 +16,6 @@
import spack.builder
import spack.config
import spack.deptypes as dt
import spack.detection
import spack.multimethod
import spack.package_base
@@ -24,14 +23,13 @@
import spack.store
from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError, SpecError
from spack.install_test import test_part
from spack.version import Version
from ._checks import BaseBuilder, execute_install_time_tests
class PythonExtension(spack.package_base.PackageBase):
maintainers("adamjstewart")
maintainers("adamjstewart", "pradyunsg")
@property
def import_modules(self):
@@ -169,20 +167,18 @@ def remove_files_from_view(self, view, merge_map):
view.remove_files(to_remove)
def test_imports(self):
def test(self):
"""Attempts to import modules of the installed package."""
# Make sure we are importing the installed modules,
# not the ones in the source directory
python = inspect.getmodule(self).python
for module in self.import_modules:
with test_part(
self,
f"test_imports_{module}",
purpose=f"checking import of {module}",
self.run_test(
inspect.getmodule(self).python.path,
["-c", "import {0}".format(module)],
purpose="checking import of {0}".format(module),
work_dir="spack-test",
):
python("-c", f"import {module}")
)
def update_external_dependencies(self, extendee_spec=None):
"""
@@ -202,7 +198,7 @@ def update_external_dependencies(self, extendee_spec=None):
else:
python = self.get_external_python_for_prefix()
if not python.concrete:
repo = spack.repo.PATH.repo_for_pkg(python)
repo = spack.repo.path.repo_for_pkg(python)
python.namespace = repo.namespace
# Ensure architecture information is present
@@ -227,48 +223,7 @@ def update_external_dependencies(self, extendee_spec=None):
python.external_path = self.spec.external_path
python._mark_concrete()
self.spec.add_dependency_edge(python, depflag=dt.BUILD | dt.LINK | dt.RUN, virtuals=())
def get_external_python_for_prefix(self):
"""
For an external package that extends python, find the most likely spec for the python
it depends on.
First search: an "installed" external that shares a prefix with this package
Second search: a configured external that shares a prefix with this package
Third search: search this prefix for a python package
Returns:
spack.spec.Spec: The external Spec for python most likely to be compatible with self.spec
"""
python_externals_installed = [
s for s in spack.store.STORE.db.query("python") if s.prefix == self.spec.external_path
]
if python_externals_installed:
return python_externals_installed[0]
python_external_config = spack.config.get("packages:python:externals", [])
python_externals_configured = [
spack.spec.parse_with_version_concrete(item["spec"])
for item in python_external_config
if item["prefix"] == self.spec.external_path
]
if python_externals_configured:
return python_externals_configured[0]
python_externals_detection = spack.detection.by_path(
["python"], path_hints=[self.spec.external_path]
)
python_externals_detected = [
d.spec
for d in python_externals_detection.get("python", [])
if d.prefix == self.spec.external_path
]
if python_externals_detected:
return python_externals_detected[0]
raise StopIteration("No external python could be detected for %s to depend on" % self.spec)
self.spec.add_dependency_edge(python, deptypes=("build", "link", "run"), virtuals=())
class PythonPackage(PythonExtension):
@@ -315,16 +270,54 @@ def list_url(cls):
name = cls.pypi.split("/")[0]
return "https://pypi.org/simple/" + name + "/"
def get_external_python_for_prefix(self):
"""
For an external package that extends python, find the most likely spec for the python
it depends on.
First search: an "installed" external that shares a prefix with this package
Second search: a configured external that shares a prefix with this package
Third search: search this prefix for a python package
Returns:
spack.spec.Spec: The external Spec for python most likely to be compatible with self.spec
"""
python_externals_installed = [
s for s in spack.store.db.query("python") if s.prefix == self.spec.external_path
]
if python_externals_installed:
return python_externals_installed[0]
python_external_config = spack.config.get("packages:python:externals", [])
python_externals_configured = [
spack.spec.parse_with_version_concrete(item["spec"])
for item in python_external_config
if item["prefix"] == self.spec.external_path
]
if python_externals_configured:
return python_externals_configured[0]
python_externals_detection = spack.detection.by_executable(
[spack.repo.path.get_pkg_class("python")], path_hints=[self.spec.external_path]
)
python_externals_detected = [
d.spec
for d in python_externals_detection.get("python", [])
if d.prefix == self.spec.external_path
]
if python_externals_detected:
return python_externals_detected[0]
raise StopIteration("No external python could be detected for %s to depend on" % self.spec)
@property
def headers(self):
"""Discover header files in platlib."""
# Remove py- prefix in package name
name = self.spec.name[3:]
# Headers may be in either location
include = self.prefix.join(self.spec["python"].package.include).join(name)
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
include = self.prefix.join(self.spec["python"].package.include)
platlib = self.prefix.join(self.spec["python"].package.platlib)
headers = fs.find_all_headers(include) + fs.find_all_headers(platlib)
if headers:
@@ -338,14 +331,13 @@ def libs(self):
"""Discover libraries in platlib."""
# Remove py- prefix in package name
name = self.spec.name[3:]
library = "lib" + self.spec.name[3:].replace("-", "?")
root = self.prefix.join(self.spec["python"].package.platlib)
root = self.prefix.join(self.spec["python"].package.platlib).join(name)
libs = fs.find_all_libraries(root, recursive=True)
if libs:
return libs
for shared in [True, False]:
libs = fs.find_libraries(library, root, shared=shared, recursive=True)
if libs:
return libs
msg = "Unable to recursively locate {} libraries in {}"
raise NoLibrariesError(msg.format(self.spec.name, root))
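The search above tries shared libraries first, then static, under the python platlib. A crude stand-in for `find_libraries` makes the order concrete; the glob pattern and paths here are hypothetical, and the real code uses `llnl.util.filesystem.find_libraries`:

import glob
import os

def find_libraries(pattern, root, shared=True, recursive=True):
    # Crude stand-in: pick the extension by linkage and glob for it.
    ext = ".so" if shared else ".a"
    return glob.glob(os.path.join(root, "**", pattern + "*" + ext),
                     recursive=recursive)

library = "lib" + "py-torch"[3:].replace("-", "?")  # -> "libtorch"
root = "/opt/prefix/lib/python3.11/site-packages"   # hypothetical platlib
libs = []
for shared in (True, False):                        # shared first, then static
    libs = find_libraries(library, root, shared=shared, recursive=True)
    if libs:
        break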
@@ -406,8 +398,7 @@ def build_directory(self):
def config_settings(self, spec, prefix):
"""Configuration settings to be passed to the PEP 517 build backend.
Requires pip 22.1 or newer.
Requires pip 22.1+, which requires Python 3.7+.
Args:
spec (spack.spec.Spec): build spec
@@ -421,8 +412,6 @@ def config_settings(self, spec, prefix):
def install_options(self, spec, prefix):
"""Extra arguments to be supplied to the setup.py install command.
Requires pip 23.0 or older.
Args:
spec (spack.spec.Spec): build spec
prefix (spack.util.prefix.Prefix): installation prefix
@@ -436,8 +425,6 @@ def global_options(self, spec, prefix):
"""Extra global options to be supplied to the setup.py call before the install
or bdist_wheel command.
Deprecated in pip 23.1.
Args:
spec (spack.spec.Spec): build spec
prefix (spack.util.prefix.Prefix): installation prefix

View File

@@ -28,7 +28,7 @@ class QMakePackage(spack.package_base.PackageBase):
build_system("qmake")
depends_on("qmake", type="build", when="build_system=qmake")
depends_on("qt", type="build", when="build_system=qmake")
@spack.builder.builder("qmake")

View File

@@ -10,10 +10,9 @@
import llnl.util.tty as tty
import spack.builder
from spack.build_environment import SPACK_NO_PARALLEL_MAKE
from spack.build_environment import SPACK_NO_PARALLEL_MAKE, determine_number_of_jobs
from spack.directives import build_system, extends, maintainers
from spack.package_base import PackageBase
from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import env_flag
from spack.util.executable import Executable, ProcessError
@@ -93,7 +92,7 @@ def install(self, pkg, spec, prefix):
"--copy",
"-i",
"-j",
str(determine_number_of_jobs(parallel=parallel)),
str(determine_number_of_jobs(parallel)),
"--",
os.getcwd(),
]

View File

@@ -140,6 +140,8 @@ class ROCmPackage(PackageBase):
depends_on("hsa-rocr-dev", when="+rocm")
depends_on("hip +rocm", when="+rocm")
conflicts("^blt@:0.3.6", when="+rocm")
# need amd gpu type for rocm builds
conflicts("amdgpu_target=none", when="+rocm")

View File

@@ -7,14 +7,13 @@
import re
import llnl.util.tty as tty
from llnl.util.filesystem import find, working_dir
from llnl.util.filesystem import find, join_path, working_dir
import spack.builder
import spack.install_test
import spack.package_base
from spack.directives import build_system, depends_on, extends
from spack.multimethod import when
from spack.util.executable import Executable
from ._checks import BaseBuilder, execute_install_time_tests
@@ -40,8 +39,9 @@ class SIPPackage(spack.package_base.PackageBase):
build_system("sip")
with when("build_system=sip"):
extends("python", type=("build", "link", "run"))
depends_on("py-sip", type="build")
extends("python")
depends_on("qt")
depends_on("py-sip")
@property
def import_modules(self):
@@ -113,13 +113,13 @@ class SIPBuilder(BaseBuilder):
* install
The configure phase already adds a set of default flags. To see more
options, run ``sip-build --help``.
options, run ``python configure.py --help``.
"""
phases = ("configure", "build", "install")
#: Names associated with package methods in the old build-system format
legacy_methods = ("configure_args", "build_args", "install_args")
legacy_methods = ("configure_file", "configure_args", "build_args", "install_args")
#: Names associated with package attributes in the old build-system format
legacy_attributes = (
@@ -130,17 +130,34 @@ class SIPBuilder(BaseBuilder):
"build_directory",
)
build_directory = "build"
def configure_file(self):
"""Returns the name of the configure file to use."""
return "configure.py"
def configure(self, pkg, spec, prefix):
"""Configure the package."""
configure = self.configure_file()
# https://www.riverbankcomputing.com/static/Docs/sip/command_line_tools.html
args = ["--verbose", "--target-dir", inspect.getmodule(self.pkg).python_platlib]
args.extend(self.configure_args())
args = self.configure_args()
sip_build = Executable(spec["py-sip"].prefix.bin.join("sip-build"))
sip_build(*args)
args.extend(
[
"--verbose",
"--confirm-license",
"--qmake",
spec["qt"].prefix.bin.qmake,
"--sip",
spec["py-sip"].prefix.bin.sip,
"--sip-incdir",
join_path(spec["py-sip"].prefix, spec["python"].package.include),
"--bindir",
prefix.bin,
"--destdir",
inspect.getmodule(self.pkg).python_platlib,
]
)
self.pkg.python(configure, *args)
def configure_args(self):
"""Arguments to pass to configure."""
@@ -150,8 +167,7 @@ def build(self, pkg, spec, prefix):
"""Build the package."""
args = self.build_args()
with working_dir(self.build_directory):
inspect.getmodule(self.pkg).make(*args)
inspect.getmodule(self.pkg).make(*args)
def build_args(self):
"""Arguments to pass to build."""
@@ -161,11 +177,21 @@ def install(self, pkg, spec, prefix):
"""Install the package."""
args = self.install_args()
with working_dir(self.build_directory):
inspect.getmodule(self.pkg).make("install", *args)
inspect.getmodule(self.pkg).make("install", parallel=False, *args)
def install_args(self):
"""Arguments to pass to install."""
return []
spack.builder.run_after("install")(execute_install_time_tests)
@spack.builder.run_after("install")
def extend_path_setup(self):
# See github issue #14121 and PR #15297
module = self.pkg.spec["py-sip"].variants["module"].value
if module != "sip":
module = module.split(".")[0]
with working_dir(inspect.getmodule(self.pkg).python_platlib):
with open(os.path.join(module, "__init__.py"), "a") as f:
f.write("from pkgutil import extend_path\n")
f.write("__path__ = extend_path(__path__, __name__)\n")

View File

@@ -63,7 +63,7 @@ def create(pkg):
return _BUILDERS[id(pkg)]
class _PhaseAdapter:
class _PhaseAdapter(object):
def __init__(self, builder, phase_fn):
self.builder = builder
self.phase_fn = phase_fn
@@ -115,7 +115,7 @@ class hierarchy (look at AspellDictPackage for an example of that)
# package. The semantic should be the same as the method in the base builder were still
# present in the base class of the package.
class _ForwardToBaseBuilder:
class _ForwardToBaseBuilder(object):
def __init__(self, wrapped_pkg_object, root_builder):
self.wrapped_package_object = wrapped_pkg_object
self.root_builder = root_builder
@@ -188,7 +188,7 @@ def __init__(self, pkg):
# Attribute containing the package wrapped in dispatcher with a `__getattr__`
# method that will forward certain calls to the default builder.
self.pkg_with_dispatcher = _ForwardToBaseBuilder(pkg, root_builder=self)
super().__init__(pkg)
super(Adapter, self).__init__(pkg)
# These two methods don't follow the (self, spec, prefix) signature of phases nor
# the (self) signature of methods, so they are added explicitly to avoid using a
@@ -388,7 +388,7 @@ def __new__(mcs, name, bases, attr_dict):
return super(_PackageAdapterMeta, mcs).__new__(mcs, name, bases, attr_dict)
class InstallationPhase:
class InstallationPhase(object):
"""Manages a single phase of the installation.
This descriptor stores at creation time the name of the method it should
@@ -530,9 +530,9 @@ def setup_build_environment(self, env):
modifications to be applied when the package is built. Package authors
can call methods on it to alter the build environment.
"""
if not hasattr(super(), "setup_build_environment"):
if not hasattr(super(Builder, self), "setup_build_environment"):
return
super().setup_build_environment(env)
super(Builder, self).setup_build_environment(env)
def setup_dependent_build_environment(self, env, dependent_spec):
"""Sets up the build environment of packages that depend on this one.
@@ -563,9 +563,9 @@ def setup_dependent_build_environment(self, env, dependent_spec):
the dependent's state. Note that *this* package's spec is
available as ``self.spec``
"""
if not hasattr(super(), "setup_dependent_build_environment"):
if not hasattr(super(Builder, self), "setup_dependent_build_environment"):
return
super().setup_dependent_build_environment(env, dependent_spec)
super(Builder, self).setup_dependent_build_environment(env, dependent_spec)
def __getitem__(self, idx):
key = self.phases[idx]

View File

@@ -20,9 +20,9 @@
def misc_cache_location():
"""The ``MISC_CACHE`` is Spack's cache for small data.
"""The ``misc_cache`` is Spack's cache for small data.
Currently the ``MISC_CACHE`` stores indexes for virtual dependency
Currently the ``misc_cache`` stores indexes for virtual dependency
providers and for which packages provide which tags.
"""
path = spack.config.get("config:misc_cache", spack.paths.default_misc_cache_path)
@@ -35,7 +35,7 @@ def _misc_cache():
#: Spack's cache for small data
MISC_CACHE: Union[
misc_cache: Union[
spack.util.file_cache.FileCache, llnl.util.lang.Singleton
] = llnl.util.lang.Singleton(_misc_cache)
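`llnl.util.lang.Singleton` defers the factory call until first attribute access, which is what lets these module-level caches be declared before any configuration is loaded. A minimal sketch of that pattern, under that assumption (the cache class and path are hypothetical):

class Singleton:
    """Lazily build the wrapped object on first attribute access."""
    def __init__(self, factory):
        self._factory = factory
        self._instance = None
    def __getattr__(self, name):
        # Only called when normal lookup fails, i.e. for forwarded names.
        if self._instance is None:
            self._instance = self._factory()
        return getattr(self._instance, name)

class FileCache:
    def __init__(self, root):
        self.root = root

MISC_CACHE = Singleton(lambda: FileCache("/tmp/spack-misc-cache"))
print(MISC_CACHE.root)  # factory runs only here, not at import time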
@@ -58,7 +58,7 @@ def _fetch_cache():
return spack.fetch_strategy.FsCache(path)
class MirrorCache:
class MirrorCache(object):
def __init__(self, root, skip_unstable_versions):
self.root = os.path.abspath(root)
self.skip_unstable_versions = skip_unstable_versions
@@ -91,6 +91,6 @@ def symlink(self, mirror_ref):
#: Spack's local cache for downloaded source archives
FETCH_CACHE: Union[
fetch_cache: Union[
spack.fetch_strategy.FsCache, llnl.util.lang.Singleton
] = llnl.util.lang.Singleton(_fetch_cache)

File diff suppressed because it is too large

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import os
import re
@@ -147,7 +149,7 @@ def get_command(cmd_name):
return getattr(get_module(cmd_name), pname)
class _UnquotedFlags:
class _UnquotedFlags(object):
"""Use a heuristic in `.extract()` to detect whether the user is trying to set
multiple flags like the docker ENV attribute allows (e.g. 'cflags=-Os -pipe').
@@ -273,9 +275,9 @@ def disambiguate_spec_from_hashes(spec, hashes, local=False, installed=True, fir
See ``spack.database.Database._query`` for details.
"""
if local:
matching_specs = spack.store.STORE.db.query_local(spec, hashes=hashes, installed=installed)
matching_specs = spack.store.db.query_local(spec, hashes=hashes, installed=installed)
else:
matching_specs = spack.store.STORE.db.query(spec, hashes=hashes, installed=installed)
matching_specs = spack.store.db.query(spec, hashes=hashes, installed=installed)
if not matching_specs:
tty.die("Spec '%s' matches no installed packages." % spec)
@@ -291,7 +293,7 @@ def ensure_single_spec_or_die(spec, matching_specs):
if len(matching_specs) <= 1:
return
format_string = "{name}{@version}{%compiler.name}{@compiler.version}{arch=architecture}"
format_string = "{name}{@version}{%compiler}{arch=architecture}"
args = ["%s matches multiple packages." % spec, "Matching packages:"]
args += [
colorize(" @K{%s} " % s.dag_hash(7)) + s.cformat(format_string) for s in matching_specs
@@ -342,9 +344,9 @@ def iter_groups(specs, indent, all_headers):
print()
header = "%s{%s} / %s{%s}" % (
spack.spec.ARCHITECTURE_COLOR,
spack.spec.architecture_color,
architecture if architecture else "no arch",
spack.spec.COMPILER_COLOR,
spack.spec.compiler_color,
f"{compiler.display_str}" if compiler else "no compiler",
)
@@ -383,7 +385,7 @@ def display_specs(specs, args=None, **kwargs):
deps (bool): Display dependencies with specs
long (bool): Display short hashes with specs
very_long (bool): Display full hashes with specs (supersedes ``long``)
namespaces (bool): Print namespaces along with names
namespace (bool): Print namespaces along with names
show_flags (bool): Show compiler flags with specs
variants (bool): Show variants with specs
indent (int): indent each line this much
@@ -407,7 +409,7 @@ def get_arg(name, default=None):
paths = get_arg("paths", False)
deps = get_arg("deps", False)
hashes = get_arg("long", False)
namespaces = get_arg("namespaces", False)
namespace = get_arg("namespace", False)
flags = get_arg("show_flags", False)
full_compiler = get_arg("show_full_compiler", False)
variants = get_arg("variants", False)
@@ -428,7 +430,7 @@ def get_arg(name, default=None):
format_string = get_arg("format", None)
if format_string is None:
nfmt = "{fullname}" if namespaces else "{name}"
nfmt = "{fullname}" if namespace else "{name}"
ffmt = ""
if full_compiler or flags:
ffmt += "{%compiler.name}"
@@ -473,7 +475,7 @@ def format_list(specs):
out = ""
# getting lots of prefixes requires DB lookups. Ensure
# all spec.prefix calls are in one transaction.
with spack.store.STORE.db.read_transaction():
with spack.store.db.read_transaction():
for string, spec in formatted:
if not string:
# print newline from above
@@ -545,7 +547,7 @@ class PythonNameError(spack.error.SpackError):
def __init__(self, name):
self.name = name
super().__init__("{0} is not a permissible Python name.".format(name))
super(PythonNameError, self).__init__("{0} is not a permissible Python name.".format(name))
class CommandNameError(spack.error.SpackError):
@@ -553,7 +555,9 @@ class CommandNameError(spack.error.SpackError):
def __init__(self, name):
self.name = name
super().__init__("{0} is not a permissible Spack command name.".format(name))
super(CommandNameError, self).__init__(
"{0} is not a permissible Spack command name.".format(name)
)
########################################
@@ -584,14 +588,14 @@ def require_active_env(cmd_name):
if env:
return env
tty.die(
"`spack %s` requires an environment" % cmd_name,
"activate an environment first:",
" spack env activate ENV",
"or use:",
" spack -e ENV %s ..." % cmd_name,
)
else:
tty.die(
"`spack %s` requires an environment" % cmd_name,
"activate an environment first:",
" spack env activate ENV",
"or use:",
" spack -e ENV %s ..." % cmd_name,
)
def find_environment(args):

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import collections
import archspec.cpu

View File

@@ -47,7 +47,7 @@ def configs(parser, args):
def packages(parser, args):
pkgs = args.name or spack.repo.PATH.all_package_names()
pkgs = args.name or spack.repo.path.all_package_names()
reports = spack.audit.run_group(args.subcommand, pkgs=pkgs)
_process_reports(reports)
@@ -57,7 +57,7 @@ def packages_https(parser, args):
if not args.check_all and not args.name:
tty.die("Please specify one or more packages to audit, or --all.")
pkgs = args.name or spack.repo.PATH.all_package_names()
pkgs = args.name or spack.repo.path.all_package_names()
reports = spack.audit.run_group(args.subcommand, pkgs=pkgs)
_process_reports(reports)

View File

@@ -59,7 +59,7 @@ def setup_parser(subparser):
subparser.add_argument(
"package_or_file",
help="name of package to show contributions for, or path to a file in the spack repo",
help="name of package to show contributions for, " "or path to a file in the spack repo",
)
@@ -126,7 +126,7 @@ def blame(parser, args):
blame_file = path
if not blame_file:
pkg_cls = spack.repo.PATH.get_pkg_class(args.package_or_file)
pkg_cls = spack.repo.path.get_pkg_class(args.package_or_file)
blame_file = pkg_cls.module.__file__.rstrip("c") # .pyc -> .py
# get git blame for the package

View File

@@ -2,9 +2,10 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os.path
import shutil
import sys
import tempfile
import llnl.util.filesystem
@@ -69,10 +70,11 @@
def _add_scope_option(parser):
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
parser.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
help="configuration scope to read/modify",
)
@@ -169,7 +171,7 @@ def _reset(args):
if not ok_to_continue:
raise RuntimeError("Aborting")
for scope in spack.config.CONFIG.file_scopes:
for scope in spack.config.config.file_scopes:
# The default scope should stay untouched
if scope.name == "defaults":
continue
@@ -186,7 +188,7 @@ def _reset(args):
if os.path.exists(bootstrap_yaml):
shutil.move(bootstrap_yaml, backup_file)
spack.config.CONFIG.clear_caches()
spack.config.config.clear_caches()
def _root(args):
@@ -326,7 +328,6 @@ def _status(args):
if missing:
print(llnl.util.tty.color.colorize(legend))
print()
sys.exit(1)
def _add(args):

View File

@@ -2,14 +2,12 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import glob
import json
import os
import shutil
import sys
import tempfile
from typing import List
import llnl.util.tty as tty
import llnl.util.tty.color as clr
@@ -20,7 +18,7 @@
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment as ev
import spack.error
import spack.hash_types as ht
import spack.mirror
import spack.relocate
import spack.repo
@@ -30,6 +28,7 @@
import spack.util.url as url_util
import spack.util.web as web_util
from spack.cmd import display_specs
from spack.error import SpecError
from spack.spec import Spec, save_dependency_specfiles
from spack.stage import Stage
from spack.util.string import plural
@@ -39,60 +38,57 @@
level = "long"
def setup_parser(subparser: argparse.ArgumentParser):
setattr(setup_parser, "parser", subparser)
def setup_parser(subparser):
setup_parser.parser = subparser
subparsers = subparser.add_subparsers(help="buildcache sub-commands")
push = subparsers.add_parser("push", aliases=["create"], help=push_fn.__doc__)
push.add_argument("-f", "--force", action="store_true", help="overwrite tarball if it exists")
push.add_argument("-f", "--force", action="store_true", help="overwrite tarball if it exists.")
push.add_argument(
"-u", "--unsigned", action="store_true", help="push unsigned buildcache tarballs"
)
push.add_argument(
"--allow-root",
"-a",
"--allow-root",
action="store_true",
help="allow install root string in binary files after RPATH substitution",
)
push_sign = push.add_mutually_exclusive_group(required=False)
push_sign.add_argument(
"--unsigned", "-u", action="store_true", help="push unsigned buildcache tarballs"
push.add_argument(
"-k", "--key", metavar="key", type=str, default=None, help="Key for signing."
)
push_sign.add_argument(
"--key", "-k", metavar="key", type=str, default=None, help="key for signing"
)
push.add_argument("mirror", type=str, help="mirror name, path, or URL")
push.add_argument("mirror", type=str, help="Mirror name, path, or URL.")
push.add_argument(
"--update-index",
"--rebuild-index",
action="store_true",
default=False,
help="regenerate buildcache index after building package(s)",
help="Regenerate buildcache index after building package(s)",
)
push.add_argument(
"--spec-file", default=None, help="create buildcache entry for spec from json or yaml file"
"--spec-file", default=None, help="Create buildcache entry for spec from json or yaml file"
)
push.add_argument(
"--only",
default="package,dependencies",
dest="things_to_install",
choices=["package", "dependencies"],
help="select the buildcache mode. "
"The default is to build a cache for the package along with all its dependencies. "
"Alternatively, one can decide to build a cache for only the package or only the "
"dependencies",
)
push.add_argument(
"--fail-fast",
action="store_true",
help="stop pushing on first failure (default is best effort)",
help=(
"Select the buildcache mode. the default is to"
" build a cache for the package along with all"
" its dependencies. Alternatively, one can"
" decide to build a cache for only the package"
" or only the dependencies"
),
)
arguments.add_common_arguments(push, ["specs"])
push.set_defaults(func=push_fn)
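
The newer parser above replaces two independent flags with an argparse mutually exclusive group, so `--unsigned` and `--key` can no longer be combined. A minimal, self-contained sketch of that pattern (the prog name and flag set here are illustrative, not Spack's actual parser):

import argparse

demo = argparse.ArgumentParser(prog="push-demo")
sign = demo.add_mutually_exclusive_group(required=False)
sign.add_argument("--unsigned", "-u", action="store_true",
                  help="push unsigned buildcache tarballs")
sign.add_argument("--key", "-k", metavar="key", default=None,
                  help="key for signing")

print(demo.parse_args(["--unsigned"]))  # Namespace(unsigned=True, key=None)
# demo.parse_args(["-u", "-k", "ABC"])  # argparse errors out:
#                                       # --key/-k not allowed with --unsigned/-u
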
install = subparsers.add_parser("install", help=install_fn.__doc__)
install.add_argument(
"-f", "--force", action="store_true", help="overwrite install directory if it exists"
"-f", "--force", action="store_true", help="overwrite install directory if it exists."
)
install.add_argument(
"-m", "--multiple", action="store_true", help="allow all matching packages"
"-m", "--multiple", action="store_true", help="allow all matching packages "
)
install.add_argument(
"-u",
@@ -111,7 +107,7 @@ def setup_parser(subparser: argparse.ArgumentParser):
install.set_defaults(func=install_fn)
listcache = subparsers.add_parser("list", help=list_fn.__doc__)
arguments.add_common_arguments(listcache, ["long", "very_long", "namespaces"])
arguments.add_common_arguments(listcache, ["long", "very_long"])
listcache.add_argument(
"-v",
"--variants",
@@ -146,49 +142,49 @@ def setup_parser(subparser: argparse.ArgumentParser):
"-m",
"--mirror-url",
default=None,
help="override any configured mirrors with this mirror URL",
help="Override any configured mirrors with this mirror URL",
)
check.add_argument(
"-o", "--output-file", default=None, help="file where rebuild info should be written"
"-o", "--output-file", default=None, help="File where rebuild info should be written"
)
# used to construct scope arguments below
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
check.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
default=spack.config.default_modify_scope(),
help="configuration scope containing mirrors to check",
)
check_spec_or_specfile = check.add_mutually_exclusive_group(required=True)
check_spec_or_specfile.add_argument(
"-s", "--spec", help="check single spec instead of release specs file"
check.add_argument(
"-s", "--spec", default=None, help="Check single spec instead of release specs file"
)
check_spec_or_specfile.add_argument(
check.add_argument(
"--spec-file",
help="check single spec from json or yaml file instead of release specs file",
default=None,
help=("Check single spec from json or yaml file instead of release specs file"),
)
check.set_defaults(func=check_fn)
# Download tarball and specfile
download = subparsers.add_parser("download", help=download_fn.__doc__)
download_spec_or_specfile = download.add_mutually_exclusive_group(required=True)
download_spec_or_specfile.add_argument(
"-s", "--spec", help="download built tarball for spec from mirror"
)
download_spec_or_specfile.add_argument(
"--spec-file", help="download built tarball for spec (from json or yaml file) from mirror"
download.add_argument(
"-s", "--spec", default=None, help="Download built tarball for spec from mirror"
)
download.add_argument(
"-p",
"--path",
required=True,
"--spec-file",
default=None,
help="path to directory where tarball should be downloaded",
help=("Download built tarball for spec (from json or yaml file) from mirror"),
)
download.add_argument(
"-p", "--path", default=None, help="Path to directory where tarball should be downloaded"
)
download.set_defaults(func=download_fn)
@@ -196,53 +192,53 @@ def setup_parser(subparser: argparse.ArgumentParser):
getbuildcachename = subparsers.add_parser(
"get-buildcache-name", help=get_buildcache_name_fn.__doc__
)
getbuildcachename_spec_or_specfile = getbuildcachename.add_mutually_exclusive_group(
required=True
getbuildcachename.add_argument(
"-s", "--spec", default=None, help="Spec string for which buildcache name is desired"
)
getbuildcachename_spec_or_specfile.add_argument(
"-s", "--spec", help="spec string for which buildcache name is desired"
)
getbuildcachename_spec_or_specfile.add_argument(
"--spec-file", help="path to spec json or yaml file for which buildcache name is desired"
getbuildcachename.add_argument(
"--spec-file",
default=None,
help=("Path to spec json or yaml file for which buildcache name is desired"),
)
getbuildcachename.set_defaults(func=get_buildcache_name_fn)
# Given the root spec, save the yaml of the dependent spec to a file
savespecfile = subparsers.add_parser("save-specfile", help=save_specfile_fn.__doc__)
savespecfile_spec_or_specfile = savespecfile.add_mutually_exclusive_group(required=True)
savespecfile_spec_or_specfile.add_argument("--root-spec", help="root spec of dependent spec")
savespecfile_spec_or_specfile.add_argument(
"--root-specfile", help="path to json or yaml file containing root spec of dependent spec"
savespecfile.add_argument("--root-spec", default=None, help="Root spec of dependent spec")
savespecfile.add_argument(
"--root-specfile",
default=None,
help="Path to json or yaml file containing root spec of dependent spec",
)
savespecfile.add_argument(
"-s",
"--specs",
required=True,
help="list of dependent specs for which saved yaml is desired",
default=None,
help="List of dependent specs for which saved yaml is desired",
)
savespecfile.add_argument(
"--specfile-dir", required=True, help="path to directory where spec yamls should be saved"
"--specfile-dir", default=None, help="Path to directory where spec yamls should be saved"
)
savespecfile.set_defaults(func=save_specfile_fn)
# Sync buildcache entries from one mirror to another
sync = subparsers.add_parser("sync", help=sync_fn.__doc__)
sync.add_argument(
"--manifest-glob", help="a quoted glob pattern identifying copy manifest files"
"--manifest-glob",
default=None,
help="A quoted glob pattern identifying copy manifest files",
)
sync.add_argument(
"src_mirror",
metavar="source mirror",
type=arguments.mirror_name_or_url,
nargs="?",
help="source mirror name, path, or URL",
help="Source mirror name, path, or URL",
)
sync.add_argument(
"dest_mirror",
metavar="destination mirror",
type=arguments.mirror_name_or_url,
nargs="?",
help="destination mirror name, path, or URL",
help="Destination mirror name, path, or URL",
)
sync.set_defaults(func=sync_fn)
@@ -251,47 +247,72 @@ def setup_parser(subparser: argparse.ArgumentParser):
"update-index", aliases=["rebuild-index"], help=update_index_fn.__doc__
)
update_index.add_argument(
"mirror", type=arguments.mirror_name_or_url, help="destination mirror name, path, or URL"
"mirror", type=arguments.mirror_name_or_url, help="Destination mirror name, path, or URL"
)
update_index.add_argument(
"-k",
"--keys",
default=False,
action="store_true",
help="if provided, key index will be updated as well as package index",
help="If provided, key index will be updated as well as package index",
)
update_index.set_defaults(func=update_index_fn)
def _matching_specs(specs: List[Spec]) -> List[Spec]:
"""Disambiguate specs and return a list of matching specs"""
return [spack.cmd.disambiguate_spec(s, ev.active_environment(), installed=any) for s in specs]
def _matching_specs(specs, spec_file):
"""Return a list of matching specs read from either a spec file (JSON or YAML),
a query over the store or a query over the active environment.
"""
env = ev.active_environment()
hashes = env.all_hashes() if env else None
if spec_file:
return spack.store.specfile_matches(spec_file, hashes=hashes)
if specs:
constraints = spack.cmd.parse_specs(specs)
return spack.store.find(constraints, hashes=hashes)
if env:
return [concrete for _, concrete in env.concretized_specs()]
tty.die(
"build cache file creation requires at least one"
" installed package spec, an active environment,"
" or else a path to a json or yaml file containing a spec"
" to install"
)
def _concrete_spec_from_args(args):
spec_str, specfile_path = args.spec, args.spec_file
if not spec_str and not specfile_path:
tty.error("must provide either spec string or path to YAML or JSON specfile")
sys.exit(1)
if spec_str:
try:
constraints = spack.cmd.parse_specs(spec_str)
spec = spack.store.find(constraints)[0]
spec.concretize()
except SpecError as spec_error:
tty.error("Unable to concretize spec {0}".format(spec_str))
tty.debug(spec_error)
sys.exit(1)
return spec
return Spec.from_specfile(specfile_path)
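
Both helpers above resolve specs from one of several sources; the older `_matching_specs` falls through spec file, then store query, then the active environment. A reduced, runnable toy of that fallback order, with all names hypothetical:

def select_specs(spec_file=None, specs=None, env=None):
    """Toy fallback chain: spec file first, then store query, then environment."""
    if spec_file:
        return [f"from-file:{spec_file}"]
    if specs:
        return [f"from-store:{s}" for s in specs]
    if env is not None:
        return [f"from-env:{s}" for s in env]
    raise SystemExit("need a spec, a spec file, or an active environment")

print(select_specs(specs=["zlib", "hdf5"]))
# ['from-store:zlib', 'from-store:hdf5']
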
def push_fn(args):
"""create a binary package and push it to a mirror"""
if args.spec_file:
tty.warn(
"The flag `--spec-file` is deprecated and will be removed in Spack 0.22. "
"Use positional arguments instead."
)
if args.specs or args.spec_file:
specs = _matching_specs(spack.cmd.parse_specs(args.specs or args.spec_file))
else:
specs = spack.cmd.require_active_env("buildcache push").all_specs()
mirror = arguments.mirror_name_or_url(args.mirror)
if args.allow_root:
tty.warn(
"The flag `--allow-root` is the default in Spack 0.21, will be removed in Spack 0.22"
)
url = mirror.push_url
specs = bindist.specs_to_be_packaged(
specs,
_matching_specs(args.specs, args.spec_file),
root="package" in args.things_to_install,
dependencies="dependencies" in args.things_to_install,
)
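
The rewritten `push_fn` keeps `--spec-file` and `--allow-root` working but warns that they are deprecated, then routes everything through the new code path. A small sketch of that keep-it-working-but-warn pattern (hypothetical prog and file names):

import argparse
import warnings

demo = argparse.ArgumentParser(prog="push-demo")
demo.add_argument("specs", nargs="*")
demo.add_argument("--spec-file", default=None)
args = demo.parse_args(["--spec-file", "zlib.json"])

if args.spec_file:
    # old flag still honored, but the user is nudged to the new interface
    warnings.warn("--spec-file is deprecated; pass specs positionally",
                  DeprecationWarning)
sources = args.specs or ([args.spec_file] if args.spec_file else [])
print(sources)  # ['zlib.json']
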
@@ -302,7 +323,6 @@ def push_fn(args):
tty.info(f"Selected {len(specs)} specs to push to {url}")
skipped = []
failed = []
# tty printing
color = clr.get_color_when()
@@ -318,6 +338,7 @@ def push_fn(args):
bindist.PushOptions(
force=args.force,
unsigned=args.unsigned,
allow_root=args.allow_root,
key=args.key,
regenerate_index=args.update_index,
),
@@ -333,17 +354,11 @@ def push_fn(args):
except bindist.NoOverwriteException:
skipped.append(format_spec(spec))
# Catch any other exception unless the fail fast option is set
except Exception as e:
if args.fail_fast or isinstance(e, (bindist.PickKeyException, bindist.NoKeyException)):
raise
failed.append((format_spec(spec), e))
if skipped:
if len(specs) == 1:
tty.info("The spec is already in the buildcache. Use --force to overwrite it.")
elif len(skipped) == len(specs):
tty.info("All specs are already in the buildcache. Use --force to overwrite them.")
tty.info("All specs are already in the buildcache. Use --force to overwite them.")
else:
tty.info(
"The following {} specs were skipped as they already exist in the buildcache:\n"
@@ -353,17 +368,6 @@ def push_fn(args):
)
)
if failed:
if len(failed) == 1:
raise failed[0][1]
raise spack.error.SpackError(
f"The following {len(failed)} errors occurred while pushing specs to the buildcache",
"\n".join(
elide_list([f" {spec}: {e.__class__.__name__}: {e}" for spec, e in failed], 5)
),
)
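
The new error handling collects per-spec failures and raises one summary error at the end, unless `--fail-fast` re-raises immediately. A runnable toy of the same collect-or-fail-fast loop (names are illustrative):

def push_all(items, fail_fast=False):
    """Fail fast on the first error, or gather failures and summarize."""
    failed = []
    for item in items:
        try:
            if item == "bad":
                raise ValueError(f"cannot push {item}")
        except Exception as e:
            if fail_fast:
                raise
            failed.append((item, e))
    if failed:
        raise RuntimeError(
            f"{len(failed)} error(s) occurred while pushing: "
            + "; ".join(f"{i}: {e}" for i, e in failed)
        )

try:
    push_all(["ok", "bad"])
except RuntimeError as e:
    print(e)  # 1 error(s) occurred while pushing: bad: cannot push bad
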
def install_fn(args):
"""install from a binary package"""
@@ -407,31 +411,32 @@ def keys_fn(args):
def preview_fn(args):
"""analyze an installed spec and reports whether executables and libraries are relocatable"""
tty.warn(
"`spack buildcache preview` is deprecated since `spack buildcache push --allow-root` is "
"now the default. This command will be removed in Spack 0.22"
)
"""analyze an installed spec and reports whether executables
and libraries are relocatable
"""
constraints = spack.cmd.parse_specs(args.specs)
specs = spack.store.find(constraints, multiple=True)
# Cycle over the specs that match
for spec in specs:
print("Relocatable nodes")
print("--------------------------------")
print(spec.tree(status_fn=spack.relocate.is_relocatable))
def check_fn(args):
"""check specs against remote binary mirror(s) to see if any need to be rebuilt
this command uses the process exit code to indicate its result, specifically, if the
exit code is non-zero, then at least one of the indicated specs needs to be rebuilt
"""Check specs (either a single spec from --spec, or else the full set
of release specs) against remote binary mirror(s) to see if any need
to be rebuilt. This command uses the process exit code to indicate
its result, specifically, if the exit code is non-zero, then at least
one of the indicated specs needs to be rebuilt.
"""
if args.spec_file:
tty.warn(
"The flag `--spec-file` is deprecated and will be removed in Spack 0.22. "
"Use --spec instead."
)
specs = spack.cmd.parse_specs(args.spec or args.spec_file)
if specs:
specs = _matching_specs(specs, specs)
if args.spec or args.spec_file:
specs = [_concrete_spec_from_args(args)]
else:
specs = spack.cmd.require_active_env("buildcache check").all_specs()
env = spack.cmd.require_active_env(cmd_name="buildcache")
env.concretize()
specs = env.all_specs()
if not specs:
tty.msg("No specs provided, exiting.")
@@ -455,61 +460,63 @@ def check_fn(args):
def download_fn(args):
"""download buildcache entry from a remote mirror to local folder
"""Download buildcache entry from a remote mirror to local folder. This
command uses the process exit code to indicate its result, specifically,
a non-zero exit code indicates that the command failed to download at
least one of the required buildcache components."""
if not args.spec and not args.spec_file:
tty.msg("No specs provided, exiting.")
return
this command uses the process exit code to indicate its result, specifically, a non-zero exit
code indicates that the command failed to download at least one of the required buildcache
components
"""
if args.spec_file:
tty.warn(
"The flag `--spec-file` is deprecated and will be removed in Spack 0.22. "
"Use --spec instead."
)
if not args.path:
tty.msg("No download path provided, exiting")
return
specs = _matching_specs(spack.cmd.parse_specs(args.spec or args.spec_file))
spec = _concrete_spec_from_args(args)
result = bindist.download_single_spec(spec, args.path)
if len(specs) != 1:
tty.die("a single spec argument is required to download from a buildcache")
if not bindist.download_single_spec(specs[0], args.path):
if not result:
sys.exit(1)
def get_buildcache_name_fn(args):
"""get name (prefix) of buildcache entries for this spec"""
tty.warn("This command is deprecated and will be removed in Spack 0.22.")
specs = _matching_specs(spack.cmd.parse_specs(args.spec or args.spec_file))
if len(specs) != 1:
tty.die("a single spec argument is required to get buildcache name")
print(bindist.tarball_name(specs[0], ""))
"""Get name (prefix) of buildcache entries for this spec"""
spec = _concrete_spec_from_args(args)
buildcache_name = bindist.tarball_name(spec, "")
print("{0}".format(buildcache_name))
def save_specfile_fn(args):
"""get full spec for dependencies and write them to files in the specified output directory
uses exit code to signal success or failure. an exit code of zero means the command was likely
successful. if any errors or exceptions are encountered, or if expected command-line arguments
are not provided, then the exit code will be non-zero
"""Get full spec for dependencies, relative to root spec, and write them
to files in the specified output directory. Uses exit code to signal
success or failure. An exit code of zero means the command was likely
successful. If any errors or exceptions are encountered, or if expected
command-line arguments are not provided, then the exit code will be
non-zero.
"""
if not args.root_spec and not args.root_specfile:
tty.msg("No root spec provided, exiting.")
sys.exit(1)
if not args.specs:
tty.msg("No dependent specs provided, exiting.")
sys.exit(1)
if not args.specfile_dir:
tty.msg("No yaml directory provided, exiting.")
sys.exit(1)
if args.root_specfile:
tty.warn(
"The flag `--root-specfile` is deprecated and will be removed in Spack 0.22. "
"Use --root-spec instead."
)
specs = spack.cmd.parse_specs(args.root_spec or args.root_specfile)
if len(specs) != 1:
tty.die("a single spec argument is required to save specfile")
root = specs[0]
if not root.concrete:
root.concretize()
with open(args.root_specfile) as fd:
root_spec_as_json = fd.read()
spec_format = "yaml" if args.root_specfile.endswith("yaml") else "json"
else:
root_spec = Spec(args.root_spec)
root_spec.concretize()
root_spec_as_json = root_spec.to_json(hash=ht.dag_hash)
spec_format = "json"
save_dependency_specfiles(
root, args.specfile_dir, dependencies=spack.cmd.parse_specs(args.specs)
root_spec_as_json, args.specfile_dir, args.specs.split(), spec_format
)
@@ -527,7 +534,7 @@ def copy_buildcache_file(src_url, dest_url, local_path=None):
temp_stage.create()
temp_stage.fetch()
web_util.push_to_url(local_path, dest_url, keep_original=True)
except spack.error.FetchError as e:
except web_util.FetchError as e:
# Expected, since we have to try all the possible extensions
tty.debug("no such file: {0}".format(src_url))
tty.debug(e)
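
This hunk only changes which module `FetchError` is imported from. While such a class migrates, code that must run against both versions sometimes catches both names; a self-contained sketch with stand-in exception classes (not Spack's real ones):

class OldFetchError(Exception):   # stand-in for web_util.FetchError
    pass

class NewFetchError(Exception):   # stand-in for spack.error.FetchError
    pass

try:
    raise NewFetchError("no such file")
except (OldFetchError, NewFetchError) as e:
    # expected when probing possible extensions; try the next one
    print("expected miss:", e)
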
@@ -539,17 +546,17 @@ def copy_buildcache_file(src_url, dest_url, local_path=None):
def sync_fn(args):
"""sync binaries (and associated metadata) from one mirror to another
"""Syncs binaries (and associated metadata) from one mirror to another.
Requires an active environment in order to know which specs to sync.
requires an active environment in order to know which specs to sync
Args:
src (str): Source mirror URL
dest (str): Destination mirror URL
"""
if args.manifest_glob:
manifest_copy(glob.glob(args.manifest_glob))
return 0
if args.src_mirror is None or args.dest_mirror is None:
tty.die("Provide mirrors to sync from and to.")
src_mirror = args.src_mirror
dest_mirror = args.dest_mirror
@@ -629,7 +636,7 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
def update_index_fn(args):
"""update a buildcache index"""
"""Update a buildcache index."""
update_index(args.mirror, update_keys=args.keys)

View File

@@ -3,22 +3,21 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import re
import sys
import llnl.util.lang
from llnl.util import tty
import llnl.util.tty as tty
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.repo
import spack.spec
import spack.stage
import spack.util.crypto
from spack.cmd.common import arguments
from spack.package_base import PackageBase, deprecated_version, preferred_version
from spack.package_base import deprecated_version, preferred_version
from spack.util.editor import editor
from spack.util.format import get_version_lines
from spack.util.naming import valid_fully_qualified_module_name
from spack.version import Version
@@ -34,39 +33,36 @@ def setup_parser(subparser):
default=False,
help="don't clean up staging area when command completes",
)
subparser.add_argument(
sp = subparser.add_mutually_exclusive_group()
sp.add_argument(
"-b",
"--batch",
action="store_true",
default=False,
help="don't ask which versions to checksum",
)
subparser.add_argument(
sp.add_argument(
"-l",
"--latest",
action="store_true",
default=False,
help="checksum the latest available version",
help="checksum the latest available version only",
)
subparser.add_argument(
sp.add_argument(
"-p",
"--preferred",
action="store_true",
default=False,
help="checksum the known Spack preferred version",
help="checksum the preferred version only",
)
modes_parser = subparser.add_mutually_exclusive_group()
modes_parser.add_argument(
subparser.add_argument(
"-a",
"--add-to-package",
action="store_true",
default=False,
help="add new versions to package",
)
modes_parser.add_argument(
"--verify", action="store_true", default=False, help="verify known package checksums"
)
arguments.add_common_arguments(subparser, ["package", "jobs"])
arguments.add_common_arguments(subparser, ["package"])
subparser.add_argument(
"versions", nargs=argparse.REMAINDER, help="versions to generate checksums for"
)
@@ -83,174 +79,89 @@ def checksum(parser, args):
tty.die("`spack checksum` accepts package names, not URLs.")
# Get the package we're going to generate checksums for
pkg_cls = spack.repo.PATH.get_pkg_class(args.package)
pkg_cls = spack.repo.path.get_pkg_class(args.package)
pkg = pkg_cls(spack.spec.Spec(args.package))
# Build a list of versions to checksum
versions = [Version(v) for v in args.versions]
# Define placeholder for remote versions.
# This'll help reduce redundant work if we need to check for the existence
# of remote versions more than once.
remote_versions = None
# Add latest version if requested
if args.latest:
remote_versions = pkg.fetch_remote_versions(args.jobs)
if len(remote_versions) > 0:
latest_version = sorted(remote_versions.keys(), reverse=True)[0]
versions.append(latest_version)
# Add preferred version if requested
if args.preferred:
versions.append(preferred_version(pkg))
# Store a dict of the form version -> URL
url_dict = {}
if not args.versions and args.preferred:
versions = [preferred_version(pkg)]
else:
versions = [Version(v) for v in args.versions]
for version in versions:
if deprecated_version(pkg, version):
tty.warn(f"Version {version} is deprecated")
if versions:
remote_versions = None
for version in versions:
if deprecated_version(pkg, version):
tty.warn("Version {0} is deprecated".format(version))
url = pkg.find_valid_url_for_version(version)
if url is not None:
url_dict[version] = url
continue
# if we get here, it's because no valid url was provided by the package
# do expensive fallback to try to recover
if remote_versions is None:
remote_versions = pkg.fetch_remote_versions(args.jobs)
if version in remote_versions:
url_dict[version] = remote_versions[version]
if len(versions) <= 0:
if remote_versions is None:
remote_versions = pkg.fetch_remote_versions(args.jobs)
url_dict = remote_versions
url = pkg.find_valid_url_for_version(version)
if url is not None:
url_dict[version] = url
continue
# if we get here, it's because no valid url was provided by the package
# do expensive fallback to try to recover
if remote_versions is None:
remote_versions = pkg.fetch_remote_versions()
if version in remote_versions:
url_dict[version] = remote_versions[version]
else:
url_dict = pkg.fetch_remote_versions()
if not url_dict:
tty.die(f"Could not find any remote versions for {pkg.name}")
tty.die("Could not find any remote versions for {0}".format(pkg.name))
# print an empty line to create a new output section block
print()
version_hashes = spack.stage.get_checksums_for_versions(
version_lines = spack.stage.get_checksums_for_versions(
url_dict,
pkg.name,
keep_stage=args.keep_stage,
batch=(args.batch or len(versions) > 0 or len(url_dict) == 1),
batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),
latest=args.latest,
fetch_options=pkg.fetch_options,
)
if args.verify:
print_checksum_status(pkg, version_hashes)
sys.exit(0)
# convert dict into package.py version statements
version_lines = get_version_lines(version_hashes, url_dict)
print()
print(version_lines)
print()
if args.add_to_package:
add_versions_to_package(pkg, version_lines)
filename = spack.repo.path.filename_for_package_name(pkg.name)
# Make sure we also have a newline after the last version
versions = [v + "\n" for v in version_lines.splitlines()]
versions.append("\n")
# We need to insert the versions in reversed order
versions.reverse()
versions.append(" # FIXME: Added by `spack checksum`\n")
version_line = None
with open(filename, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
# Black is drunk, so this is what it looks like for now
# See https://github.com/psf/black/issues/2156 for more information
if lines[i].startswith(" # FIXME: Added by `spack checksum`") or lines[
i
].startswith(" version("):
version_line = i
break
def print_checksum_status(pkg: PackageBase, version_hashes: dict):
"""
Verify checksums present in version_hashes against those present
in the package's instructions.
if version_line is not None:
for v in versions:
lines.insert(version_line, v)
Args:
pkg (spack.package_base.PackageBase): A package class for a given package in Spack.
version_hashes (dict): A dictionary of the form: version -> checksum.
with open(filename, "w") as f:
f.writelines(lines)
"""
results = []
num_verified = 0
failed = False
msg = "opening editor to verify"
max_len = max(len(str(v)) for v in version_hashes)
num_total = len(version_hashes)
if not sys.stdout.isatty():
msg = "please verify"
for version, sha in version_hashes.items():
if version not in pkg.versions:
msg = "No previous checksum"
status = "-"
elif sha == pkg.versions[version]["sha256"]:
msg = "Correct"
status = "="
num_verified += 1
tty.info(
"Added {0} new versions to {1}, "
"{2}.".format(len(versions) - 2, args.package, msg)
)
if sys.stdout.isatty():
editor(filename)
else:
msg = sha
status = "x"
failed = True
results.append("{0:{1}} {2} {3}".format(str(version), max_len, f"[{status}]", msg))
# Display table of checksum results.
tty.msg(f"Verified {num_verified} of {num_total}", "", *llnl.util.lang.elide_list(results), "")
# Terminate at the end of function to prevent additional output.
if failed:
print()
tty.die("Invalid checksums found.")
def add_versions_to_package(pkg: PackageBase, version_lines: str):
"""
Add checksumed versions to a package's instructions and open a user's
editor so they may double check the work of the function.
Args:
pkg (spack.package_base.PackageBase): A package class for a given package in Spack.
version_lines (str): A string of rendered version lines.
"""
# Get filename and path for package
filename = spack.repo.PATH.filename_for_package_name(pkg.name)
num_versions_added = 0
version_statement_re = re.compile(r"([\t ]+version\([^\)]*\))")
version_re = re.compile(r'[\t ]+version\(\s*"([^"]+)"[^\)]*\)')
# Split rendered version lines into tuple of (version, version_line)
# We reverse sort here to make sure the versions match the version_lines
new_versions = []
for ver_line in version_lines.split("\n"):
match = version_re.match(ver_line)
if match:
new_versions.append((Version(match.group(1)), ver_line))
with open(filename, "r+") as f:
contents = f.read()
split_contents = version_statement_re.split(contents)
for i, subsection in enumerate(split_contents):
# If there are no more versions to add we should exit
if len(new_versions) <= 0:
break
# Check if the section contains a version
contents_version = version_re.match(subsection)
if contents_version is not None:
parsed_version = Version(contents_version.group(1))
if parsed_version < new_versions[0][0]:
split_contents[i:i] = [new_versions.pop(0)[1], " # FIXME", "\n"]
num_versions_added += 1
elif parsed_version == new_versions[0][0]:
new_versions.pop(0)
# Seek back to the start of the file so we can rewrite the file contents.
f.seek(0)
f.writelines("".join(split_contents))
tty.msg(f"Added {num_versions_added} new versions to {pkg.name}")
tty.msg(f"Open {filename} to review the additions.")
if sys.stdout.isatty():
editor(filename)
tty.warn("Could not add new versions to {0}.".format(args.package))

View File

@@ -18,8 +18,6 @@
import spack.environment as ev
import spack.hash_types as ht
import spack.mirror
import spack.util.gpg as gpg_util
import spack.util.timer as timer
import spack.util.url as url_util
import spack.util.web as web_util
@@ -49,36 +47,40 @@ def setup_parser(subparser):
generate.add_argument(
"--output-file",
default=None,
help="pathname for the generated gitlab ci yaml file\n\n"
"path to the file where generated jobs file should be written. "
"default is .gitlab-ci.yml in the root of the repository",
help="""pathname for the generated gitlab ci yaml file
Path to the file where generated jobs file should
be written. Default is .gitlab-ci.yml in the root of
the repository.""",
)
generate.add_argument(
"--copy-to",
default=None,
help="path to additional directory for job files\n\n"
"this option provides an absolute path to a directory where the generated "
"jobs yaml file should be copied. default is not to copy",
help="""path to additional directory for job files
This option provides an absolute path to a directory
where the generated jobs yaml file should be copied.
Default is not to copy.""",
)
generate.add_argument(
"--optimize",
action="store_true",
default=False,
help="(experimental) optimize the gitlab yaml file for size\n\n"
"run the generated document through a series of optimization passes "
"designed to reduce the size of the generated file",
help="""(Experimental) optimize the gitlab yaml file for size
Run the generated document through a series of
optimization passes designed to reduce the size
of the generated file.""",
)
generate.add_argument(
"--dependencies",
action="store_true",
default=False,
help="(experimental) disable DAG scheduling (use 'plain' dependencies)",
help="(Experimental) disable DAG scheduling; use " ' "plain" dependencies.',
)
generate.add_argument(
"--buildcache-destination",
default=None,
help="override the mirror configured in the environment\n\n"
"allows for pushing binaries from the generated pipeline to a different location",
help="Override the mirror configured in the environment (spack.yaml) "
+ "in order to push binaries from the generated pipeline to a "
+ "different location.",
)
prune_group = generate.add_mutually_exclusive_group()
prune_group.add_argument(
@@ -86,37 +88,45 @@ def setup_parser(subparser):
action="store_true",
dest="prune_dag",
default=True,
help="skip up-to-date specs\n\n"
"do not generate jobs for specs that are up-to-date on the mirror",
help="""skip up-to-date specs
Do not generate jobs for specs that are up-to-date
on the mirror.""",
)
prune_group.add_argument(
"--no-prune-dag",
action="store_false",
dest="prune_dag",
default=True,
help="process up-to-date specs\n\n"
"generate jobs for specs even when they are up-to-date on the mirror",
help="""process up-to-date specs
Generate jobs for specs even when they are up-to-date
on the mirror.""",
)
generate.add_argument(
"--check-index-only",
action="store_true",
dest="index_only",
default=False,
help="only check spec state from buildcache indices\n\n"
"Spack always checks specs against configured binary mirrors, regardless of the DAG "
"pruning option. if enabled, Spack will assume all remote buildcache indices are "
"up-to-date when assessing whether the spec on the mirror, if present, is up-to-date. "
"this has the benefit of reducing pipeline generation time but at the potential cost of "
"needlessly rebuilding specs when the indices are outdated. if not enabled, Spack will "
"fetch remote spec files directly to assess whether the spec on the mirror is up-to-date",
help="""only check spec state from buildcache indices
Spack always checks specs against configured binary
mirrors, regardless of the DAG pruning option.
If enabled, Spack will assume all remote buildcache
indices are up-to-date when assessing whether the spec
on the mirror, if present, is up-to-date. This has the
benefit of reducing pipeline generation time but at the
potential cost of needlessly rebuilding specs when the
indices are outdated.
If not enabled, Spack will fetch remote spec files
directly to assess whether the spec on the mirror is
up-to-date.""",
)
generate.add_argument(
"--artifacts-root",
default=None,
help="path to the root of the artifacts directory\n\n"
"if provided, concrete environment files (spack.yaml, spack.lock) will be generated under "
"this directory. their location will be passed to generated child jobs through the "
"SPACK_CONCRETE_ENVIRONMENT_PATH variable",
help="""path to the root of the artifacts directory
If provided, concrete environment files (spack.yaml,
spack.lock) will be generated under this directory.
Their location will be passed to generated child jobs
through the SPACK_CONCRETE_ENVIRONMENT_PATH variable.""",
)
generate.set_defaults(func=ci_generate)
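
Both help-string styles above build a single string: adjacent string literals concatenate at compile time, while triple-quoted strings also keep any embedded newlines and source indentation, which is why the indented form renders with leading spaces in `--help` output. A small check, for illustration only:

a = ("path to the artifacts directory\n\n"
     "if provided, files are generated here")
b = """path to the artifacts directory

if provided, files are generated here"""
assert a == b  # identical only because b is written without indentation

c = """path to the artifacts directory
        if provided, files are generated here"""
assert "        if" in c  # indented continuation lines keep their spaces
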
@@ -140,13 +150,13 @@ def setup_parser(subparser):
"--tests",
action="store_true",
default=False,
help="run stand-alone tests after the build",
help="""run stand-alone tests after the build""",
)
rebuild.add_argument(
"--fail-fast",
action="store_true",
default=False,
help="stop stand-alone tests after the first failure",
help="""stop stand-alone tests after the first failure""",
)
rebuild.set_defaults(func=ci_rebuild)
@@ -156,39 +166,23 @@ def setup_parser(subparser):
description=deindent(ci_reproduce.__doc__),
help=spack.cmd.first_line(ci_reproduce.__doc__),
)
reproduce.add_argument("job_url", help="URL of job artifacts bundle")
reproduce.add_argument(
"--runtime",
help="Container runtime to use.",
default="docker",
choices=["docker", "podman"],
)
reproduce.add_argument("job_url", help="Url of job artifacts bundle")
reproduce.add_argument(
"--working-dir",
help="where to unpack artifacts",
help="Where to unpack artifacts",
default=os.path.join(os.getcwd(), "ci_reproduction"),
)
reproduce.add_argument(
"-s", "--autostart", help="Run docker reproducer automatically", action="store_true"
)
gpg_group = reproduce.add_mutually_exclusive_group(required=False)
gpg_group.add_argument(
"--gpg-file", help="Path to public GPG key for validating binary cache installs"
)
gpg_group.add_argument(
"--gpg-url", help="URL to public GPG key for validating binary cache installs"
)
reproduce.set_defaults(func=ci_reproduce)
def ci_generate(args):
"""generate jobs file from a CI-aware spack file
"""Generate jobs file from a CI-aware spack file.
if you want to report the results on CDash, you will need to set the SPACK_CDASH_AUTH_TOKEN
before invoking this command. the value must be the CDash authorization token needed to create
a build group and register all generated jobs under it
"""
If you want to report the results on CDash, you will need to set
the SPACK_CDASH_AUTH_TOKEN before invoking this command. The
value must be the CDash authorization token needed to create a
build group and register all generated jobs under it."""
env = spack.cmd.require_active_env(cmd_name="ci generate")
output_file = args.output_file
@@ -229,11 +223,10 @@ def ci_generate(args):
def ci_reindex(args):
"""rebuild the buildcache index for the remote mirror
"""Rebuild the buildcache index for the remote mirror.
use the active, gitlab-enabled environment to rebuild the buildcache index for the associated
mirror
"""
Use the active, gitlab-enabled environment to rebuild the buildcache
index for the associated mirror."""
env = spack.cmd.require_active_env(cmd_name="ci rebuild-index")
yaml_root = env.manifest[ev.TOP_LEVEL_KEY]
@@ -249,13 +242,10 @@ def ci_reindex(args):
def ci_rebuild(args):
"""rebuild a spec if it is not on the remote mirror
check a single spec against the remote mirror, and rebuild it from source if the mirror does
not contain the hash
"""
rebuild_timer = timer.Timer()
"""Rebuild a spec if it is not on the remote mirror.
Check a single spec against the remote mirror, and rebuild it from
source if the mirror does not contain the hash."""
env = spack.cmd.require_active_env(cmd_name="ci rebuild")
# Make sure the environment is "gitlab-enabled", or else there's nothing
@@ -284,23 +274,13 @@ def ci_rebuild(args):
signing_key = os.environ.get("SPACK_SIGNING_KEY")
job_spec_pkg_name = os.environ.get("SPACK_JOB_SPEC_PKG_NAME")
job_spec_dag_hash = os.environ.get("SPACK_JOB_SPEC_DAG_HASH")
compiler_action = os.environ.get("SPACK_COMPILER_ACTION")
spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE")
remote_mirror_override = os.environ.get("SPACK_REMOTE_MIRROR_OVERRIDE")
remote_mirror_url = os.environ.get("SPACK_REMOTE_MIRROR_URL")
spack_ci_stack_name = os.environ.get("SPACK_CI_STACK_NAME")
shared_pr_mirror_url = os.environ.get("SPACK_CI_SHARED_PR_MIRROR_URL")
rebuild_everything = os.environ.get("SPACK_REBUILD_EVERYTHING")
require_signing = os.environ.get("SPACK_REQUIRE_SIGNING")
# If signing key was provided via "SPACK_SIGNING_KEY", then try to import it.
if signing_key:
spack_ci.import_signing_key(signing_key)
# Fail early if signing is required but we don't have a signing key
sign_binaries = require_signing is not None and require_signing.lower() == "true"
if sign_binaries and not spack_ci.can_sign_binaries():
gpg_util.list(False, True)
tty.die("SPACK_REQUIRE_SIGNING=True => spack must have exactly one signing key")
# Construct absolute paths relative to current $CI_PROJECT_DIR
ci_project_dir = os.environ.get("CI_PROJECT_DIR")
@@ -315,6 +295,7 @@ def ci_rebuild(args):
tty.debug("pipeline_artifacts_dir = {0}".format(pipeline_artifacts_dir))
tty.debug("remote_mirror_url = {0}".format(remote_mirror_url))
tty.debug("job_spec_pkg_name = {0}".format(job_spec_pkg_name))
tty.debug("compiler_action = {0}".format(compiler_action))
# Query the environment manifest to find out whether we're reporting to a
# CDash instance, and if so, gather some information from the manifest to
@@ -425,6 +406,19 @@ def ci_rebuild(args):
dst_file = os.path.join(repro_dir, file_name)
shutil.copyfile(src_file, dst_file)
# If signing key was provided via "SPACK_SIGNING_KEY", then try to
# import it.
if signing_key:
spack_ci.import_signing_key(signing_key)
# Depending on the specifics of this job, we might need to turn on the
# "config:install_missing compilers" option (to build this job spec
# with a bootstrapped compiler), or possibly run "spack compiler find"
# (to build a bootstrap compiler or one of its deps in a
# compiler-agnostic way), or maybe do nothing at all (to build a spec
# using a compiler already installed on the target system).
spack_ci.configure_compilers(compiler_action)
# Write this job's spec json into the reproduction directory, and it will
# also be used in the generated "spack install" command to install the spec
tty.debug("job concrete spec path: {0}".format(job_spec_json_path))
@@ -622,7 +616,7 @@ def ci_rebuild(args):
)
reports_dir = fs.join_path(os.getcwd(), "cdash_report")
if args.tests and broken_tests:
tty.warn("Unable to run stand-alone tests since listed in ci's 'broken-tests-packages'")
tty.warn("Unable to run stand-alone tests since listed in " "ci's 'broken-tests-packages'")
if cdash_handler:
msg = "Package is listed in ci's broken-tests-packages"
cdash_handler.report_skipped(job_spec, reports_dir, reason=msg)
@@ -665,7 +659,7 @@ def ci_rebuild(args):
tty.warn("No recognized test results reporting option")
else:
tty.warn("Unable to run stand-alone tests due to unsuccessful installation")
tty.warn("Unable to run stand-alone tests due to unsuccessful " "installation")
if cdash_handler:
msg = "Failed to install the package"
cdash_handler.report_skipped(job_spec, reports_dir, reason=msg)
@@ -681,7 +675,7 @@ def ci_rebuild(args):
input_spec=job_spec,
buildcache_mirror_url=buildcache_mirror_url,
pipeline_mirror_url=pipeline_mirror_url,
sign_binaries=spack_ci.can_sign_binaries(),
pr_pipeline=spack_is_pr_pipeline,
):
msg = tty.msg if result.success else tty.warn
msg(
@@ -725,7 +719,7 @@ def ci_rebuild(args):
\033[34mTo reproduce this build locally, run:
spack ci reproduce-build {0} [--working-dir <dir>] [--autostart]
spack ci reproduce-build {0} [--working-dir <dir>]
If this project does not have public pipelines, you will need to first:
@@ -739,38 +733,19 @@ def ci_rebuild(args):
print(reproduce_msg)
rebuild_timer.stop()
try:
with open("install_timers.json", "w") as timelog:
extra_attributes = {"name": ".ci-rebuild"}
rebuild_timer.write_json(timelog, extra_attributes=extra_attributes)
except Exception as e:
tty.debug(str(e))
# Tie job success/failure to the success/failure of building the spec
return install_exit_code
def ci_reproduce(args):
"""generate instructions for reproducing the spec rebuild job
"""Generate instructions for reproducing the spec rebuild job.
artifacts of the provided gitlab pipeline rebuild job's URL will be used to derive
instructions for reproducing the build locally
"""
Artifacts of the provided gitlab pipeline rebuild job's URL will be
used to derive instructions for reproducing the build locally."""
job_url = args.job_url
work_dir = args.working_dir
autostart = args.autostart
runtime = args.runtime
# Allow passing GPG key for reproducing protected CI jobs
if args.gpg_file:
gpg_key_url = url_util.path_to_file_url(args.gpg_file)
elif args.gpg_url:
gpg_key_url = args.gpg_url
else:
gpg_key_url = None
return spack_ci.reproduce_ci_job(job_url, work_dir, autostart, gpg_key_url, runtime)
return spack_ci.reproduce_ci_job(job_url, work_dir)
def ci(parser, args):

View File

@@ -17,7 +17,6 @@
import spack.config
import spack.repo
import spack.stage
import spack.store
import spack.util.path
from spack.paths import lib_path, var_path
@@ -115,18 +114,22 @@ def clean(parser, args):
if args.stage:
tty.msg("Removing all temporary build stages")
spack.stage.purge()
# Temp directory where buildcaches are extracted
extract_tmp = os.path.join(spack.store.layout.root, ".tmp")
if os.path.exists(extract_tmp):
tty.debug("Removing {0}".format(extract_tmp))
shutil.rmtree(extract_tmp)
if args.downloads:
tty.msg("Removing cached downloads")
spack.caches.FETCH_CACHE.destroy()
spack.caches.fetch_cache.destroy()
if args.failures:
tty.msg("Removing install failure marks")
spack.store.STORE.failure_tracker.clear_all()
spack.installer.clear_failures()
if args.misc_cache:
tty.msg("Removing cached information on repositories")
spack.caches.MISC_CACHE.destroy()
spack.caches.misc_cache.destroy()
if args.python_cache:
tty.msg("Removing python cache files")

View File

@@ -48,7 +48,7 @@ def get_origin_info(remote):
)
except ProcessError:
origin_url = _SPACK_UPSTREAM
tty.warn("No git repository found; using default upstream URL: %s" % origin_url)
tty.warn("No git repository found; " "using default upstream URL: %s" % origin_url)
return (origin_url.strip(), branch.strip())
@@ -69,7 +69,7 @@ def clone(parser, args):
files_in_the_way = os.listdir(prefix)
if files_in_the_way:
tty.die(
"There are already files there! Delete these files before boostrapping spack.",
"There are already files there! " "Delete these files before boostrapping spack.",
*files_in_the_way,
)

View File

@@ -3,17 +3,17 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import copy
import os
import re
import sys
from argparse import ArgumentParser, Namespace
from typing import IO, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.argparsewriter import ArgparseRstWriter, ArgparseWriter, Command
from llnl.util.argparsewriter import ArgparseCompletionWriter, ArgparseRstWriter, ArgparseWriter
from llnl.util.tty.colify import colify
import spack.cmd
@@ -27,46 +27,28 @@
#: list of command formatters
formatters: Dict[str, Callable[[Namespace, IO], None]] = {}
formatters = {}
#: standard arguments for updating completion scripts
#: we iterate through these when called with --update-completion
update_completion_args: Dict[str, Dict[str, Any]] = {
update_completion_args = {
"bash": {
"aliases": True,
"format": "bash",
"header": os.path.join(spack.paths.share_path, "bash", "spack-completion.bash"),
"header": os.path.join(spack.paths.share_path, "bash", "spack-completion.in"),
"update": os.path.join(spack.paths.share_path, "spack-completion.bash"),
},
"fish": {
"aliases": True,
"format": "fish",
"header": os.path.join(spack.paths.share_path, "fish", "spack-completion.fish"),
"update": os.path.join(spack.paths.share_path, "spack-completion.fish"),
},
}
}
def formatter(func: Callable[[Namespace, IO], None]) -> Callable[[Namespace, IO], None]:
"""Decorator used to register formatters.
Args:
func: Formatting function.
Returns:
The same function.
"""
def formatter(func):
"""Decorator used to register formatters"""
formatters[func.__name__] = func
return func
def setup_parser(subparser: ArgumentParser) -> None:
"""Set up the argument parser.
Args:
subparser: Preliminary argument parser.
"""
def setup_parser(subparser):
subparser.add_argument(
"--update-completion",
action="store_true",
@@ -109,34 +91,18 @@ class SpackArgparseRstWriter(ArgparseRstWriter):
def __init__(
self,
prog: str,
out: IO = sys.stdout,
aliases: bool = False,
documented_commands: Set[str] = set(),
rst_levels: Sequence[str] = ["-", "-", "^", "~", ":", "`"],
prog,
out=None,
aliases=False,
documented_commands=[],
rst_levels=["-", "-", "^", "~", ":", "`"],
):
"""Initialize a new SpackArgparseRstWriter instance.
Args:
prog: Program name.
out: File object to write to.
aliases: Whether or not to include subparsers for aliases.
documented_commands: Set of commands with additional documentation.
rst_levels: List of characters for rst section headings.
"""
super().__init__(prog, out, aliases, rst_levels)
out = sys.stdout if out is None else out
super(SpackArgparseRstWriter, self).__init__(prog, out, aliases, rst_levels)
self.documented = documented_commands
def usage(self, usage: str) -> str:
"""Example usage of a command.
Args:
usage: Command usage.
Returns:
Usage of a command.
"""
string = super().usage(usage)
def usage(self, *args):
string = super(SpackArgparseRstWriter, self).usage(*args)
cmd = self.parser.prog.replace(" ", "-")
if cmd in self.documented:
@@ -146,21 +112,11 @@ def usage(self, usage: str) -> str:
class SubcommandWriter(ArgparseWriter):
"""Write argparse output as a list of subcommands."""
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of this subcommand.
"""
def format(self, cmd):
return " " * self.level + cmd.prog + "\n"
_positional_to_subroutine: Dict[str, str] = {
_positional_to_subroutine = {
"package": "_all_packages",
"spec": "_all_packages",
"filter": "_all_packages",
@@ -179,76 +135,10 @@ def format(self, cmd: Command) -> str:
}
class BashCompletionWriter(ArgparseWriter):
class BashCompletionWriter(ArgparseCompletionWriter):
"""Write argparse output as bash programmable tab completion."""
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of this subcommand.
"""
assert cmd.optionals # we should always at least have -h, --help
assert not (cmd.positionals and cmd.subcommands) # one or the other
# We only care about the arguments/flags, not the help messages
positionals: Tuple[str, ...] = ()
if cmd.positionals:
positionals, _, _, _ = zip(*cmd.positionals)
optionals, _, _, _, _ = zip(*cmd.optionals)
subcommands: Tuple[str, ...] = ()
if cmd.subcommands:
_, subcommands, _ = zip(*cmd.subcommands)
# Flatten lists of lists
optionals = [x for xx in optionals for x in xx]
return (
self.start_function(cmd.prog)
+ self.body(positionals, optionals, subcommands)
+ self.end_function(cmd.prog)
)
def start_function(self, prog: str) -> str:
"""Return the syntax needed to begin a function definition.
Args:
prog: Program name.
Returns:
Function definition beginning.
"""
name = prog.replace("-", "_").replace(" ", "_")
return "\n_{0}() {{".format(name)
def end_function(self, prog: str) -> str:
"""Return the syntax needed to end a function definition.
Args:
prog: Program name
Returns:
Function definition ending.
"""
return "}\n"
def body(
self, positionals: Sequence[str], optionals: Sequence[str], subcommands: Sequence[str]
) -> str:
"""Return the body of the function.
Args:
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
Returns:
Function body.
"""
def body(self, positionals, optionals, subcommands):
if positionals:
return """
if $list_options
@@ -278,15 +168,7 @@ def body(
self.optionals(optionals)
)
def positionals(self, positionals: Sequence[str]) -> str:
"""Return the syntax for reporting positional arguments.
Args:
positionals: List of positional arguments.
Returns:
Syntax for positional arguments.
"""
def positionals(self, positionals):
# If match found, return function name
for positional in positionals:
for key, value in _positional_to_subroutine.items():
@@ -296,439 +178,22 @@ def positionals(self, positionals: Sequence[str]) -> str:
# If no matches found, return empty list
return 'SPACK_COMPREPLY=""'
def optionals(self, optionals: Sequence[str]) -> str:
"""Return the syntax for reporting optional flags.
Args:
optionals: List of optional arguments.
Returns:
Syntax for optional flags.
"""
def optionals(self, optionals):
return 'SPACK_COMPREPLY="{0}"'.format(" ".join(optionals))
def subcommands(self, subcommands: Sequence[str]) -> str:
"""Return the syntax for reporting subcommands.
Args:
subcommands: List of subcommand parsers.
Returns:
Syntax for subcommand parsers
"""
def subcommands(self, subcommands):
return 'SPACK_COMPREPLY="{0}"'.format(" ".join(subcommands))
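
`BashCompletionWriter.format` stitches `start_function`, `body`, and `end_function` into one shell function per command that fills `SPACK_COMPREPLY`. A reduced sketch of the assembled output (simplified; the real writer also dispatches on positionals and subcommands):

def bash_function(prog, optionals):
    """Assemble one completion function, as format() above stitches together
    start_function + body + end_function."""
    name = prog.replace("-", "_").replace(" ", "_")
    body = '    SPACK_COMPREPLY="%s"' % " ".join(optionals)
    return "\n_%s() {\n%s\n}\n" % (name, body)

print(bash_function("spack buildcache push", ["-h", "--help", "--force"]))
# _spack_buildcache_push() {
#     SPACK_COMPREPLY="-h --help --force"
# }
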
# Map argument destination names to their complete commands
# Earlier items in the list have higher precedence
_dest_to_fish_complete = {
("activate", "view"): "-f -a '(__fish_complete_directories)'",
("bootstrap root", "path"): "-f -a '(__fish_complete_directories)'",
("mirror add", "mirror"): "-f",
("repo add", "path"): "-f -a '(__fish_complete_directories)'",
("test find", "filter"): "-f -a '(__fish_spack_tests)'",
("bootstrap", "name"): "-f -a '(__fish_spack_bootstrap_names)'",
("buildcache create", "key"): "-f -a '(__fish_spack_gpg_keys)'",
("build-env", r"spec \[--\].*"): "-f -a '(__fish_spack_build_env_spec)'",
("checksum", "package"): "-f -a '(__fish_spack_packages)'",
(
"checksum",
"versions",
): "-f -a '(__fish_spack_package_versions $__fish_spack_argparse_argv[1])'",
("config", "path"): "-f -a '(__fish_spack_colon_path)'",
("config", "section"): "-f -a '(__fish_spack_config_sections)'",
("develop", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("diff", "specs?"): "-f -a '(__fish_spack_installed_specs)'",
("gpg sign", "output"): "-f -a '(__fish_complete_directories)'",
("gpg", "keys?"): "-f -a '(__fish_spack_gpg_keys)'",
("graph", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("help", "help_command"): "-f -a '(__fish_spack_commands)'",
("list", "filter"): "-f -a '(__fish_spack_packages)'",
("mirror", "mirror"): "-f -a '(__fish_spack_mirrors)'",
("pkg", "package"): "-f -a '(__fish_spack_pkg_packages)'",
("remove", "specs?"): "-f -a '(__fish_spack_installed_specs)'",
("repo", "namespace_or_path"): "$__fish_spack_force_files -a '(__fish_spack_repos)'",
("restage", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("rm", "specs?"): "-f -a '(__fish_spack_installed_specs)'",
("solve", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("spec", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("stage", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("test-env", r"spec \[--\].*"): "-f -a '(__fish_spack_build_env_spec)'",
("test", r"\[?name.*"): "-f -a '(__fish_spack_tests)'",
("undevelop", "specs?"): "-f -k -a '(__fish_spack_specs_or_id)'",
("verify", "specs_or_files"): "$__fish_spack_force_files -a '(__fish_spack_installed_specs)'",
("view", "path"): "-f -a '(__fish_complete_directories)'",
("", "comment"): "-f",
("", "compiler_spec"): "-f -a '(__fish_spack_installed_compilers)'",
("", "config_scopes"): "-f -a '(__fish_complete_directories)'",
("", "extendable"): "-f -a '(__fish_spack_extensions)'",
("", "installed_specs?"): "-f -a '(__fish_spack_installed_specs)'",
("", "job_url"): "-f",
("", "location_env"): "-f -a '(__fish_complete_directories)'",
("", "pytest_args"): "-f -a '(__fish_spack_unit_tests)'",
("", "package_or_file"): "$__fish_spack_force_files -a '(__fish_spack_packages)'",
("", "package_or_user"): "-f -a '(__fish_spack_packages)'",
("", "package"): "-f -a '(__fish_spack_packages)'",
("", "PKG"): "-f -a '(__fish_spack_packages)'",
("", "prefix"): "-f -a '(__fish_complete_directories)'",
("", r"rev\d?"): "-f -a '(__fish_spack_git_rev)'",
("", "specs?"): "-f -k -a '(__fish_spack_specs)'",
("", "tags?"): "-f -a '(__fish_spack_tags)'",
("", "virtual_package"): "-f -a '(__fish_spack_providers)'",
("", "working_dir"): "-f -a '(__fish_complete_directories)'",
("", r"(\w*_)?env"): "-f -a '(__fish_spack_environments)'",
("", r"(\w*_)?dir(ectory)?"): "-f -a '(__fish_spack_environments)'",
("", r"(\w*_)?mirror_name"): "-f -a '(__fish_spack_mirrors)'",
}
def _fish_dest_get_complete(prog: str, dest: str) -> Optional[str]:
"""Map from subcommand to autocompletion argument.
Args:
prog: Program name.
dest: Destination.
Returns:
Autocompletion argument.
"""
s = prog.split(None, 1)
subcmd = s[1] if len(s) == 2 else ""
for (prog_key, pos_key), value in _dest_to_fish_complete.items():
if subcmd.startswith(prog_key) and re.match("^" + pos_key + "$", dest):
return value
return None
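
`_fish_dest_get_complete` anchors each destination pattern with `^...$` and scans the table in insertion order, so earlier entries take precedence. A self-contained, reduced version of the same dispatch with a two-entry toy table:

import re

table = {
    ("mirror", r"mirror"): "complete-mirrors",  # subcommand-specific entry
    ("", r"specs?"): "complete-specs",          # catch-all entry
}

def get_complete(prog, dest):
    parts = prog.split(None, 1)
    subcmd = parts[1] if len(parts) == 2 else ""
    for (prog_key, pos_key), value in table.items():
        if subcmd.startswith(prog_key) and re.match("^" + pos_key + "$", dest):
            return value
    return None

print(get_complete("spack mirror add", "mirror"))  # complete-mirrors
print(get_complete("spack install", "specs"))      # complete-specs
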
class FishCompletionWriter(ArgparseWriter):
"""Write argparse output as bash programmable tab completion."""
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of a node.
"""
assert cmd.optionals # we should always at least have -h, --help
assert not (cmd.positionals and cmd.subcommands) # one or the other
# We also need help messages and how arguments are used
# So we pass everything to completion writer
positionals = cmd.positionals
optionals = cmd.optionals
subcommands = cmd.subcommands
return (
self.prog_comment(cmd.prog)
+ self.optspecs(cmd.prog, optionals)
+ self.complete(cmd.prog, positionals, optionals, subcommands)
)
def _quote(self, string: str) -> str:
"""Quote string and escape special characters if necessary.
Args:
string: Input string.
Returns:
Quoted string.
"""
# Goal here is to match fish_indent behavior
# Strings without spaces (or other special characters) do not need to be escaped
if not any([sub in string for sub in [" ", "'", '"']]):
return string
string = string.replace("'", r"\'")
return f"'{string}'"
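
`_quote` mirrors `fish_indent` behavior: bare words pass through, anything containing spaces or quotes is single-quoted with escaped single quotes. The same rule as a standalone toy:

def quote(s):
    """Same rule as _quote above: bare words pass through unmodified."""
    if not any(sub in s for sub in [" ", "'", '"']):
        return s
    return "'" + s.replace("'", r"\'") + "'"

print(quote("zlib"))         # zlib
print(quote("don't prune"))  # 'don\'t prune'
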
def optspecs(
self,
prog: str,
optionals: List[Tuple[Sequence[str], List[str], str, Union[int, str, None], str]],
) -> str:
"""Read the optionals and return the command to set optspec.
Args:
prog: Program name.
optionals: List of optional arguments.
Returns:
Command to set optspec variable.
"""
# Variables of optspecs
optspec_var = "__fish_spack_optspecs_" + prog.replace(" ", "_").replace("-", "_")
if optionals is None:
return "set -g %s\n" % optspec_var
# Build optspec by iterating over options
args = []
for flags, dest, _, nargs, _ in optionals:
if len(flags) == 0:
continue
required = ""
# Because nargs '?' is treated differently in fish, we treat it as required.
# Because multi-argument options are not supported, we treat it like one argument.
required = "="
if nargs == 0:
required = ""
# Pair short options with long options
# We need to do this because fish doesn't support multiple short
# or long options.
# However, since we are pairing options only, this is fine
short = [f[1:] for f in flags if f.startswith("-") and len(f) == 2]
long = [f[2:] for f in flags if f.startswith("--")]
while len(short) > 0 and len(long) > 0:
arg = "%s/%s%s" % (short.pop(), long.pop(), required)
while len(short) > 0:
arg = "%s/%s" % (short.pop(), required)
while len(long) > 0:
arg = "%s%s" % (long.pop(), required)
args.append(arg)
# Even if there is no option, we still set variable.
# In fish such variable is an empty array, we use it to
# indicate that such subcommand exists.
args = " ".join(args)
return "set -g %s %s\n" % (optspec_var, args)
@staticmethod
def complete_head(
prog: str, index: Optional[int] = None, nargs: Optional[Union[int, str]] = None
) -> str:
"""Return the head of the completion command.
Args:
prog: Program name.
index: Index of positional argument.
nargs: Number of arguments.
Returns:
Head of the completion command.
"""
# Split command and subcommand
s = prog.split(None, 1)
subcmd = s[1] if len(s) == 2 else ""
if index is None:
return "complete -c %s -n '__fish_spack_using_command %s'" % (s[0], subcmd)
elif nargs in [argparse.ZERO_OR_MORE, argparse.ONE_OR_MORE, argparse.REMAINDER]:
head = "complete -c %s -n '__fish_spack_using_command_pos_remainder %d %s'"
else:
head = "complete -c %s -n '__fish_spack_using_command_pos %d %s'"
return head % (s[0], index, subcmd)
def complete(
self,
prog: str,
positionals: List[Tuple[str, Optional[Iterable[Any]], Union[int, str, None], str]],
optionals: List[Tuple[Sequence[str], List[str], str, Union[int, str, None], str]],
subcommands: List[Tuple[ArgumentParser, str, str]],
) -> str:
"""Return all the completion commands.
Args:
prog: Program name.
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
Returns:
Completion command.
"""
commands = []
if positionals:
commands.append(self.positionals(prog, positionals))
if subcommands:
commands.append(self.subcommands(prog, subcommands))
if optionals:
commands.append(self.optionals(prog, optionals))
return "".join(commands)
def positionals(
self,
prog: str,
positionals: List[Tuple[str, Optional[Iterable[Any]], Union[int, str, None], str]],
) -> str:
"""Return the completion for positional arguments.
Args:
prog: Program name.
positionals: List of positional arguments.
Returns:
Completion command.
"""
commands = []
for idx, (args, choices, nargs, help) in enumerate(positionals):
# Make sure we always get same order of output
if isinstance(choices, dict):
choices = sorted(choices.keys())
elif isinstance(choices, (set, frozenset)):
choices = sorted(choices)
# Remove platform-specific choices to avoid hard-coding the platform.
if choices is not None:
valid_choices = []
for choice in choices:
if spack.platforms.host().name not in choice:
valid_choices.append(choice)
choices = valid_choices
head = self.complete_head(prog, idx, nargs)
if choices is not None:
# If there are choices, we provide a completion for all possible values.
commands.append(head + " -f -a %s" % self._quote(" ".join(choices)))
else:
# Otherwise, we try to find a predefined completion for it
value = _fish_dest_get_complete(prog, args)
if value is not None:
commands.append(head + " " + value)
return "\n".join(commands) + "\n"
def prog_comment(self, prog: str) -> str:
"""Return a comment line for the command.
Args:
prog: Program name.
Returns:
Comment line.
"""
return "\n# %s\n" % prog
def optionals(
self,
prog: str,
optionals: List[Tuple[Sequence[str], List[str], str, Union[int, str, None], str]],
) -> str:
"""Return the completion for optional arguments.
Args:
prog: Program name.
optionals: List of optional arguments.
Returns:
Completion command.
"""
commands = []
head = self.complete_head(prog)
for flags, dest, _, nargs, help in optionals:
# Make sure we always get the same order of output
if isinstance(dest, dict):
dest = sorted(dest.keys())
elif isinstance(dest, (set, frozenset)):
dest = sorted(dest)
# Remove platform-specific choices to avoid hard-coding the platform.
if dest is not None:
valid_choices = []
for choice in dest:
if spack.platforms.host().name not in choice:
valid_choices.append(choice)
dest = valid_choices
# To provide a description for optionals, as well as possible values,
# we need to use two separate completion commands.
# Otherwise, each option would have the same description.
prefix = head
# Add all flags to the completion
for f in flags:
if f.startswith("--"):
long = f[2:]
prefix += " -l %s" % long
elif f.startswith("-"):
short = f[1:]
assert len(short) == 1
prefix += " -s %s" % short
# Check if the option requires an argument.
# Currently multi-argument options are not supported, so we treat them like one argument.
if nargs != 0:
prefix += " -r"
if dest is not None:
# If there are choices, we provide a completion for all possible values.
commands.append(prefix + " -f -a %s" % self._quote(" ".join(dest)))
else:
# Otherwise, we try to find a predefined completion for it
value = _fish_dest_get_complete(prog, dest)
if value is not None:
commands.append(prefix + " " + value)
if help:
commands.append(prefix + " -d %s" % self._quote(help))
return "\n".join(commands) + "\n"
def subcommands(self, prog: str, subcommands: List[Tuple[ArgumentParser, str, str]]) -> str:
"""Return the completion for subcommands.
Args:
prog: Program name.
subcommands: List of subcommand parsers.
Returns:
Completion command.
"""
commands = []
head = self.complete_head(prog, 0)
for _, subcommand, help in subcommands:
command = head + " -f -a %s" % self._quote(subcommand)
if help is not None and len(help) > 0:
help = help.split("\n")[0]
command += " -d %s" % self._quote(help)
commands.append(command)
return "\n".join(commands) + "\n"
@formatter
def subcommands(args: Namespace, out: IO) -> None:
"""Hierarchical tree of subcommands.
Args:
args: Command-line arguments.
out: File object to write to.
"""
def subcommands(args, out):
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
writer = SubcommandWriter(parser.prog, out, args.aliases)
writer.write(parser)
def rst_index(out: IO) -> None:
"""Generate an index of all commands.
Args:
out: File object to write to.
"""
def rst_index(out):
out.write("\n")
index = spack.main.index_commands()
@@ -756,19 +221,13 @@ def rst_index(out: IO) -> None:
@formatter
def rst(args: Namespace, out: IO) -> None:
"""ReStructuredText documentation of subcommands.
Args:
args: Command-line arguments.
out: File object to write to.
"""
def rst(args, out):
# create a parser with all commands
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
# extract cross-refs of the form `_cmd-spack-<cmd>:` from rst files
documented_commands: Set[str] = set()
documented_commands = set()
for filename in args.rst_files:
with open(filename) as f:
for line in f:
@@ -786,13 +245,7 @@ def rst(args: Namespace, out: IO) -> None:
@formatter
def names(args: Namespace, out: IO) -> None:
"""Simple list of top-level commands.
Args:
args: Command-line arguments.
out: File object to write to.
"""
def names(args, out):
commands = copy.copy(spack.cmd.all_commands())
if args.aliases:
@@ -802,39 +255,15 @@ def names(args: Namespace, out: IO) -> None:
@formatter
def bash(args: Namespace, out: IO) -> None:
"""Bash tab-completion script.
Args:
args: Command-line arguments.
out: File object to write to.
"""
def bash(args, out):
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
aliases = ";".join(f"{key}:{val}" for key, val in spack.main.aliases.items())
out.write(f'SPACK_ALIASES="{aliases}"\n\n')
writer = BashCompletionWriter(parser.prog, out, args.aliases)
writer.write(parser)
@formatter
def fish(args, out):
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
writer = FishCompletionWriter(parser.prog, out, args.aliases)
writer.write(parser)
def prepend_header(args: Namespace, out: IO) -> None:
"""Prepend header text at the beginning of a file.
Args:
args: Command-line arguments.
out: File object to write to.
"""
def prepend_header(args, out):
if not args.header:
return
@@ -842,14 +271,10 @@ def prepend_header(args: Namespace, out: IO) -> None:
out.write(header.read())
def _commands(parser: ArgumentParser, args: Namespace) -> None:
def _commands(parser, args):
"""This is the 'regular' command, which can be called multiple times.
See ``commands()`` below for ``--update-completion`` handling.
Args:
parser: Argument parser.
args: Command-line arguments.
"""
formatter = formatters[args.format]
@@ -871,15 +296,12 @@ def _commands(parser: ArgumentParser, args: Namespace) -> None:
formatter(args, sys.stdout)
def update_completion(parser: ArgumentParser, args: Namespace) -> None:
def update_completion(parser, args):
"""Iterate through the shells and update the standard completion files.
This is a convenience method to avoid calling this command many
times, and to simplify completion update for developers.
Args:
parser: Argument parser.
args: Command-line arguments.
"""
for shell, shell_args in update_completion_args.items():
for attr, value in shell_args.items():
@@ -887,20 +309,14 @@ def update_completion(parser: ArgumentParser, args: Namespace) -> None:
_commands(parser, args)
def commands(parser: ArgumentParser, args: Namespace) -> None:
"""Main function that calls formatter functions.
Args:
parser: Argument parser.
args: Command-line arguments.
"""
def commands(parser, args):
if args.update_completion:
if args.format != "names" or any([args.aliases, args.update, args.header]):
tty.die("--update-completion can only be specified alone.")
# this runs the command multiple times with different arguments
update_completion(parser, args)
return update_completion(parser, args)
else:
# run commands normally
_commands(parser, args)
return _commands(parser, args)
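In practice these formatters back the `spack commands` command itself; the shipped completion scripts are regenerated with `spack commands --update-completion`, which drives `update_completion()` above once per shell.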

View File

@@ -36,10 +36,7 @@ def shell_init_instructions(cmd, equivalent):
" source %s/setup-env.fish" % spack.paths.share_path,
"",
color.colorize("@*c{For Windows batch:}"),
" %s\\spack_cmd.bat" % spack.paths.bin_path,
"",
color.colorize("@*c{For PowerShell:}"),
" %s\\setup-env.ps1" % spack.paths.share_path,
" source %s/spack_cmd.bat" % spack.paths.share_path,
"",
"Or, if you do not want to use shell support, run "
+ ("one of these" if shell_specific else "this")
@@ -53,7 +50,6 @@ def shell_init_instructions(cmd, equivalent):
equivalent.format(sh_arg="--csh ") + " # csh/tcsh",
equivalent.format(sh_arg="--fish") + " # fish",
equivalent.format(sh_arg="--bat ") + " # batch",
equivalent.format(sh_arg="--pwsh") + " # powershell",
]
else:
msg += [" " + equivalent]

View File

@@ -12,7 +12,7 @@
import spack.cmd
import spack.config
import spack.deptypes as dt
import spack.dependency as dep
import spack.environment as ev
import spack.mirror
import spack.modules
@@ -82,12 +82,12 @@ def _specs(self, **kwargs):
# return everything for an empty query.
if not qspecs:
return spack.store.STORE.db.query(**kwargs)
return spack.store.db.query(**kwargs)
# Return only matching stuff otherwise.
specs = {}
for spec in qspecs:
for s in spack.store.STORE.db.query(spec, **kwargs):
for s in spack.store.db.query(spec, **kwargs):
# This is fast for already-concrete specs
specs[s.dag_hash()] = s
@@ -114,13 +114,16 @@ def __call__(self, parser, namespace, jobs, option_string):
class DeptypeAction(argparse.Action):
"""Creates a flag of valid dependency types from a deptype argument."""
"""Creates a tuple of valid dependency types from a deptype argument."""
def __call__(self, parser, namespace, values, option_string=None):
if not values or values == "all":
deptype = dt.ALL
else:
deptype = dt.canonicalize(values.split(","))
deptype = dep.all_deptypes
if values:
deptype = tuple(x.strip() for x in values.split(","))
if deptype == ("all",):
deptype = "all"
deptype = dep.canonical_deptype(deptype)
setattr(namespace, self.dest, deptype)
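A sketch of the action's effect with the new bit-flag API shown above (hypothetical parser; the import path for DeptypeAction is an assumption):

import argparse

import spack.deptypes as dt
from spack.cmd.common.arguments import DeptypeAction  # assumed import path

parser = argparse.ArgumentParser()
parser.add_argument("--deptype", action=DeptypeAction, default=dt.ALL)
ns = parser.parse_args(["--deptype", "build,link"])
# ns.deptype is the bit flag dt.BUILD | dt.LINK; passing "all" (or omitting
# the flag) yields dt.ALL.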
@@ -262,7 +265,7 @@ def recurse_dependents():
"--dependents",
action="store_true",
dest="dependents",
help="also uninstall any packages that depend on the ones given via command line",
help="also uninstall any packages that depend on the ones given " "via command line",
)
@@ -282,8 +285,9 @@ def deptype():
return Args(
"--deptype",
action=DeptypeAction,
default=dt.ALL,
help="comma-separated list of deptypes to traverse (default=%s)" % ",".join(dt.ALL_TYPES),
default=dep.all_deptypes,
help="comma-separated list of deptypes to traverse\ndefault=%s"
% ",".join(dep.all_deptypes),
)
@@ -327,17 +331,6 @@ def tags():
)
@arg
def namespaces():
return Args(
"-N",
"--namespaces",
action="store_true",
default=False,
help="show fully qualified package names",
)
@arg
def jobs():
return Args(
@@ -357,9 +350,9 @@ def install_status():
"--install-status",
action="store_true",
default=True,
help="show install status of packages\n\npackages can be: "
help="show install status of packages. packages can be: "
"installed [+], missing and needed by an installed package [-], "
"installed in an upstream instance [^], "
"installed in and upstream instance [^], "
"or not installed (no annotation)",
)
@@ -400,23 +393,24 @@ def add_cdash_args(subparser, add_help):
cdash_help = {}
if add_help:
cdash_help["upload-url"] = "CDash URL where reports will be uploaded"
cdash_help["build"] = (
"name of the build that will be reported to CDash\n\n"
"defaults to spec of the package to operate on"
)
cdash_help["site"] = (
"site name that will be reported to CDash\n\n" "defaults to current system hostname"
)
cdash_help["track"] = (
"results will be reported to this group on CDash\n\n" "defaults to Experimental"
)
cdash_help["buildstamp"] = (
"use custom buildstamp\n\n"
"instead of letting the CDash reporter prepare the "
"buildstamp which, when combined with build name, site and project, "
"uniquely identifies the build, provide this argument to identify "
"the build yourself. format: %%Y%%m%%d-%%H%%M-[cdash-track]"
)
cdash_help[
"build"
] = """The name of the build that will be reported to CDash.
Defaults to spec of the package to operate on."""
cdash_help[
"site"
] = """The site name that will be reported to CDash.
Defaults to current system hostname."""
cdash_help[
"track"
] = """Results will be reported to this group on CDash.
Defaults to Experimental."""
cdash_help[
"buildstamp"
] = """Instead of letting the CDash reporter prepare the
buildstamp which, when combined with build name, site and project,
uniquely identifies the build, provide this argument to identify
the build yourself. Format: %%Y%%m%%d-%%H%%M-[cdash-track]"""
else:
cdash_help["upload-url"] = argparse.SUPPRESS
cdash_help["build"] = argparse.SUPPRESS
@@ -485,7 +479,7 @@ def __init__(
# substituting '_' for ':'.
dest = dest.replace(":", "_")
super().__init__(
super(ConfigSetAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
@@ -548,16 +542,16 @@ def add_s3_connection_args(subparser, add_help):
"--s3-access-key-id", help="ID string to use to connect to this S3 mirror"
)
subparser.add_argument(
"--s3-access-key-secret", help="secret string to use to connect to this S3 mirror"
"--s3-access-key-secret", help="Secret string to use to connect to this S3 mirror"
)
subparser.add_argument(
"--s3-access-token", help="access token to use to connect to this S3 mirror"
"--s3-access-token", help="Access Token to use to connect to this S3 mirror"
)
subparser.add_argument(
"--s3-profile", help="S3 profile name to use to connect to this S3 mirror", default=None
)
subparser.add_argument(
"--s3-endpoint-url", help="endpoint URL to use to connect to this S3 mirror"
"--s3-endpoint-url", help="Endpoint URL to use to connect to this S3 mirror"
)

View File

@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import os
@@ -10,7 +12,6 @@
import spack.build_environment as build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.deptypes as dt
import spack.error
import spack.paths
import spack.spec
@@ -47,9 +48,9 @@ def __init__(self, context="build"):
raise ValueError("context can only be build or test")
if context == "build":
self.direct_deps = dt.BUILD | dt.LINK | dt.RUN
self.direct_deps = ("build", "link", "run")
else:
self.direct_deps = dt.BUILD | dt.TEST | dt.LINK | dt.RUN
self.direct_deps = ("build", "test", "link", "run")
self.has_uninstalled_deps = False
@@ -72,8 +73,8 @@ def accept(self, item):
def neighbors(self, item):
# Direct deps: follow build & test edges.
# Transitive deps: follow link / run.
depflag = self.direct_deps if item.depth == 0 else dt.LINK | dt.RUN
return item.edge.spec.edges_to_dependencies(depflag=depflag)
deptypes = self.direct_deps if item.depth == 0 else ("link", "run")
return item.edge.spec.edges_to_dependencies(deptype=deptypes)
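A compact restatement of the traversal rule above, using the new-style bit flags (a sketch, assuming spack.deptypes as in the newer code path):

import spack.deptypes as dt

# Depth 0 (the root spec): follow build (+ test in a test context), link, run.
# Deeper levels: only link and run edges matter for an installed-deps check.
def edge_filter(depth, context="build"):
    direct = dt.BUILD | dt.LINK | dt.RUN
    if context == "test":
        direct = direct | dt.TEST
    return direct if depth == 0 else (dt.LINK | dt.RUN)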
def emulate_env_utility(cmd_name, context, args):
@@ -107,7 +108,7 @@ def emulate_env_utility(cmd_name, context, args):
visitor = AreDepsInstalledVisitor(context=context)
# Mass install check needs read transaction.
with spack.store.STORE.db.read_transaction():
with spack.store.db.read_transaction():
traverse.traverse_breadth_first_with_visitor([spec], traverse.CoverNodesVisitor(visitor))
if visitor.has_uninstalled_deps:

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import sys
@@ -24,6 +26,7 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="compiler_command")
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
# Find
find_parser = sp.add_parser(
@@ -35,7 +38,7 @@ def setup_parser(subparser):
find_parser.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
default=spack.config.default_modify_scope("compilers"),
help="configuration scope to modify",
)
@@ -49,7 +52,7 @@ def setup_parser(subparser):
remove_parser.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
default=None,
help="configuration scope to modify",
)
@@ -59,7 +62,7 @@ def setup_parser(subparser):
list_parser.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
default=spack.config.default_list_scope(),
help="configuration scope to read from",
)
@@ -70,7 +73,7 @@ def setup_parser(subparser):
info_parser.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
default=spack.config.default_list_scope(),
help="configuration scope to read from",
)
@@ -92,7 +95,7 @@ def compiler_find(args):
n = len(new_compilers)
s = "s" if n > 1 else ""
config = spack.config.CONFIG
config = spack.config.config
filename = config.get_config_filename(args.scope, "compilers")
tty.msg("Added %d new compiler%s to %s" % (n, s, filename))
colify(reversed(sorted(c.spec.display_str for c in new_compilers)), indent=4)
@@ -185,7 +188,7 @@ def compiler_list(args):
os_str = os
if target:
os_str += "-%s" % target
cname = "%s{%s} %s" % (spack.spec.COMPILER_COLOR, name, os_str)
cname = "%s{%s} %s" % (spack.spec.compiler_color, name, os_str)
tty.hline(colorize(cname), char="-")
colify(reversed(sorted(c.spec.display_str for c in compilers)))

View File

@@ -13,11 +13,12 @@
def setup_parser(subparser):
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
subparser.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
help="configuration scope to read/modify",
)

View File

@@ -14,16 +14,18 @@
def setup_parser(subparser):
subparser.add_argument(
"-f", "--force", action="store_true", help="re-concretize even if already concretized"
"-f", "--force", action="store_true", help="Re-concretize even if already concretized."
)
subparser.add_argument(
"--test",
default=None,
choices=["root", "all"],
help="concretize with test dependencies of only root packages or all packages",
help="""Concretize with test dependencies. When 'root' is chosen, test
dependencies are only added for the environment's root specs. When 'all' is
chosen, test dependencies are enabled for all packages in the environment.""",
)
subparser.add_argument(
"-q", "--quiet", action="store_true", help="don't print concretized specs"
"-q", "--quiet", action="store_true", help="Don't print concretized specs"
)
spack.cmd.common.arguments.add_concretizer_args(subparser)

View File

@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import collections
import os
import shutil
@@ -27,12 +29,13 @@
def setup_parser(subparser):
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
# User can only choose one
subparser.add_argument(
"--scope",
choices=scopes,
metavar=spack.config.SCOPES_METAVAR,
metavar=scopes_metavar,
help="configuration scope to read/modify",
)
@@ -41,10 +44,10 @@ def setup_parser(subparser):
get_parser = sp.add_parser("get", help="print configuration values")
get_parser.add_argument(
"section",
help="configuration section to print\n\noptions: %(choices)s",
help="configuration section to print. " "options: %(choices)s",
nargs="?",
metavar="section",
choices=spack.config.SECTION_SCHEMAS,
choices=spack.config.section_schemas,
)
blame_parser = sp.add_parser(
@@ -52,18 +55,18 @@ def setup_parser(subparser):
)
blame_parser.add_argument(
"section",
help="configuration section to print\n\noptions: %(choices)s",
help="configuration section to print. " "options: %(choices)s",
metavar="section",
choices=spack.config.SECTION_SCHEMAS,
choices=spack.config.section_schemas,
)
edit_parser = sp.add_parser("edit", help="edit configuration file")
edit_parser.add_argument(
"section",
help="configuration section to edit\n\noptions: %(choices)s",
help="configuration section to edit. " "options: %(choices)s",
metavar="section",
nargs="?",
choices=spack.config.SECTION_SCHEMAS,
choices=spack.config.section_schemas,
)
edit_parser.add_argument(
"--print-file", action="store_true", help="print the file name that would be edited"
@@ -75,7 +78,7 @@ def setup_parser(subparser):
add_parser.add_argument(
"path",
nargs="?",
help="colon-separated path to config that should be added, e.g. 'config:default:true'",
help="colon-separated path to config that should be added," " e.g. 'config:default:true'",
)
add_parser.add_argument("-f", "--file", help="file from which to set all config values")
@@ -87,7 +90,7 @@ def setup_parser(subparser):
"--local",
action="store_true",
default=False,
help="set packages preferences based on local installs, rather than upstream",
help="Set packages preferences based on local installs, rather " "than upstream.",
)
remove_parser = sp.add_parser("remove", aliases=["rm"], help="remove configuration parameters")
@@ -145,10 +148,10 @@ def config_get(args):
scope, section = _get_scope_and_section(args)
if section is not None:
spack.config.CONFIG.print_section(section)
spack.config.config.print_section(section)
elif scope and scope.startswith("env:"):
config_file = spack.config.CONFIG.get_config_filename(scope, section)
config_file = spack.config.config.get_config_filename(scope, section)
if os.path.exists(config_file):
with open(config_file) as f:
print(f.read())
@@ -156,12 +159,12 @@ def config_get(args):
tty.die("environment has no %s file" % ev.manifest_name)
else:
tty.die("`spack config get` requires a section argument or an active environment.")
tty.die("`spack config get` requires a section argument " "or an active environment.")
def config_blame(args):
"""Print out line-by-line blame of merged YAML."""
spack.config.CONFIG.print_section(args.section, blame=True)
spack.config.config.print_section(args.section, blame=True)
def config_edit(args):
@@ -179,8 +182,8 @@ def config_edit(args):
# If we aren't editing a spack.yaml file, get config path from scope.
scope, section = _get_scope_and_section(args)
if not scope and not section:
tty.die("`spack config edit` requires a section argument or an active environment.")
config_file = spack.config.CONFIG.get_config_filename(scope, section)
tty.die("`spack config edit` requires a section argument " "or an active environment.")
config_file = spack.config.config.get_config_filename(scope, section)
if args.print_file:
print(config_file)
@@ -193,7 +196,7 @@ def config_list(args):
Used primarily for shell tab completion scripts.
"""
print(" ".join(list(spack.config.SECTION_SCHEMAS)))
print(" ".join(list(spack.config.section_schemas)))
def config_add(args):
@@ -250,19 +253,19 @@ def _can_update_config_file(scope: spack.config.ConfigScope, cfg_file):
def config_update(args):
# Read the configuration files
spack.config.CONFIG.get_config(args.section, scope=args.scope)
spack.config.config.get_config(args.section, scope=args.scope)
updates: List[spack.config.ConfigScope] = list(
filter(
lambda s: not isinstance(
s, (spack.config.InternalConfigScope, spack.config.ImmutableConfigScope)
),
spack.config.CONFIG.format_updates[args.section],
spack.config.config.format_updates[args.section],
)
)
cannot_overwrite, skip_system_scope = [], False
for scope in updates:
cfg_file = spack.config.CONFIG.get_config_filename(scope.name, args.section)
cfg_file = spack.config.config.get_config_filename(scope.name, args.section)
can_be_updated = _can_update_config_file(scope, cfg_file)
if not can_be_updated:
if scope.name == "system":
@@ -301,7 +304,7 @@ def config_update(args):
" the latest schema format:\n\n"
)
for scope in updates:
cfg_file = spack.config.CONFIG.get_config_filename(scope.name, args.section)
cfg_file = spack.config.config.get_config_filename(scope.name, args.section)
msg += "\t[scope={0}, file={1}]\n".format(scope.name, cfg_file)
msg += (
"\nIf the configuration files are updated, versions of Spack "
@@ -324,7 +327,7 @@ def config_update(args):
# Make a backup copy and rewrite the file
bkp_file = cfg_file + ".bkp"
shutil.copy(cfg_file, bkp_file)
spack.config.CONFIG.update_config(args.section, data, scope=scope.name, force=True)
spack.config.config.update_config(args.section, data, scope=scope.name, force=True)
tty.msg(f'File "{cfg_file}" update [backup={bkp_file}]')
@@ -336,13 +339,13 @@ def _can_revert_update(scope_dir, cfg_file, bkp_file):
def config_revert(args):
scopes = [args.scope] if args.scope else [x.name for x in spack.config.CONFIG.file_scopes]
scopes = [args.scope] if args.scope else [x.name for x in spack.config.config.file_scopes]
# Search for backup files in the configuration scopes
Entry = collections.namedtuple("Entry", ["scope", "cfg", "bkp"])
to_be_restored, cannot_overwrite = [], []
for scope in scopes:
cfg_file = spack.config.CONFIG.get_config_filename(scope, args.section)
cfg_file = spack.config.config.get_config_filename(scope, args.section)
bkp_file = cfg_file + ".bkp"
# If the backup file doesn't exist, move to the next scope
@@ -373,7 +376,7 @@ def config_revert(args):
proceed = True
if not args.yes_to_all:
msg = "The following scopes will be restored from the corresponding backup files:\n"
msg = "The following scopes will be restored from the corresponding" " backup files:\n"
for entry in to_be_restored:
msg += "\t[scope={0.scope}, bkp={0.bkp}]\n".format(entry)
msg += "This operation cannot be undone."
@@ -398,8 +401,8 @@ def config_prefer_upstream(args):
if scope is None:
scope = spack.config.default_modify_scope("packages")
all_specs = set(spack.store.STORE.db.query(installed=True))
local_specs = set(spack.store.STORE.db.query_local(installed=True))
all_specs = set(spack.store.db.query(installed=True))
local_specs = set(spack.store.db.query_local(installed=True))
pref_specs = local_specs if args.local else all_specs - local_specs
conflicting_variants = set()
@@ -456,7 +459,7 @@ def config_prefer_upstream(args):
existing = spack.config.get("packages", scope=scope)
new = spack.config.merge_yaml(existing, pkgs)
spack.config.set("packages", new, scope)
config_file = spack.config.CONFIG.get_config_filename(scope, section)
config_file = spack.config.config.get_config_filename(scope, section)
tty.msg("Updated config at {0}".format(config_file))

View File

@@ -10,7 +10,7 @@
import spack.container
import spack.container.images
description = "creates recipes to build images for different container runtimes"
description = "creates recipes to build images for different" " container runtimes"
section = "container"
level = "long"

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import re
import urllib.parse
@@ -17,7 +19,6 @@
from spack.url import UndetectableNameError, UndetectableVersionError, parse_name, parse_version
from spack.util.editor import editor
from spack.util.executable import ProcessError, which
from spack.util.format import get_version_lines
from spack.util.naming import mod_to_class, simplify_name, valid_fully_qualified_module_name
description = "create a new package file"
@@ -70,7 +71,7 @@ class {class_name}({base_class_name}):
'''
class BundlePackageTemplate:
class BundlePackageTemplate(object):
"""
Provides the default values to be used for a bundle package file template.
"""
@@ -121,7 +122,7 @@ def install(self, spec, prefix):
url_line = ' url = "{url}"'
def __init__(self, name, url, versions):
super().__init__(name, versions)
super(PackageTemplate, self).__init__(name, versions)
self.url_def = self.url_line.format(url=url)
@@ -199,7 +200,7 @@ def __init__(self, name, url, *args, **kwargs):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to lua-{0}".format(name))
name = "lua-{0}".format(name)
super().__init__(name, url, *args, **kwargs)
super(LuaPackageTemplate, self).__init__(name, url, *args, **kwargs)
class MesonPackageTemplate(PackageTemplate):
@@ -307,7 +308,7 @@ def __init__(self, name, url, *args, **kwargs):
tty.msg("Changing package name from {0} to rkt-{0}".format(name))
name = "rkt-{0}".format(name)
self.body_def = self.body_def.format(name[4:])
super().__init__(name, url, *args, **kwargs)
super(RacketPackageTemplate, self).__init__(name, url, *args, **kwargs)
class PythonPackageTemplate(PackageTemplate):
@@ -326,7 +327,6 @@ class PythonPackageTemplate(PackageTemplate):
# FIXME: Add a build backend, usually defined in pyproject.toml. If no such file
# exists, use setuptools.
# depends_on("py-setuptools", type="build")
# depends_on("py-hatchling", type="build")
# depends_on("py-flit-core", type="build")
# depends_on("py-poetry-core", type="build")
@@ -334,11 +334,17 @@ class PythonPackageTemplate(PackageTemplate):
# depends_on("py-foo", type=("build", "run"))"""
body_def = """\
def config_settings(self, spec, prefix):
# FIXME: Add configuration settings to be passed to the build backend
def global_options(self, spec, prefix):
# FIXME: Add options to pass to setup.py
# FIXME: If not needed, delete this function
settings = {}
return settings"""
options = []
return options
def install_options(self, spec, prefix):
# FIXME: Add options to pass to setup.py install
# FIXME: If not needed, delete this function
options = []
return options"""
def __init__(self, name, url, *args, **kwargs):
# If the user provided `--name py-numpy`, don't rename it py-py-numpy
@@ -394,7 +400,7 @@ def __init__(self, name, url, *args, **kwargs):
+ self.url_line
)
super().__init__(name, url, *args, **kwargs)
super(PythonPackageTemplate, self).__init__(name, url, *args, **kwargs)
class RPackageTemplate(PackageTemplate):
@@ -433,7 +439,7 @@ def __init__(self, name, url, *args, **kwargs):
if bioc:
self.url_line = ' url = "{0}"\n' ' bioc = "{1}"'.format(url, r_name)
super().__init__(name, url, *args, **kwargs)
super(RPackageTemplate, self).__init__(name, url, *args, **kwargs)
class PerlmakePackageTemplate(PackageTemplate):
@@ -460,7 +466,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to perl-{0}".format(name))
name = "perl-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(PerlmakePackageTemplate, self).__init__(name, *args, **kwargs)
class PerlbuildPackageTemplate(PerlmakePackageTemplate):
@@ -493,7 +499,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to octave-{0}".format(name))
name = "octave-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(OctavePackageTemplate, self).__init__(name, *args, **kwargs)
class RubyPackageTemplate(PackageTemplate):
@@ -521,7 +527,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to ruby-{0}".format(name))
name = "ruby-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(RubyPackageTemplate, self).__init__(name, *args, **kwargs)
class MakefilePackageTemplate(PackageTemplate):
@@ -566,7 +572,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to py-{0}".format(name))
name = "py-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(SIPPackageTemplate, self).__init__(name, *args, **kwargs)
templates = {
@@ -608,7 +614,7 @@ def setup_parser(subparser):
"--template",
metavar="TEMPLATE",
choices=sorted(templates.keys()),
help="build system template to use\n\noptions: %(choices)s",
help="build system template to use. options: %(choices)s",
)
subparser.add_argument(
"-r", "--repo", help="path to a repository where the package should be created"
@@ -616,7 +622,7 @@ def setup_parser(subparser):
subparser.add_argument(
"-N",
"--namespace",
help="specify a namespace for the package\n\nmust be the namespace of "
help="specify a namespace for the package. must be the namespace of "
"a repository registered with Spack",
)
subparser.add_argument(
@@ -822,7 +828,7 @@ def get_versions(args, name):
if args.url is not None and args.template != "bundle" and valid_url:
# Find available versions
try:
url_dict = spack.url.find_versions_of_archive(args.url)
url_dict = spack.util.web.find_versions_of_archive(args.url)
except UndetectableVersionError:
# Use fake versions
tty.warn("Couldn't detect version in: {0}".format(args.url))
@@ -833,15 +839,13 @@ def get_versions(args, name):
version = parse_version(args.url)
url_dict = {version: args.url}
version_hashes = spack.stage.get_checksums_for_versions(
versions = spack.stage.get_checksums_for_versions(
url_dict,
name,
first_stage_function=guesser,
keep_stage=args.keep_stage,
batch=(args.batch or len(url_dict) == 1),
)
versions = get_version_lines(version_hashes, url_dict)
else:
versions = unhashed_versions
@@ -876,7 +880,7 @@ def get_build_system(template, url, guesser):
# Use whatever build system the guesser detected
selected_template = guesser.build_system
if selected_template == "generic":
tty.warn("Unable to detect a build system. Using a generic package template.")
tty.warn("Unable to detect a build system. " "Using a generic package template.")
else:
msg = "This package looks like it uses the {0} build system"
tty.msg(msg.format(selected_template))
@@ -915,11 +919,11 @@ def get_repository(args, name):
)
else:
if spec.namespace:
repo = spack.repo.PATH.get_repo(spec.namespace, None)
repo = spack.repo.path.get_repo(spec.namespace, None)
if not repo:
tty.die("Unknown namespace: '{0}'".format(spec.namespace))
else:
repo = spack.repo.PATH.first_repo()
repo = spack.repo.path.first_repo()
# Set the namespace on the spec if it's not there already
if not spec.namespace:

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import platform
import re
@@ -60,16 +62,16 @@ def create_db_tarball(args):
tarball_name = "spack-db.%s.tar.gz" % _debug_tarball_suffix()
tarball_path = os.path.abspath(tarball_name)
base = os.path.basename(str(spack.store.STORE.root))
base = os.path.basename(str(spack.store.root))
transform_args = []
if "GNU" in tar("--version", output=str):
transform_args = ["--transform", "s/^%s/%s/" % (base, tarball_name)]
else:
transform_args = ["-s", "/^%s/%s/" % (base, tarball_name)]
wd = os.path.dirname(str(spack.store.STORE.root))
wd = os.path.dirname(str(spack.store.root))
with working_dir(wd):
files = [spack.store.STORE.db._index_path]
files = [spack.store.db._index_path]
files += glob("%s/*/*/*/.spack/spec.json" % base)
files += glob("%s/*/*/*/.spack/spec.yaml" % base)
files = [os.path.relpath(f) for f in files]

View File

@@ -26,8 +26,8 @@ def setup_parser(subparser):
"--installed",
action="store_true",
default=False,
help="list installed dependencies of an installed spec "
"instead of possible dependencies of a package",
help="List installed dependencies of an installed spec, "
"instead of possible dependencies of a package.",
)
subparser.add_argument(
"-t",
@@ -60,7 +60,7 @@ def dependencies(parser, args):
format_string = "{name}{@version}{%compiler}{/hash:7}"
if sys.stdout.isatty():
tty.msg("Dependencies of %s" % spec.format(format_string, color=True))
deps = spack.store.STORE.db.installed_relatives(
deps = spack.store.db.installed_relatives(
spec, "children", args.transitive, deptype=args.deptype
)
if deps:
@@ -74,7 +74,7 @@ def dependencies(parser, args):
spec,
transitive=args.transitive,
expand_virtuals=args.expand_virtuals,
depflag=args.deptype,
deptype=args.deptype,
)
if spec.name in dependencies:

View File

@@ -25,15 +25,15 @@ def setup_parser(subparser):
"--installed",
action="store_true",
default=False,
help="list installed dependents of an installed spec "
"instead of possible dependents of a package",
help="List installed dependents of an installed spec, "
"instead of possible dependents of a package.",
)
subparser.add_argument(
"-t",
"--transitive",
action="store_true",
default=False,
help="show all transitive dependents",
help="Show all transitive dependents.",
)
arguments.add_common_arguments(subparser, ["spec"])
@@ -47,14 +47,14 @@ def inverted_dependencies():
actual dependents.
"""
dag = {}
for pkg_cls in spack.repo.PATH.all_package_classes():
for pkg_cls in spack.repo.path.all_package_classes():
dag.setdefault(pkg_cls.name, set())
for dep in pkg_cls.dependencies:
deps = [dep]
# expand virtuals if necessary
if spack.repo.PATH.is_virtual(dep):
deps += [s.name for s in spack.repo.PATH.providers_for(dep)]
if spack.repo.path.is_virtual(dep):
deps += [s.name for s in spack.repo.path.providers_for(dep)]
for d in deps:
dag.setdefault(d, set()).add(pkg_cls.name)
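The inversion above can be shown with a toy dependency table in place of spack.repo's package classes (made-up package names):

# Toy forward map: package -> direct dependencies.
deps_of = {"cmake": {"zlib"}, "curl": {"zlib"}, "zlib": set()}

# Invert it: dependency -> set of packages that depend on it.
dag = {name: set() for name in deps_of}
for pkg, deps in deps_of.items():
    for d in deps:
        dag.setdefault(d, set()).add(pkg)
# dag == {"cmake": set(), "curl": set(), "zlib": {"cmake", "curl"}}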
@@ -96,7 +96,7 @@ def dependents(parser, args):
format_string = "{name}{@version}{%compiler}{/hash:7}"
if sys.stdout.isatty():
tty.msg("Dependents of %s" % spec.cformat(format_string))
deps = spack.store.STORE.db.installed_relatives(spec, "parents", args.transitive)
deps = spack.store.db.installed_relatives(spec, "parents", args.transitive)
if deps:
spack.cmd.display_specs(deps, long=True)
else:

View File

@@ -13,6 +13,8 @@
It is up to the user to ensure binary compatibility between the deprecated
installation and its deprecator.
"""
from __future__ import print_function
import argparse
import os
@@ -26,7 +28,7 @@
from spack.database import InstallStatuses
from spack.error import SpackError
description = "replace one package with another via symlinks"
description = "Replace one package with another via symlinks"
section = "admin"
level = "long"
@@ -46,7 +48,7 @@ def setup_parser(sp):
action="store_true",
default=True,
dest="dependencies",
help="deprecate dependencies (default)",
help="Deprecate dependencies (default)",
)
deps.add_argument(
"-D",
@@ -54,7 +56,7 @@ def setup_parser(sp):
action="store_false",
default=True,
dest="dependencies",
help="do not deprecate dependencies",
help="Do not deprecate dependencies",
)
install = sp.add_mutually_exclusive_group()
@@ -64,7 +66,7 @@ def setup_parser(sp):
action="store_true",
default=False,
dest="install",
help="concretize and install deprecator spec",
help="Concretize and install deprecator spec",
)
install.add_argument(
"-I",
@@ -72,7 +74,7 @@ def setup_parser(sp):
action="store_false",
default=False,
dest="install",
help="deprecator spec must already be installed (default)",
help="Deprecator spec must already be installed (default)",
)
sp.add_argument(
@@ -81,7 +83,7 @@ def setup_parser(sp):
type=str,
default="soft",
choices=["soft", "hard"],
help="type of filesystem link to use for deprecation (default soft)",
help="Type of filesystem link to use for deprecation (default soft)",
)
sp.add_argument(
@@ -130,7 +132,7 @@ def deprecate(parser, args):
already_deprecated = []
already_deprecated_for = []
for spec in all_deprecate:
deprecated_for = spack.store.STORE.db.deprecator(spec)
deprecated_for = spack.store.db.deprecator(spec)
if deprecated_for:
already_deprecated.append(spec)
already_deprecated_for.append(deprecated_for)

Some files were not shown because too many files have changed in this diff.