Compare commits


2 Commits

Author          SHA1        Message                                     Date
Gregory Becker  1be0cf0fdf  apple clang package                         2023-05-30 16:10:59 -07:00
Gregory Becker  f60cb4090b  wip: apple-clang only and now parallelism   2023-05-23 19:57:02 +02:00
1278 changed files with 10073 additions and 15517 deletions

View File

@@ -5,8 +5,3 @@ updates:
directory: "/"
schedule:
interval: "daily"
# Requirements to build documentation
- package-ecosystem: "pip"
directory: "/lib/spack/docs"
schedule:
interval: "daily"

View File

@@ -19,8 +19,8 @@ jobs:
package-audits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
@@ -38,7 +38,7 @@ jobs:
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
- uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits

View File

@@ -24,7 +24,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Setup repo
@@ -158,7 +158,7 @@ jobs:
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
@@ -179,7 +179,7 @@ jobs:
run: |
brew install tree
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Bootstrap clingo
run: |
set -ex
@@ -204,7 +204,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Setup repo
@@ -247,7 +247,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Setup non-root user
@@ -283,7 +283,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Setup non-root user
@@ -316,7 +316,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -333,7 +333,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh

View File

@@ -49,14 +49,14 @@ jobs:
[almalinux8, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:8'],
[almalinux9, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:9'],
[rockylinux8, 'linux/amd64,linux/arm64', 'rockylinux:8'],
[rockylinux9, 'linux/amd64,linux/arm64', 'rockylinux:9'],
[rockylinux9, 'linux/amd64,linux/arm64,linux/ppc64le', 'rockylinux:9'],
[fedora37, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:37'],
[fedora38, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:38']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -92,13 +92,13 @@ jobs:
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # @v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@16c0bc4a6e6ada2cfd8afd41d22d95379cf7c32a # @v1
uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # @v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -106,13 +106,13 @@ jobs:
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # @v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 # @v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}

View File

@@ -35,7 +35,7 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0

View File

@@ -1,31 +0,0 @@
name: Windows Paraview Nightly
on:
schedule:
- cron: '0 2 * * *' # Run at 2 am
defaults:
run:
shell:
powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools coverage
- name: Build Test
run: |
spack compiler find
spack external find cmake ninja win-sdk win-wdk wgl msmpi
spack -d install -y --cdash-upload-url https://cdash.spack.io/submit.php?project=Spack+on+Windows --cdash-track Nightly --only dependencies paraview
exit 0

View File

@@ -47,10 +47,10 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -87,17 +87,17 @@ jobs:
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
- uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2
with:
flags: unittests,linux,${{ matrix.concretizer }}
# Test shell integration
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -118,7 +118,7 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
- uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2
with:
flags: shelltests,linux
@@ -133,7 +133,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -152,10 +152,10 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -176,7 +176,7 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
- uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # @v2.1.0
with:
flags: unittests,linux,clingo
# Run unit tests on MacOS
@@ -186,10 +186,10 @@ jobs:
matrix:
python-version: ["3.10"]
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -211,6 +211,6 @@ jobs:
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
- uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2
with:
flags: unittests,macos

View File

@@ -18,8 +18,8 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # @v2
with:
python-version: '3.11'
cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # @v2
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # @v2
with:
python-version: '3.11'
cache: 'pip'
@@ -68,7 +68,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # @v2
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -81,7 +81,6 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack debug report
spack -d bootstrap now --dev
spack style -t black
spack unit-test -V

View File

@@ -15,10 +15,10 @@ jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b
with:
python-version: 3.9
- name: Install Python packages
@@ -33,16 +33,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
- uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2
with:
flags: unittests,windows
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b
with:
python-version: 3.9
- name: Install Python packages
@@ -57,16 +57,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
- uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2
with:
flags: unittests,windows
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0
- uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b
with:
python-version: 3.9
- name: Install Python packages

View File

@@ -1,16 +1,10 @@
version: 2
build:
os: "ubuntu-22.04"
apt_packages:
- graphviz
tools:
python: "3.11"
sphinx:
configuration: lib/spack/docs/conf.py
fail_on_warning: true
python:
version: 3.7
install:
- requirements: lib/spack/docs/requirements.txt

View File

@@ -25,6 +25,8 @@ exit 1
# Line above is a shell no-op, and ends a python multi-line comment.
# The code above runs this file with our preferred python interpreter.
from __future__ import print_function
import os
import os.path
import sys

View File

@@ -214,7 +214,7 @@ goto :end_switch
if defined _sp_args (
if NOT "%_sp_args%"=="%_sp_args:--help=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args:-h=%" (
) else if NOT "%_sp_args%"=="%_sp_args: -h=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args:--bat=%" (
goto :default_case

View File

@@ -1,132 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# #######################################################################
function Compare-CommonArgs {
$CMDArgs = $args[0]
# These arguments take precedence and call for no further parsing of arguments
# invoke actual Spack entrypoint with that context and exit after
"--help", "-h", "--version", "-V" | ForEach-Object {
$arg_opt = $_
if(($CMDArgs) -and ([bool]($CMDArgs.Where({$_ -eq $arg_opt})))) {
return $true
}
}
return $false
}
function Read-SpackArgs {
$SpackCMD_params = @()
$SpackSubCommand = $NULL
$SpackSubCommandArgs = @()
$args_ = $args[0]
$args_ | ForEach-Object {
if (!$SpackSubCommand) {
if($_.SubString(0,1) -eq "-")
{
$SpackCMD_params += $_
}
else{
$SpackSubCommand = $_
}
}
else{
$SpackSubCommandArgs += $_
}
}
return $SpackCMD_params, $SpackSubCommand, $SpackSubCommandArgs
}
function Invoke-SpackCD {
if (Compare-CommonArgs $SpackSubCommandArgs) {
python $Env:SPACK_ROOT/bin/spack cd -h
}
else {
$LOC = $(python $Env:SPACK_ROOT/bin/spack location $SpackSubCommandArgs)
if (($NULL -ne $LOC)){
if ( Test-Path -Path $LOC){
Set-Location $LOC
}
else{
exit 1
}
}
else {
exit 1
}
}
}
function Invoke-SpackEnv {
if (Compare-CommonArgs $SpackSubCommandArgs[0]) {
python $Env:SPACK_ROOT/bin/spack env -h
}
else {
$SubCommandSubCommand = $SpackSubCommandArgs[0]
$SubCommandSubCommandArgs = $SpackSubCommandArgs[1..$SpackSubCommandArgs.Count]
switch ($SubCommandSubCommand) {
"activate" {
if (Compare-CommonArgs $SubCommandSubCommandArgs) {
python $Env:SPACK_ROOT/bin/spack env activate $SubCommandSubCommandArgs
}
elseif ([bool]($SubCommandSubCommandArgs.Where({$_ -eq "--pwsh"}))) {
python $Env:SPACK_ROOT/bin/spack env activate $SubCommandSubCommandArgs
}
elseif (!$SubCommandSubCommandArgs) {
python $Env:SPACK_ROOT/bin/spack env activate $SubCommandSubCommandArgs
}
else {
$SpackEnv = $(python $Env:SPACK_ROOT/bin/spack $SpackCMD_params env activate "--pwsh" $SubCommandSubCommandArgs)
$ExecutionContext.InvokeCommand($SpackEnv)
}
}
"deactivate" {
if ([bool]($SubCommandSubCommandArgs.Where({$_ -eq "--pwsh"}))) {
python $Env:SPACK_ROOT/bin/spack env deactivate $SubCommandSubCommandArgs
}
elseif($SubCommandSubCommandArgs) {
python $Env:SPACK_ROOT/bin/spack env deactivate -h
}
else {
$SpackEnv = $(python $Env:SPACK_ROOT/bin/spack $SpackCMD_params env deactivate --pwsh)
$ExecutionContext.InvokeCommand($SpackEnv)
}
}
default {python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
}
}
}
function Invoke-SpackLoad {
if (Compare-CommonArgs $SpackSubCommandArgs) {
python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
}
elseif ([bool]($SpackSubCommandArgs.Where({($_ -eq "--pwsh") -or ($_ -eq "--list")}))) {
python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
}
else {
$SpackEnv = $(python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand "--pwsh" $SpackSubCommandArgs)
$ExecutionContext.InvokeCommand($SpackEnv)
}
}
$SpackCMD_params, $SpackSubCommand, $SpackSubCommandArgs = Read-SpackArgs $args
if (Compare-CommonArgs $SpackCMD_params) {
python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs
exit $LASTEXITCODE
}
# Process Spack commands with special conditions
# all other commands are piped directly to Spack
switch($SpackSubCommand)
{
"cd" {Invoke-SpackCD}
"env" {Invoke-SpackEnv}
"load" {Invoke-SpackLoad}
"unload" {Invoke-SpackLoad}
default {python $Env:SPACK_ROOT/bin/spack $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
}

View File

@@ -1,16 +0,0 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# The name of the Pygments (syntax highlighting) style to use.
# We use our own extension of the default style with a few modifications
from pygments.styles.default import DefaultStyle
from pygments.token import Generic
class SpackStyle(DefaultStyle):
styles = DefaultStyle.styles.copy()
background_color = "#f4f4f8"
styles[Generic.Output] = "#355"
styles[Generic.Prompt] = "bold #346ec9"

View File

@@ -76,53 +76,6 @@ To build with ``icx``, do ::
spack install patchelf%oneapi
Using oneAPI Spack environment
-------------------------------
In this example, we build lammps with ``icx`` using the Spack environment for oneAPI packages created by Intel. The
compilers are installed with Spack as in the example above.
Install the oneAPI compilers::
spack install intel-oneapi-compilers
Add the compilers to your ``compilers.yaml`` so Spack can use them::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
Verify that the compilers are available::
spack compiler list
Clone `spack-configs <https://github.com/spack/spack-configs>`_ repo and activate Intel oneAPI CPU environment::
git clone https://github.com/spack/spack-configs
spack env activate spack-configs/INTEL/CPU
spack concretize -f
The `Intel oneAPI CPU environment <https://github.com/spack/spack-configs/blob/main/INTEL/CPU/spack.yaml>`_ contains applications tested and validated by Intel; this list is constantly extended. It currently supports:
- `GROMACS <https://www.gromacs.org/>`_
- `HPCG <https://www.hpcg-benchmark.org/>`_
- `HPL <https://netlib.org/benchmark/hpl/>`_
- `LAMMPS <https://www.lammps.org/#gsc.tab=0>`_
- `OpenFOAM <https://www.openfoam.com/>`_
- `STREAM <https://www.cs.virginia.edu/stream/>`_
- `WRF <https://github.com/wrf-model/WRF>`_
To build lammps with the oneAPI compiler from this environment, just run::
spack install lammps
Compiled binaries can be found using::
spack cd -i lammps
You can do the same for all other applications from this environment.
Using oneAPI MPI to Satisfy a Virtual Dependence
------------------------------------------------------

View File

@@ -72,7 +72,7 @@ arguments to the configure phase, you can use:
.. code-block:: python
def configure_args(self):
def configure_args(self, spec, prefix):
return ['--no-python-dbus']

View File

@@ -97,7 +97,9 @@ class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if "refspecific" in node:
del node["refspecific"]
return super().resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode
)
#
@@ -147,6 +149,7 @@ def setup(sphinx):
# Get nice vector graphics
graphviz_output_format = "svg"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@@ -230,8 +233,30 @@ def setup(sphinx):
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
sys.path.append("./_pygments")
pygments_style = "style.SpackStyle"
# The name of the Pygments (syntax highlighting) style to use.
# We use our own extension of the default style with a few modifications
from pygments.style import Style
from pygments.styles.default import DefaultStyle
from pygments.token import Comment, Generic, Text
class SpackStyle(DefaultStyle):
styles = DefaultStyle.styles.copy()
background_color = "#f4f4f8"
styles[Generic.Output] = "#355"
styles[Generic.Prompt] = "bold #346ec9"
import pkg_resources
dist = pkg_resources.Distribution(__file__)
sys.path.append(".") # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse("spack = conf:SpackStyle", dist=dist)
dist._ep_map = {"pygments.styles": {"plugin1": ep}}
pkg_resources.working_set.add(dist)
pygments_style = "spack"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -316,15 +341,16 @@ def setup(sphinx):
# Output file base name for HTML help builder.
htmlhelp_basename = "Spackdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples

View File

@@ -636,7 +636,7 @@ to customize the generation of container recipes:
- No
* - ``os_packages:command``
- Tool used to manage system packages
- ``apt``, ``yum``, ``dnf``, ``dnf_epel``, ``zypper``, ``apk``, ``yum_amazon``
- ``apt``, ``yum``, ``zypper``, ``apk``, ``yum_amazon``
- Only with custom base images
* - ``os_packages:update``
- Whether or not to update the list of available packages

View File

@@ -916,9 +916,9 @@ function, as shown in the example below:
.. code-block:: yaml
projections:
zlib: "{name}-{version}"
^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
all: "{name}-{version}/{compiler.name}-{compiler.version}"
zlib: {name}-{version}
^mpi: {name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}
all: {name}-{version}/{compiler.name}-{compiler.version}
The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will
@@ -1132,11 +1132,11 @@ index once every package is pushed. Note how this target uses the generated
example/push/%: example/install/%
@mkdir -p $(dir $@)
$(info About to push $(SPEC) to a buildcache)
$(SPACK) -e . buildcache push --allow-root --only=package $(BUILDCACHE_DIR) /$(HASH)
$(SPACK) -e . buildcache create --allow-root --only=package --directory $(BUILDCACHE_DIR) /$(HASH)
@touch $@
push: $(addprefix example/push/,$(example/SPACK_PACKAGE_IDS))
$(info Updating the buildcache index)
$(SPACK) -e . buildcache update-index $(BUILDCACHE_DIR)
$(SPACK) -e . buildcache update-index --directory $(BUILDCACHE_DIR)
$(info Done!)
@touch $@

View File

@@ -317,7 +317,7 @@ installed, but you know that new compilers have been added to your
.. code-block:: console
$ module load gcc/4.9.0
$ module load gcc-4.9.0
$ spack compiler find
==> Added 1 new compiler to ~/.spack/linux/compilers.yaml
gcc@4.9.0

View File

@@ -76,7 +76,6 @@ or refer to the full manual below.
chain
extensions
pipelines
signing
.. toctree::
:maxdepth: 2

View File

@@ -35,27 +35,27 @@ showing lots of installed packages:
$ module avail
--------------------------------------------------------------- ~/spack/share/spack/modules/linux-ubuntu14-x86_64 ---------------------------------------------------------------
autoconf/2.69-gcc-4.8-qextxkq hwloc/1.11.6-gcc-6.3.0-akcisez m4/1.4.18-gcc-4.8-ev2znoc openblas/0.2.19-gcc-6.3.0-dhkmed6 py-setuptools/34.2.0-gcc-6.3.0-fadur4s
automake/1.15-gcc-4.8-maqvukj isl/0.18-gcc-4.8-afi6taq m4/1.4.18-gcc-6.3.0-uppywnz openmpi/2.1.0-gcc-6.3.0-go2s4z5 py-six/1.10.0-gcc-6.3.0-p4dhkaw
binutils/2.28-gcc-4.8-5s7c6rs libiconv/1.15-gcc-4.8-at46wg3 mawk/1.3.4-gcc-4.8-acjez57 openssl/1.0.2k-gcc-4.8-dkls5tk python/2.7.13-gcc-6.3.0-tyehea7
bison/3.0.4-gcc-4.8-ek4luo5 libpciaccess/0.13.4-gcc-6.3.0-gmufnvh mawk/1.3.4-gcc-6.3.0-ostdoms openssl/1.0.2k-gcc-6.3.0-gxgr5or readline/7.0-gcc-4.8-xhufqhn
bzip2/1.0.6-gcc-4.8-iffrxzn libsigsegv/2.11-gcc-4.8-pp2cvte mpc/1.0.3-gcc-4.8-g5mztc5 pcre/8.40-gcc-4.8-r5pbrxb readline/7.0-gcc-6.3.0-zzcyicg
bzip2/1.0.6-gcc-6.3.0-bequudr libsigsegv/2.11-gcc-6.3.0-7enifnh mpfr/3.1.5-gcc-4.8-o7xm7az perl/5.24.1-gcc-4.8-dg5j65u sqlite/3.8.5-gcc-6.3.0-6zoruzj
cmake/3.7.2-gcc-6.3.0-fowuuby libtool/2.4.6-gcc-4.8-7a523za mpich/3.2-gcc-6.3.0-dmvd3aw perl/5.24.1-gcc-6.3.0-6uzkpt6 tar/1.29-gcc-4.8-wse2ass
curl/7.53.1-gcc-4.8-3fz46n6 libtool/2.4.6-gcc-6.3.0-n7zmbzt ncurses/6.0-gcc-4.8-dcpe7ia pkg-config/0.29.2-gcc-4.8-ib33t75 tcl/8.6.6-gcc-4.8-tfxzqbr
expat/2.2.0-gcc-4.8-mrv6bd4 libxml2/2.9.4-gcc-4.8-ryzxnsu ncurses/6.0-gcc-6.3.0-ucbhcdy pkg-config/0.29.2-gcc-6.3.0-jpgubk3 util-macros/1.19.1-gcc-6.3.0-xorz2x2
flex/2.6.3-gcc-4.8-yf345oo libxml2/2.9.4-gcc-6.3.0-rltzsdh netlib-lapack/3.6.1-gcc-6.3.0-js33dog py-appdirs/1.4.0-gcc-6.3.0-jxawmw7 xz/5.2.3-gcc-4.8-mew4log
gcc/6.3.0-gcc-4.8-24puqve lmod/7.4.1-gcc-4.8-je4srhr netlib-scalapack/2.0.2-gcc-6.3.0-5aidk4l py-numpy/1.12.0-gcc-6.3.0-oemmoeu xz/5.2.3-gcc-6.3.0-3vqeuvb
gettext/0.19.8.1-gcc-4.8-yymghlh lua/5.3.4-gcc-4.8-im75yaz netlib-scalapack/2.0.2-gcc-6.3.0-hjsemcn py-packaging/16.8-gcc-6.3.0-i2n3dtl zip/3.0-gcc-4.8-rwar22d
gmp/6.1.2-gcc-4.8-5ub2wu5 lua-luafilesystem/1_6_3-gcc-4.8-wkey3nl netlib-scalapack/2.0.2-gcc-6.3.0-jva724b py-pyparsing/2.1.10-gcc-6.3.0-tbo6gmw zlib/1.2.11-gcc-4.8-pgxsxv7
help2man/1.47.4-gcc-4.8-kcnqmau lua-luaposix/33.4.0-gcc-4.8-mdod2ry netlib-scalapack/2.0.2-gcc-6.3.0-rgqfr6d py-scipy/0.19.0-gcc-6.3.0-kr7nat4 zlib/1.2.11-gcc-6.3.0-7cqp6cj
autoconf-2.69-gcc-4.8-qextxkq hwloc-1.11.6-gcc-6.3.0-akcisez m4-1.4.18-gcc-4.8-ev2znoc openblas-0.2.19-gcc-6.3.0-dhkmed6 py-setuptools-34.2.0-gcc-6.3.0-fadur4s
automake-1.15-gcc-4.8-maqvukj isl-0.18-gcc-4.8-afi6taq m4-1.4.18-gcc-6.3.0-uppywnz openmpi-2.1.0-gcc-6.3.0-go2s4z5 py-six-1.10.0-gcc-6.3.0-p4dhkaw
binutils-2.28-gcc-4.8-5s7c6rs libiconv-1.15-gcc-4.8-at46wg3 mawk-1.3.4-gcc-4.8-acjez57 openssl-1.0.2k-gcc-4.8-dkls5tk python-2.7.13-gcc-6.3.0-tyehea7
bison-3.0.4-gcc-4.8-ek4luo5 libpciaccess-0.13.4-gcc-6.3.0-gmufnvh mawk-1.3.4-gcc-6.3.0-ostdoms openssl-1.0.2k-gcc-6.3.0-gxgr5or readline-7.0-gcc-4.8-xhufqhn
bzip2-1.0.6-gcc-4.8-iffrxzn libsigsegv-2.11-gcc-4.8-pp2cvte mpc-1.0.3-gcc-4.8-g5mztc5 pcre-8.40-gcc-4.8-r5pbrxb readline-7.0-gcc-6.3.0-zzcyicg
bzip2-1.0.6-gcc-6.3.0-bequudr libsigsegv-2.11-gcc-6.3.0-7enifnh mpfr-3.1.5-gcc-4.8-o7xm7az perl-5.24.1-gcc-4.8-dg5j65u sqlite-3.8.5-gcc-6.3.0-6zoruzj
cmake-3.7.2-gcc-6.3.0-fowuuby libtool-2.4.6-gcc-4.8-7a523za mpich-3.2-gcc-6.3.0-dmvd3aw perl-5.24.1-gcc-6.3.0-6uzkpt6 tar-1.29-gcc-4.8-wse2ass
curl-7.53.1-gcc-4.8-3fz46n6 libtool-2.4.6-gcc-6.3.0-n7zmbzt ncurses-6.0-gcc-4.8-dcpe7ia pkg-config-0.29.2-gcc-4.8-ib33t75 tcl-8.6.6-gcc-4.8-tfxzqbr
expat-2.2.0-gcc-4.8-mrv6bd4 libxml2-2.9.4-gcc-4.8-ryzxnsu ncurses-6.0-gcc-6.3.0-ucbhcdy pkg-config-0.29.2-gcc-6.3.0-jpgubk3 util-macros-1.19.1-gcc-6.3.0-xorz2x2
flex-2.6.3-gcc-4.8-yf345oo libxml2-2.9.4-gcc-6.3.0-rltzsdh netlib-lapack-3.6.1-gcc-6.3.0-js33dog py-appdirs-1.4.0-gcc-6.3.0-jxawmw7 xz-5.2.3-gcc-4.8-mew4log
gcc-6.3.0-gcc-4.8-24puqve lmod-7.4.1-gcc-4.8-je4srhr netlib-scalapack-2.0.2-gcc-6.3.0-5aidk4l py-numpy-1.12.0-gcc-6.3.0-oemmoeu xz-5.2.3-gcc-6.3.0-3vqeuvb
gettext-0.19.8.1-gcc-4.8-yymghlh lua-5.3.4-gcc-4.8-im75yaz netlib-scalapack-2.0.2-gcc-6.3.0-hjsemcn py-packaging-16.8-gcc-6.3.0-i2n3dtl zip-3.0-gcc-4.8-rwar22d
gmp-6.1.2-gcc-4.8-5ub2wu5 lua-luafilesystem-1_6_3-gcc-4.8-wkey3nl netlib-scalapack-2.0.2-gcc-6.3.0-jva724b py-pyparsing-2.1.10-gcc-6.3.0-tbo6gmw zlib-1.2.11-gcc-4.8-pgxsxv7
help2man-1.47.4-gcc-4.8-kcnqmau lua-luaposix-33.4.0-gcc-4.8-mdod2ry netlib-scalapack-2.0.2-gcc-6.3.0-rgqfr6d py-scipy-0.19.0-gcc-6.3.0-kr7nat4 zlib-1.2.11-gcc-6.3.0-7cqp6cj
The names should look familiar, as they resemble the output from ``spack find``.
For example, you could type the following command to load the ``cmake`` module:
.. code-block:: console
$ module load cmake/3.7.2-gcc-6.3.0-fowuuby
$ module load cmake-3.7.2-gcc-6.3.0-fowuuby
Neither of these is particularly pretty, easy to remember, or easy to
type. Luckily, Spack offers many facilities for customizing the module
@@ -779,35 +779,35 @@ cut-and-pasted into a shell script. For example:
$ spack module tcl loads --dependencies py-numpy git
# bzip2@1.0.6%gcc@4.9.3=linux-x86_64
module load bzip2/1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx
module load bzip2-1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx
# ncurses@6.0%gcc@4.9.3=linux-x86_64
module load ncurses/6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv
module load ncurses-6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv
# zlib@1.2.8%gcc@4.9.3=linux-x86_64
module load zlib/1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z
module load zlib-1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z
# sqlite@3.8.5%gcc@4.9.3=linux-x86_64
module load sqlite/3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr
module load sqlite-3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr
# readline@6.3%gcc@4.9.3=linux-x86_64
module load readline/6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3
module load readline-6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3
# python@3.5.1%gcc@4.9.3=linux-x86_64
module load python/3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi
module load python-3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi
# py-setuptools@20.5%gcc@4.9.3=linux-x86_64
module load py-setuptools/20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2
module load py-setuptools-20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2
# py-nose@1.3.7%gcc@4.9.3=linux-x86_64
module load py-nose/1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli
module load py-nose-1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli
# openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64
module load openblas/0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y
module load openblas-0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y
# py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64
module load py-numpy/1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r
module load py-numpy-1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r
# curl@7.47.1%gcc@4.9.3=linux-x86_64
module load curl/7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi
module load curl-7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi
# autoconf@2.69%gcc@4.9.3=linux-x86_64
module load autoconf/2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4
module load autoconf-2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4
# cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64
module load cmake/3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t
module load cmake-3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t
# expat@2.1.0%gcc@4.9.3=linux-x86_64
module load expat/2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd
module load expat-2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd
# git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64
module load git/2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd
module load git-2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd
The script may be further edited by removing unnecessary modules.
@@ -826,12 +826,12 @@ For example, consider the following on one system:
.. code-block:: console
$ module avail
linux-SuSE11-x86_64/antlr/2.7.7-gcc-5.3.0-bdpl46y
linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads antlr # WRONG!
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load antlr/2.7.7-gcc-5.3.0-bdpl46y
module load antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load linux-SuSE11-x86_64/antlr/2.7.7-gcc-5.3.0-bdpl46y
module load linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y

View File

@@ -121,7 +121,7 @@ Since v0.19, Spack supports two ways of writing a package recipe. The most comm
def url_for_version(self, version):
if version >= Version("2.1.1"):
return super().url_for_version(version)
return super(Openjpeg, self).url_for_version(version)
url_fmt = "https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz"
return url_fmt.format(version)
@@ -155,7 +155,7 @@ builder class explicitly. Using the same example as above, this reads:
def url_for_version(self, version):
if version >= Version("2.1.1"):
return super().url_for_version(version)
return super(Openjpeg, self).url_for_version(version)
url_fmt = "https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz"
return url_fmt.format(version)
@@ -3071,7 +3071,7 @@ follows:
# The library provided by the bar virtual package
@property
def bar_libs(self):
return find_libraries("libFooBar", root=self.home, recursive=True)
return find_libraries("libFooBar", root=sef.home, recursive=True)
# The baz virtual package home
@property

View File

@@ -1,8 +1,13 @@
sphinx==6.2.1
sphinxcontrib-programoutput==0.17
sphinx_design==0.4.1
sphinx-rtd-theme==1.2.2
python-levenshtein==0.21.1
docutils==0.18.1
pygments==2.15.1
urllib3==2.0.3
# These dependencies should be installed using pip in order
# to build the documentation.
sphinx>=3.4,!=4.1.2,!=5.1.0
sphinxcontrib-programoutput
sphinx-design
sphinx-rtd-theme
python-levenshtein
# Restrict to docutils <0.17 to workaround a list rendering issue in sphinx.
# https://stackoverflow.com/questions/67542699
docutils <0.17
pygments <2.13
urllib3 <2

View File

@@ -1,484 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _signing:
=====================
Spack Package Signing
=====================
The goal of package signing in Spack is to provide data integrity
assurances around official packages produced by the automated Spack CI
pipelines. These assurances directly address the security of Spacks
software supply chain by explaining why a security-conscious user can
be reasonably justified in the belief that packages installed via Spack
have an uninterrupted auditable trail back to change management
decisions judged to be appropriate by the Spack maintainers. This is
achieved through cryptographic signing of packages built by Spack CI
pipelines based on code that has been transparently reviewed and
approved on GitHub. This document describes the signing process for
interested users.
.. _risks:
------------------------------
Risks, Impact and Threat Model
------------------------------
This document addresses the approach taken to safeguard Spack's
reputation with regard to the integrity of the package data produced by
Spack's CI pipelines. It does not address issues of data confidentiality
(Spack is intended to be largely open source) or availability (efforts
are described elsewhere). With that said the main reputational risk can
be broadly categorized as a loss of faith in the data integrity due to a
breach of the private key used to sign packages. Remediation of a
private key breach would require republishing the public key with a
revocation certificate, generating a new signing key, an assessment and
potential rebuild/resigning of all packages since the key was breached,
and finally direct intervention by every spack user to update their copy
of Spack's public keys used for local verification.
The primary threat model used in mitigating the risks of these stated
impacts is one of individual error not malicious intent or insider
threat. The primary objective is to avoid the above impacts by making a
private key breach nearly impossible due to oversight or configuration
error. Obvious and straightforward measures are taken to mitigate issues
of malicious interference in data integrity and insider threats but
these attack vectors are not systematically addressed. It should be hard
to exfiltrate the private key intentionally, and almost impossible to
leak the key by accident.
.. _overview:
-----------------
Pipeline Overview
-----------------
Spack pipelines build software through progressive stages where packages
in later stages nominally depend on packages built in earlier stages.
For both technical and design reasons these dependencies are not
implemented through the default GitLab artifacts mechanism; instead
built packages are uploaded to AWS S3 mirrors (buckets) where they are
retrieved by subsequent stages in the pipeline. Two broad categories of
pipelines exist: Pull Request (PR) pipelines and Develop/Release
pipelines.
- PR pipelines are launched in response to pull requests made by
trusted and untrusted users. Packages built on these pipelines upload
code to quarantined AWS S3 locations which cache the built packages
for the purposes of review and iteration on the changes proposed in
the pull request. Packages built on PR pipelines can come from
untrusted users so signing of these pipelines is not implemented.
Jobs in these pipelines are executed via normal GitLab runners both
within the AWS GitLab infrastructure and at affiliated institutions.
- Develop and Release pipelines **sign** the packages they produce and carry
strong integrity assurances that trace back to auditable change management
decisions. These pipelines only run after members from a trusted group of
reviewers verify that the proposed changes in a pull request are appropriate.
Once the PR is merged, or a release is cut, a pipeline is run on protected
GitLab runners which provide access to the required signing keys within the
job. Intermediary keys are used to sign packages in each stage of the
pipeline as they are built and a final job officially signs each package
external to any specific package's build environment. An intermediate key
exists in the AWS infrastructure and for each affiliated institution that
maintains protected runners. The runners that execute these pipelines
exclusively accept jobs from protected branches meaning the intermediate keys
are never exposed to unreviewed code and the official keys are never exposed
to any specific build environment.
.. _key_architecture:
----------------
Key Architecture
----------------
Spack's CI process uses public-key infrastructure (PKI) based on GNU Privacy
Guard (gpg) keypairs to sign public releases of spack package metadata, also
called specs. Two classes of GPG keys are involved in the process to reduce the
impact of an individual private key compromise; these key classes are the
*Intermediate CI Key* and *Reputational Key*. Each of these keys has signing
sub-keys that are used exclusively for signing packages. This can be confusing
so for the purpose of this explanation we'll refer to Root and Signing keys.
Each key has a private and a public component as well as one or more identities
and zero or more signatures.
-------------------
Intermediate CI Key
-------------------
The Intermediate key class is used to sign and verify packages between stages
within a develop or release pipeline. An intermediate key exists for the AWS
infrastructure as well as each affiliated institution that maintains protected
runners. These intermediate keys are made available to the GitLab execution
environment building the package so that the package's dependencies may be
verified by the Signing Intermediate CI Public Key and the final package may be
signed by the Signing Intermediate CI Private Key.
+---------------------------------------------------------------------------------------------------------+
| **Intermediate CI Key (GPG)** |
+==================================================+======================================================+
| Root Intermediate CI Private Key (RSA 4096)# | Root Intermediate CI Public Key (RSA 4096) |
+--------------------------------------------------+------------------------------------------------------+
| Signing Intermediate CI Private Key (RSA 4096) | Signing Intermediate CI Public Key (RSA 4096) |
+--------------------------------------------------+------------------------------------------------------+
| Identity: “Intermediate CI Key <maintainers@spack.io>” |
+---------------------------------------------------------------------------------------------------------+
| Signatures: None |
+---------------------------------------------------------------------------------------------------------+
The *Root Intermediate CI Private Key* is stripped out of the GPG key and
stored offline, completely separate from Spack's infrastructure. This allows the
core development team to append revocation certificates to the GPG key and
issue new sub-keys for use in the pipeline. It is our expectation that this
will happen on a semi-regular basis. A corollary of this is that *this key
should not be used to verify package integrity outside the internal CI process.*
----------------
Reputational Key
----------------
The Reputational Key is the public facing key used to sign complete groups of
development and release packages. Only one key pair exists in this class of
keys. In contrast to the Intermediate CI Key, the Reputational Key *should* be
used to verify package integrity. At the end of a develop or release pipeline, a
final pipeline job pulls down all signed package metadata built by the pipeline,
verifies they were signed with an Intermediate CI Key, then strips the
Intermediate CI Key signature from the package and re-signs them with the
Signing Reputational Private Key. The officially signed packages are then
uploaded back to the AWS S3 mirror. Please note that separating use of the
reputational key into this final job is done to prevent leakage of the key in a
spack package. Because the Signing Reputational Private Key is never exposed to
a build job it cannot accidentally end up in any built package.
+---------------------------------------------------------------------------------------------------------+
| **Reputational Key (GPG)** |
+==================================================+======================================================+
| Root Reputational Private Key (RSA 4096)# | Root Reputational Public Key (RSA 4096) |
+--------------------------------------------------+------------------------------------------------------+
| Signing Reputational Private Key (RSA 4096) | Signing Reputational Public Key (RSA 4096) |
+--------------------------------------------------+------------------------------------------------------+
| Identity: “Spack Project <maintainers@spack.io>” |
+---------------------------------------------------------------------------------------------------------+
| Signatures: Signed by core development team [#f1]_ |
+---------------------------------------------------------------------------------------------------------+
The Root Reputational Private Key is stripped out of the GPG key and stored
offline, completely separate from Spack's infrastructure. This allows the core
development team to append revocation certificates to the GPG key in the
unlikely event that the Signing Reputation Private Key is compromised. In
general it is the expectation that rotating this key will happen infrequently if
at all. This should allow relatively transparent verification for the end-user
community without needing deep familiarity with GnuPG or Public Key
Infrastructure.
.. _build_cache_format:
------------------
Build Cache Format
------------------
A binary package consists of a metadata file unambiguously defining the
built package (and including other details such as how to relocate it)
and the installation directory of the package stored as a compressed
archive file. The metadata files can either be unsigned, in which case
the contents are simply the json-serialized concrete spec plus metadata,
or they can be signed, in which case the json-serialized concrete spec
plus metadata is wrapped in a gpg cleartext signature. Built package
metadata files are named to indicate the operating system and
architecture for which the package was built as well as the compiler
used to build it and the package's name and version. For example::
linux-ubuntu18.04-haswell-gcc-7.5.0-zlib-1.2.12-llv2ysfdxnppzjrt5ldybb5c52qbmoow.spec.json.sig
would contain the concrete spec and binary metadata for a binary package
of ``zlib@1.2.12``, built for the ``ubuntu`` operating system and ``haswell``
architecture. The id of the built package exists in the name of the file
as well (after the package name and version) and in this case begins
with ``llv2ys``. The id distinguishes a particular built package from all
other built packages with the same os/arch, compiler, name, and version.
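As a rough, purely illustrative sketch of that naming scheme (not code taken from Spack; the helper name is hypothetical), the trailing id can be recovered from such a filename like this:

.. code-block:: python

   # Hypothetical helper: extract the trailing hash id from a built-package
   # metadata filename of the form shown above.
   def buildcache_hash_id(filename):
       # Strip the metadata suffix, signed or unsigned.
       for suffix in (".spec.json.sig", ".spec.json"):
           if filename.endswith(suffix):
               filename = filename[: -len(suffix)]
               break
       # The id is the last dash-separated component, e.g. "llv2ys...".
       return filename.rsplit("-", 1)[-1]

   name = ("linux-ubuntu18.04-haswell-gcc-7.5.0-zlib-1.2.12-"
           "llv2ysfdxnppzjrt5ldybb5c52qbmoow.spec.json.sig")
   assert buildcache_hash_id(name).startswith("llv2ys")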
Below is an example of a signed binary package metadata file. Such a
file would live in the ``build_cache`` directory of a binary mirror::
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512
{
"spec": {
<concrete-spec-contents-omitted>
},
"buildcache_layout_version": 1,
"binary_cache_checksum": {
"hash_algorithm": "sha256",
"hash": "4f1e46452c35a5e61bcacca205bae1bfcd60a83a399af201a29c95b7cc3e1423"
},
"buildinfo": {
"relative_prefix":
"linux-ubuntu18.04-haswell/gcc-7.5.0/zlib-1.2.12-llv2ysfdxnppzjrt5ldybb5c52qbmoow",
"relative_rpaths": false
}
}
-----BEGIN PGP SIGNATURE-----
iQGzBAEBCgAdFiEETZn0sLle8jIrdAPLx/P+voVcifMFAmKAGvwACgkQx/P+voVc
ifNoVgv/VrhA+wurVs5GB9PhmMA1m5U/AfXZb4BElDRwpT8ZcTPIv5X8xtv60eyn
4EOneGVbZoMThVxgev/NKARorGmhFXRqhWf+jknJZ1dicpqn/qpv34rELKUpgXU+
QDQ4d1P64AIdTczXe2GI9ZvhOo6+bPvK7LIsTkBbtWmopkomVxF0LcMuxAVIbA6b
887yBvVO0VGlqRnkDW7nXx49r3AG2+wDcoU1f8ep8QtjOcMNaPTPJ0UnjD0VQGW6
4ZFaGZWzdo45MY6tF3o5mqM7zJkVobpoW3iUz6J5tjz7H/nMlGgMkUwY9Kxp2PVH
qoj6Zip3LWplnl2OZyAY+vflPFdFh12Xpk4FG7Sxm/ux0r+l8tCAPvtw+G38a5P7
QEk2JBr8qMGKASmnRlJUkm1vwz0a95IF3S9YDfTAA2vz6HH3PtsNLFhtorfx8eBi
Wn5aPJAGEPOawEOvXGGbsH4cDEKPeN0n6cy1k92uPEmBLDVsdnur8q42jk5c2Qyx
j3DXty57
=3gvm
-----END PGP SIGNATURE-----
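For illustration only, here is a minimal sketch (not Spack's implementation, and it performs no signature verification) of recovering the JSON payload from a clearsigned metadata file laid out like the example above:

.. code-block:: python

   import json

   def read_clearsigned_spec(path):
       # Sketch only: assumes the clearsigned layout shown above, i.e. a
       # "Hash:" header, a blank line, the JSON body, then the signature
       # block. Use gpg to actually verify the signature.
       with open(path) as f:
           text = f.read()
       body = text.split("-----BEGIN PGP SIGNED MESSAGE-----", 1)[1]
       body = body.split("-----BEGIN PGP SIGNATURE-----", 1)[0]
       # Drop the "Hash: ..." header line(s) before the first blank line.
       payload = body.split("\n\n", 1)[1]
       return json.loads(payload)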
If a user has trusted the public key associated with the private key
used to sign the above spec file, the signature can be verified with
gpg, as follows::
$ gpg --verify linux-ubuntu18.04-haswell-gcc-7.5.0-zlib-1.2.12-llv2ysfdxnppzjrt5ldybb5c52qbmoow.spec.json.sig
The metadata (regardless of whether signed or unsigned) contains the checksum
of the ``.spack`` file containing the actual installation. The checksum should
be compared to a checksum computed locally on the ``.spack`` file to ensure the
contents have not changed since the binary spec plus metadata were signed. The
``.spack`` files are actually tarballs containing the compressed archive of the
install tree. These files, along with the metadata files, live within the
``build_cache`` directory of the mirror, and together are organized as follows::
build_cache/
# unsigned metadata (for indexing, contains sha256 of .spack file)
<arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spec.json
# clearsigned metadata (same as above, but signed)
<arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spec.json.sig
<arch>/
<compiler>/
<name>-<ver>/
# tar.gz-compressed prefix (may support more compression formats later)
<arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spack
Uncompressing and extracting the ``.spack`` file results in the install tree.
This is in contrast to previous versions of spack, where the ``.spack`` file
contained a (duplicated) metadata file, a signature file and a nested tarball
containing the install tree.
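As a minimal sketch of the checksum comparison described above (not Spack's own code; the function and argument names are hypothetical), the ``.spack`` archive can be checked against the ``binary_cache_checksum`` recorded in the metadata:

.. code-block:: python

   import hashlib

   def verify_spack_archive(metadata, spack_file):
       # ``metadata`` is the dict loaded from the spec.json (signed or
       # unsigned); ``spack_file`` is the path to the .spack tarball.
       checksum = metadata["binary_cache_checksum"]
       h = hashlib.new(checksum["hash_algorithm"])  # e.g. "sha256"
       with open(spack_file, "rb") as f:
           for chunk in iter(lambda: f.read(1 << 20), b""):
               h.update(chunk)
       return h.hexdigest() == checksum["hash"]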
.. _internal_implementation:
-----------------------
Internal Implementation
-----------------------
The technical implementation of the pipeline signing process includes components
defined in Amazon Web Services, the Kubernetes cluster, at affiliated
institutions, and the GitLab/GitLab Runner deployment. We present the technical
implementation in two interdependent sections. The first addresses how secrets
are managed through the lifecycle of a develop or release pipeline. The second
section describes how Gitlab Runner and pipelines are configured and managed to
support secure automated signing.
Secrets Management
^^^^^^^^^^^^^^^^^^
As stated above, the Root Private Keys (intermediate and reputational)
are stripped from the GPG keys and stored outside Spack's
infrastructure.
.. warning::
**TODO**
- Explanation here about where and how access is handled for these keys.
- Both Root private keys are protected with strong passwords
- Who has access to these and how?
**Intermediate CI Key**
-----------------------
Multiple intermediate CI signing keys exist: one Intermediate CI Key for jobs
run in AWS, and one key for each affiliated institution (e.g. University of
Oregon). Here we describe how the Intermediate CI Key is managed in AWS:
The Intermediate CI Key (including the Signing Intermediate CI Private Key) is
exported as an ASCII armored file and stored in a Kubernetes secret called
``spack-intermediate-ci-signing-key``. For convenience sake, this same secret
contains an ASCII-armored export of just the *public* components of the
Reputational Key. This secret also contains the *public* components of each of
the affiliated institutions' Intermediate CI Key. These are potentially needed
to verify dependent packages which may have been found in the public mirror or
built by a protected job running on an affiliated institution's infrastructure
in an earlier stage of the pipeline.
Procedurally the ``spack-intermediate-ci-signing-key`` secret is used in
the following way:
1. A ``large-arm-prot`` or ``large-x86-prot`` protected runner picks up
a job tagged ``protected`` from a protected GitLab branch. (See
`Protected Runners and Reserved Tags <#_8bawjmgykv0b>`__).
2. Based on its configuration, the runner creates a job Pod in the
pipeline namespace and mounts the spack-intermediate-ci-signing-key
Kubernetes secret into the build container
3. The Intermediate CI Key, affiliated institutions' public key and the
Reputational Public Key are imported into a keyring by the ``spack gpg …``
sub-command. This is initiated by the job's build script, which is created by
the generate job at the beginning of the pipeline.
4. Assuming the package has dependencies, those specs are verified using
the keyring.
5. The package is built and the spec.json is generated
6. The spec.json is signed by the keyring and uploaded to the mirror's
build cache.
**Reputational Key**
--------------------
Because of the increased impact to end users in the case of a private
key breach, the Reputational Key is managed separately from the
Intermediate CI Keys and has additional controls. First, the Reputational
Key was generated outside of Spack's infrastructure and has been signed
by the core development team. The Reputational Key (along with the
Signing Reputational Private Key) was then ASCII armor exported to a
file. Unlike the Intermediate CI Key this exported file is not stored as
a base64-encoded secret in Kubernetes. Instead, *the key file
itself* is encrypted and stored in Kubernetes as the
``spack-signing-key-encrypted`` secret in the pipeline namespace.
The encryption of the exported Reputational Key (including the Signing
Reputational Private Key) is handled by `AWS Key Management Store (KMS) data
keys
<https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys>`__.
The private key material is decrypted and imported at the time of signing into a
memory mounted temporary directory holding the keychain. The signing job uses
the `AWS Encryption SDK
<https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/crypto-cli.html>`__
(i.e. ``aws-encryption-cli``) to decrypt the Reputational Key. Permission to
decrypt the key is granted to the job Pod through a Kubernetes service account
specifically used for this, and only this, function. Finally, for convenience
sake, this same secret contains an ASCII-armored export of the *public*
components of the Intermediate CI Keys and the Reputational Key. This allows the
signing script to verify that packages were built by the pipeline (both on AWS
or at affiliated institutions), or signed previously as a part of a different
pipeline. This is done *before* decrypting and importing the
Signing Reputational Private Key material and officially signing the packages.
Procedurally, the ``spack-signing-key-encrypted`` secret is used in the
following way:
1. The ``spack-package-signing-gitlab-runner`` protected runner picks
up a job tagged ``notary`` from a protected GitLab branch (See
`Protected Runners and Reserved Tags <#_8bawjmgykv0b>`__).
2. Based on its configuration, the runner creates a job pod in the
pipeline namespace. The job is run in a stripped-down, purpose-built
Docker image, ``ghcr.io/spack/notary:latest``. The runner is
configured to only allow running jobs with this image.
3. The runner also mounts the ``spack-signing-key-encrypted`` secret to
a path on disk. Note that this becomes several files on disk: the
public components of the Intermediate CI Keys, the public components
of the Reputational Key, and an AWS KMS encrypted file containing the
Signing Reputational Private Key.
4. In addition to the secret, the runner creates a tmpfs memory mounted
directory where the GnuPG keyring will be created to verify, and
then resign the package specs.
5. The job script syncs all spec.json.sig files from the build cache to
a working directory in the job's execution environment.
6. The job script then runs the ``sign.sh`` script built into the
notary Docker image.
7. The ``sign.sh`` script imports the public components of the
Reputational and Intermediate CI Keys and uses them to verify good
signatures on the spec.json.sig files. If any signed spec does not
verify the job immediately fails.
8. Assuming all specs are verified, the ``sign.sh`` script then unpacks
the spec json data from the signed file in preparation for being
re-signed with the Reputational Key.
9. The private components of the Reputational Key are decrypted to
standard out using ``aws-encryption-cli`` directly into a ``gpg
import …`` statement which imports the key into the
keyring mounted in-memory.
10. The private key is then used to sign each of the json specs and the
keyring is removed from disk.
11. The re-signed json specs are resynced to the AWS S3 Mirror and the
public signing of the packages for the develop or release pipeline
that created them is complete.
Non-service-account access to the private components of the Reputational
Key is managed through access to the symmetric secret in KMS used
to encrypt the data key (which in turn is used to encrypt the GnuPG key
- See:\ `Encryption SDK
Documentation <https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/crypto-cli-examples.html#cli-example-encrypt-file>`__).
A small trusted subset of the core development team are the only
individuals with access to this symmetric key.
.. _protected_runners:
Protected Runners and Reserved Tags
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack has a large number of GitLab Runners operating in its build farm.
These include runners deployed in the AWS Kubernetes cluster as well as
runners deployed at affiliated institutions. The majority of runners are
shared runners that operate across projects in gitlab.spack.io. These
runners pick up jobs primarily from the spack/spack project and execute
them in PR pipelines.
A small number of runners operating on AWS and at affiliated institutions are
registered as specific *protected* runners on the spack/spack project. In
addition to protected runners there are protected branches on the spack/spack
project. These are the ``develop`` branch, any release branch (i.e. managed with
the ``releases/v*`` wildcard), and any tag branch (managed with the ``v*``
wildcard). Finally, Spack's pipeline generation code reserves certain job tags to make
sure jobs are routed to the correct runners; these tags are ``public``,
``protected``, and ``notary``. Understanding how all of this works together to
protect secrets and provide integrity assurances can be a little confusing, so
let's break it down:
- **Protected Branches** - Protected branches in Spack prevent anyone
other than Maintainers in GitLab from pushing code. In the case of
Spack, the only Maintainer-level entity pushing code to protected
branches is Spack bot. Protecting branches also marks them in such a
way that Protected Runners will only run jobs from those branches.
- **Protected Runners** - Protected Runners only run jobs from protected
branches. Because protected runners have access to secrets, it's critical
that they not run jobs from untrusted code (i.e. PR branches). If they did, it
would be possible for a PR branch to tag a job in such a way that a protected
runner executed that job and mounted secrets into a code execution
environment that had not been reviewed by Spack maintainers. Note, however,
that in the absence of tagging used to route jobs, public runners *could* run
jobs from protected branches. No secrets would be at risk of being breached
because non-protected runners do not have access to those secrets; lack of
secrets would, however, cause the jobs to fail.
- **Reserved Tags** - To mitigate the issue of public runners picking up
protected jobs, Spack uses a small set of “reserved” job tags (note that these
are *job* tags, not git tags). These tags are “public”, “protected”, and
“notary.” The majority of jobs executed in Spack's GitLab instance are
executed via a ``generate`` job. The generate job code systematically ensures
that no user-defined configuration sets these tags. Instead, the ``generate``
job sets these tags based on rules related to the branch where the pipeline
originated. If the job is part of a pipeline on a PR branch, it sets the
``public`` tag. If the job is part of a pipeline on a protected branch, it
sets the ``protected`` tag. Finally, if the job is the package signing job and
it is running in a pipeline on a protected branch, it sets
the ``notary`` tag (see the sketch after this list).
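As an illustration only, the branch-based routing rule amounts to something like
the following pseudo-shell. The actual logic lives in Spack's Python pipeline
generation code, and the ``JOB_IS_PACKAGE_SIGNING`` flag below is a made-up
stand-in for however the generate job identifies the signing job:

.. code-block:: sh

   # Illustrative pseudo-shell of the tag-selection rule; not Spack's actual code.
   if [ "$CI_COMMIT_REF_PROTECTED" != "true" ]; then
       tag=public          # PR pipelines: routed to shared (public) runners
   elif [ "$JOB_IS_PACKAGE_SIGNING" = "true" ]; then
       tag=notary          # package signing job on a protected branch
   else
       tag=protected       # all other jobs on protected branches
   fi
   echo "job tag: $tag"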
Protected Runners are configured to only run jobs from protected branches. Only
jobs running in pipelines on protected branches are tagged with ``protected`` or
``notary`` tags. This tightly couples jobs on protected branches to protected
runners that provide access to the secrets required to sign the built packages.
The secrets can **only** be accessed via:
1. Runners under direct control of the core development team.
2. Runners under direct control of trusted maintainers at affiliated institutions.
3. Code running in the automated pipeline that has been reviewed by the
Spack maintainers and judged to be appropriate.
Other attempts (either through malicious intent or incompetence) can at
worst grab jobs intended for protected runners, which will cause those
jobs to fail, alerting both Spack maintainers and the core development
team.
.. [#f1]
The Reputational Key has also cross-signed core development team
keys.
428
lib/spack/env/cc vendored
View File
@@ -416,14 +416,30 @@ input_command="$*"
# The lists are all bell-separated to be as flexible as possible, as their
# contents may come from the command line, from ' '-separated lists,
# ':'-separated lists, etc.
include_dirs_list=""
lib_dirs_list=""
rpath_dirs_list=""
system_include_dirs_list=""
system_lib_dirs_list=""
system_rpath_dirs_list=""
isystem_system_include_dirs_list=""
isystem_include_dirs_list=""
libs_list=""
other_args_list=""
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
# Same, but for -Xlinker -rpath -Xlinker /path
xlinker_expect_rpath=no
parse_Wl() {
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
append system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
append rpath_dirs_list "$1"
fi
wl_expect_rpath=no
else
@@ -433,9 +449,9 @@ parse_Wl() {
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
append system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
append rpath_dirs_list "$arg"
fi
;;
--rpath=*)
@@ -443,9 +459,9 @@ parse_Wl() {
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
append system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
append rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
@@ -459,7 +475,7 @@ parse_Wl() {
return 1
;;
*)
append return_other_args_list "-Wl,$1"
append other_args_list "-Wl,$1"
;;
esac
fi
@@ -467,210 +483,177 @@ parse_Wl() {
done
}
categorize_arguments() {
unset IFS
while [ $# -ne 0 ]; do
return_other_args_list=""
return_isystem_was_used=""
return_isystem_system_include_dirs_list=""
return_isystem_include_dirs_list=""
return_system_include_dirs_list=""
return_include_dirs_list=""
return_system_lib_dirs_list=""
return_lib_dirs_list=""
return_system_rpath_dirs_list=""
return_rpath_dirs_list=""
# an RPATH to be added after the case statement.
rp=""
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
# Multiple consecutive spaces in the command line can
# result in blank arguments
if [ -z "$1" ]; then
shift
continue
fi
# Same, but for -Xlinker -rpath -Xlinker /path
xlinker_expect_rpath=no
while [ $# -ne 0 ]; do
# an RPATH to be added after the case statement.
rp=""
# Multiple consecutive spaces in the command line can
# result in blank arguments
if [ -z "$1" ]; then
shift
continue
fi
if [ -n "${SPACK_COMPILER_FLAGS_KEEP}" ] ; then
# NOTE: the eval is required to allow `|` alternatives inside the variable
eval "\
case \"\$1\" in
$SPACK_COMPILER_FLAGS_KEEP)
append return_other_args_list \"\$1\"
shift
continue
;;
esac
"
fi
# the replace list is a space-separated list of pipe-separated pairs,
# the first in each pair is the original prefix to be matched, the
# second is the replacement prefix
if [ -n "${SPACK_COMPILER_FLAGS_REPLACE}" ] ; then
for rep in ${SPACK_COMPILER_FLAGS_REPLACE} ; do
before=${rep%|*}
after=${rep#*|}
eval "\
stripped=\"\${1##$before}\"
"
if [ "$stripped" = "$1" ] ; then
continue
fi
replaced="$after$stripped"
# it matched, remove it
if [ -n "${SPACK_COMPILER_FLAGS_KEEP}" ] ; then
# NOTE: the eval is required to allow `|` alternatives inside the variable
eval "\
case \"\$1\" in
$SPACK_COMPILER_FLAGS_KEEP)
append other_args_list \"\$1\"
shift
if [ -z "$replaced" ] ; then
# completely removed, continue OUTER loop
continue 2
fi
# re-build argument list with replacement
set -- "$replaced" "$@"
done
fi
case "$1" in
-isystem*)
arg="${1#-isystem}"
return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append return_isystem_system_include_dirs_list "$arg"
else
append return_isystem_include_dirs_list "$arg"
fi
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append return_system_include_dirs_list "$arg"
else
append return_include_dirs_list "$arg"
fi
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append return_system_lib_dirs_list "$arg"
else
append return_lib_dirs_list "$arg"
fi
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
# and passed by ifx to the linker, which confuses it with a
# library. Filter it out.
# TODO: generalize filtering of args with an env var, so that
# TODO: we do not have to special case this here.
if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
&& [ "$1" != "${1#-loopopt}" ]; then
shift
continue
fi
arg="${1#-l}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append return_other_args_list "-l$arg"
;;
-Wl,*)
IFS=,
if ! parse_Wl ${1#-Wl,}; then
append return_other_args_list "$1"
fi
unset IFS
;;
-Xlinker)
shift
if [ $# -eq 0 ]; then
# -Xlinker without value: let the compiler error about it.
append return_other_args_list -Xlinker
xlinker_expect_rpath=no
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
;;
"$dtags_to_strip")
;;
*)
append return_other_args_list -Xlinker
append return_other_args_list "$1"
;;
esac
fi
;;
"$dtags_to_strip")
;;
*)
append return_other_args_list "$1"
continue
;;
esac
shift
done
"
fi
# the replace list is a space-separated list of pipe-separated pairs,
# the first in each pair is the original prefix to be matched, the
# second is the replacement prefix
if [ -n "${SPACK_COMPILER_FLAGS_REPLACE}" ] ; then
for rep in ${SPACK_COMPILER_FLAGS_REPLACE} ; do
before=${rep%|*}
after=${rep#*|}
eval "\
stripped=\"\${1##$before}\"
"
if [ "$stripped" = "$1" ] ; then
continue
fi
# We found `-Xlinker -rpath` but no matching value `-Xlinker /path`. Just append
# `-Xlinker -rpath` again and let the compiler or linker handle the error during arg
# parsing.
if [ "$xlinker_expect_rpath" = yes ]; then
append return_other_args_list -Xlinker
append return_other_args_list -rpath
replaced="$after$stripped"
# it matched, remove it
shift
if [ -z "$replaced" ] ; then
# completely removed, continue OUTER loop
continue 2
fi
# re-build argument list with replacement
set -- "$replaced" "$@"
done
fi
# Same, but for -Wl flags.
if [ "$wl_expect_rpath" = yes ]; then
append return_other_args_list -Wl,-rpath
fi
}
case "$1" in
-isystem*)
arg="${1#-isystem}"
isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append isystem_system_include_dirs_list "$arg"
else
append isystem_include_dirs_list "$arg"
fi
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append system_include_dirs_list "$arg"
else
append include_dirs_list "$arg"
fi
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
if system_dir "$arg"; then
append system_lib_dirs_list "$arg"
else
append lib_dirs_list "$arg"
fi
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
# and passed by ifx to the linker, which confuses it with a
# library. Filter it out.
# TODO: generalize filtering of args with an env var, so that
# TODO: we do not have to special case this here.
if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
&& [ "$1" != "${1#-loopopt}" ]; then
shift
continue
fi
arg="${1#-l}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append other_args_list "-l$arg"
;;
-Wl,*)
IFS=,
if ! parse_Wl ${1#-Wl,}; then
append other_args_list "$1"
fi
unset IFS
;;
-Xlinker)
shift
if [ $# -eq 0 ]; then
# -Xlinker without value: let the compiler error about it.
append other_args_list -Xlinker
xlinker_expect_rpath=no
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
if system_dir "$1"; then
append system_rpath_dirs_list "$1"
else
append rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
if system_dir "$arg"; then
append system_rpath_dirs_list "$arg"
else
append rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
if system_dir "$arg"; then
append system_rpath_dirs_list "$arg"
else
append rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
;;
"$dtags_to_strip")
;;
*)
append other_args_list -Xlinker
append other_args_list "$1"
;;
esac
fi
;;
"$dtags_to_strip")
;;
*)
append other_args_list "$1"
;;
esac
shift
done
categorize_arguments "$@"
include_dirs_list="$return_include_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
isystem_was_used="$return_isystem_was_used"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
other_args_list="$return_other_args_list"
# We found `-Xlinker -rpath` but no matching value `-Xlinker /path`. Just append
# `-Xlinker -rpath` again and let the compiler or linker handle the error during arg
# parsing.
if [ "$xlinker_expect_rpath" = yes ]; then
append other_args_list -Xlinker
append other_args_list -rpath
fi
# Same, but for -Wl flags.
if [ "$wl_expect_rpath" = yes ]; then
append other_args_list -Wl,-rpath
fi
#
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
@@ -690,14 +673,12 @@ elif [ "$SPACK_ADD_DEBUG_FLAGS" = "custom" ]; then
extend flags_list SPACK_DEBUG_FLAGS
fi
spack_flags_list=""
# Fortran flags come before CPPFLAGS
case "$mode" in
cc|ccld)
case $lang_flags in
F)
extend spack_flags_list SPACK_FFLAGS
extend flags_list SPACK_FFLAGS
;;
esac
;;
@@ -706,7 +687,7 @@ esac
# C preprocessor flags come before any C/CXX flags
case "$mode" in
cpp|as|cc|ccld)
extend spack_flags_list SPACK_CPPFLAGS
extend flags_list SPACK_CPPFLAGS
;;
esac
@@ -716,10 +697,10 @@ case "$mode" in
cc|ccld)
case $lang_flags in
C)
extend spack_flags_list SPACK_CFLAGS
extend flags_list SPACK_CFLAGS
;;
CXX)
extend spack_flags_list SPACK_CXXFLAGS
extend flags_list SPACK_CXXFLAGS
;;
esac
@@ -731,25 +712,10 @@ esac
# Linker flags
case "$mode" in
ld|ccld)
extend spack_flags_list SPACK_LDFLAGS
extend flags_list SPACK_LDFLAGS
;;
esac
IFS="$lsep"
categorize_arguments $spack_flags_list
unset IFS
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag
if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
if [ "${SPACK_SHORT_SPEC#*darwin}" != "${SPACK_SHORT_SPEC}" ]; then
@@ -775,8 +741,6 @@ if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend lib_dirs_list SPACK_LINK_DIRS
fi
libs_list=""
# add RPATHs if we're in in any linking mode
case "$mode" in
ld|ccld)
@@ -805,16 +769,12 @@ args_list="$flags_list"
# Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_flags_include_dirs_list "-I"
extend args_list include_dirs_list "-I"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
elif [ "$isystem_was_used" = "true" ]; then
if [ "$isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
else
extend args_list SPACK_INCLUDE_DIRS "-I"
@@ -822,15 +782,11 @@ case "$mode" in
;;
esac
extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
# Library search paths
extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
# RPATHs arguments
@@ -839,25 +795,20 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add"
fi
extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath"
;;
ld)
if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add"
fi
extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}"
;;
esac
# Other arguments from the input command
extend args_list other_args_list
extend args_list spack_flags_other_args_list
# Inject SPACK_LDLIBS, if supplied
extend args_list libs_list "-l"
@@ -913,4 +864,3 @@ fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list
View File
@@ -65,6 +65,9 @@
up to date with CTest, just make sure the ``*_matches`` and
``*_exceptions`` lists are kept up to date with CTest's build handler.
"""
from __future__ import print_function
from __future__ import division
import re
import math
import multiprocessing
@@ -208,7 +211,7 @@
]
class LogEvent:
class LogEvent(object):
"""Class representing interesting events (e.g., errors) in a build log."""
def __init__(self, text, line_no,
source_file=None, source_line_no=None,
@@ -345,7 +348,7 @@ def _parse_unpack(args):
return _parse(*args)
class CTestLogParser:
class CTestLogParser(object):
"""Log file parser that extracts errors and warnings."""
def __init__(self, profile=False):
# whether to record timing information
View File
@@ -3,42 +3,33 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import abc
from __future__ import print_function
import argparse
import errno
import io
import re
import sys
from argparse import ArgumentParser
from typing import IO, Optional, Sequence, Tuple
class Command:
class Command(object):
"""Parsed representation of a command from argparse.
This is a single command from an argparse parser. ``ArgparseWriter`` creates these and returns
them from ``parse()``, and it passes one of these to each call to ``format()`` so that we can
take an action for a single command.
This is a single command from an argparse parser. ``ArgparseWriter``
creates these and returns them from ``parse()``, and it passes one of
these to each call to ``format()`` so that we can take an action for
a single command.
Parts of a Command:
- prog: command name (str)
- description: command description (str)
- usage: command usage (str)
- positionals: list of positional arguments (list)
- optionals: list of optional arguments (list)
- subcommands: list of subcommand parsers (list)
"""
def __init__(
self,
prog: str,
description: Optional[str],
usage: str,
positionals: Sequence[Tuple[str, str]],
optionals: Sequence[Tuple[Sequence[str], str, str]],
subcommands: Sequence[Tuple[ArgumentParser, str]],
) -> None:
"""Initialize a new Command instance.
Args:
prog: Program name.
description: Command description.
usage: Command usage.
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
"""
def __init__(self, prog, description, usage, positionals, optionals, subcommands):
self.prog = prog
self.description = description
self.usage = usage
@@ -47,34 +38,35 @@ def __init__(
self.subcommands = subcommands
# NOTE: The only reason we subclass argparse.HelpFormatter is to get access to self._expand_help(),
# ArgparseWriter is not intended to be used as a formatter_class.
class ArgparseWriter(argparse.HelpFormatter, abc.ABC):
"""Analyze an argparse ArgumentParser for easy generation of help."""
# NOTE: The only reason we subclass argparse.HelpFormatter is to get access
# to self._expand_help(), ArgparseWriter is not intended to be used as a
# formatter_class.
class ArgparseWriter(argparse.HelpFormatter):
"""Analyzes an argparse ArgumentParser for easy generation of help."""
def __init__(self, prog: str, out: IO = sys.stdout, aliases: bool = False) -> None:
"""Initialize a new ArgparseWriter instance.
def __init__(self, prog, out=None, aliases=False):
"""Initializes a new ArgparseWriter instance.
Args:
prog: Program name.
out: File object to write to.
aliases: Whether or not to include subparsers for aliases.
Parameters:
prog (str): the program name
out (file object): the file to write to (default sys.stdout)
aliases (bool): whether or not to include subparsers for aliases
"""
super().__init__(prog)
super(ArgparseWriter, self).__init__(prog)
self.level = 0
self.prog = prog
self.out = out
self.out = sys.stdout if out is None else out
self.aliases = aliases
def parse(self, parser: ArgumentParser, prog: str) -> Command:
"""Parse the parser object and return the relavent components.
def parse(self, parser, prog):
"""Parses the parser object and returns the relavent components.
Args:
parser: Command parser.
prog: Program name.
Parameters:
parser (argparse.ArgumentParser): the parser
prog (str): the command name
Returns:
Information about the command from the parser.
(Command) information about the command from the parser
"""
self.parser = parser
@@ -88,7 +80,8 @@ def parse(self, parser: ArgumentParser, prog: str) -> Command:
groups = parser._mutually_exclusive_groups
usage = fmt._format_usage(None, actions, groups, "").strip()
# Go through actions and split them into optionals, positionals, and subcommands
# Go through actions and split them into optionals, positionals,
# and subcommands
optionals = []
positionals = []
subcommands = []
@@ -105,7 +98,7 @@ def parse(self, parser: ArgumentParser, prog: str) -> Command:
subcommands.append((subparser, subaction.dest))
# Look for aliases of the form 'name (alias, ...)'
if self.aliases and isinstance(subaction.metavar, str):
if self.aliases:
match = re.match(r"(.*) \((.*)\)", subaction.metavar)
if match:
aliases = match.group(2).split(", ")
@@ -120,26 +113,28 @@ def parse(self, parser: ArgumentParser, prog: str) -> Command:
return Command(prog, description, usage, positionals, optionals, subcommands)
@abc.abstractmethod
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
def format(self, cmd):
"""Returns the string representation of a single node in the
parser tree.
Override this in subclasses to define how each subcommand should be displayed.
Override this in subclasses to define how each subcommand
should be displayed.
Args:
cmd: Parsed information about a command or subcommand.
Parameters:
(Command): parsed information about a command or subcommand
Returns:
String representation of this subcommand.
str: the string representation of this subcommand
"""
raise NotImplementedError
def _write(self, parser: ArgumentParser, prog: str, level: int = 0) -> None:
"""Recursively write a parser.
def _write(self, parser, prog, level=0):
"""Recursively writes a parser.
Args:
parser: Command parser.
prog: Program name.
level: Current level.
Parameters:
parser (argparse.ArgumentParser): the parser
prog (str): the command name
level (int): the current level
"""
self.level = level
@@ -149,17 +144,19 @@ def _write(self, parser: ArgumentParser, prog: str, level: int = 0) -> None:
for subparser, prog in cmd.subcommands:
self._write(subparser, prog, level=level + 1)
def write(self, parser: ArgumentParser) -> None:
def write(self, parser):
"""Write out details about an ArgumentParser.
Args:
parser: Command parser.
parser (argparse.ArgumentParser): the parser
"""
try:
self._write(parser, self.prog)
except BrokenPipeError:
except IOError as e:
# Swallow pipe errors
pass
# Raises IOError in Python 2 and BrokenPipeError in Python 3
if e.errno != errno.EPIPE:
raise
_rst_levels = ["=", "-", "^", "~", ":", "`"]
@@ -168,33 +165,21 @@ def write(self, parser: ArgumentParser) -> None:
class ArgparseRstWriter(ArgparseWriter):
"""Write argparse output as rst sections."""
def __init__(
self,
prog: str,
out: IO = sys.stdout,
aliases: bool = False,
rst_levels: Sequence[str] = _rst_levels,
) -> None:
"""Initialize a new ArgparseRstWriter instance.
def __init__(self, prog, out=None, aliases=False, rst_levels=_rst_levels):
"""Create a new ArgparseRstWriter.
Args:
prog: Program name.
out: File object to write to.
aliases: Whether or not to include subparsers for aliases.
rst_levels: List of characters for rst section headings.
Parameters:
prog (str): program name
out (file object): file to write to
aliases (bool): whether or not to include subparsers for aliases
rst_levels (list of str): list of characters
for rst section headings
"""
super().__init__(prog, out, aliases)
out = sys.stdout if out is None else out
super(ArgparseRstWriter, self).__init__(prog, out, aliases)
self.rst_levels = rst_levels
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of a node.
"""
def format(self, cmd):
string = io.StringIO()
string.write(self.begin_command(cmd.prog))
@@ -220,15 +205,7 @@ def format(self, cmd: Command) -> str:
return string.getvalue()
def begin_command(self, prog: str) -> str:
"""Text to print before a command.
Args:
prog: Program name.
Returns:
Text before a command.
"""
def begin_command(self, prog):
return """
----
@@ -241,26 +218,10 @@ def begin_command(self, prog: str) -> str:
prog.replace(" ", "-"), prog, self.rst_levels[self.level] * len(prog)
)
def description(self, description: str) -> str:
"""Description of a command.
Args:
description: Command description.
Returns:
Description of a command.
"""
def description(self, description):
return description + "\n\n"
def usage(self, usage: str) -> str:
"""Example usage of a command.
Args:
usage: Command usage.
Returns:
Usage of a command.
"""
def usage(self, usage):
return """\
.. code-block:: console
@@ -270,24 +231,10 @@ def usage(self, usage: str) -> str:
usage
)
def begin_positionals(self) -> str:
"""Text to print before positional arguments.
Returns:
Positional arguments header.
"""
def begin_positionals(self):
return "\n**Positional arguments**\n\n"
def positional(self, name: str, help: str) -> str:
"""Description of a positional argument.
Args:
name: Argument name.
help: Help text.
Returns:
Positional argument description.
"""
def positional(self, name, help):
return """\
{0}
{1}
@@ -296,32 +243,13 @@ def positional(self, name: str, help: str) -> str:
name, help
)
def end_positionals(self) -> str:
"""Text to print after positional arguments.
Returns:
Positional arguments footer.
"""
def end_positionals(self):
return ""
def begin_optionals(self) -> str:
"""Text to print before optional arguments.
Returns:
Optional arguments header.
"""
def begin_optionals(self):
return "\n**Optional arguments**\n\n"
def optional(self, opts: str, help: str) -> str:
"""Description of an optional argument.
Args:
opts: Optional argument.
help: Help text.
Returns:
Optional argument description.
"""
def optional(self, opts, help):
return """\
``{0}``
{1}
@@ -330,23 +258,10 @@ def optional(self, opts: str, help: str) -> str:
opts, help
)
def end_optionals(self) -> str:
"""Text to print after optional arguments.
Returns:
Optional arguments footer.
"""
def end_optionals(self):
return ""
def begin_subcommands(self, subcommands: Sequence[Tuple[ArgumentParser, str]]) -> str:
"""Table with links to other subcommands.
Arguments:
subcommands: List of subcommands.
Returns:
Subcommand linking text.
"""
def begin_subcommands(self, subcommands):
string = """
**Subcommands**
@@ -365,25 +280,29 @@ def begin_subcommands(self, subcommands: Sequence[Tuple[ArgumentParser, str]]) -
class ArgparseCompletionWriter(ArgparseWriter):
"""Write argparse output as shell programmable tab completion functions."""
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
def format(self, cmd):
"""Returns the string representation of a single node in the
parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Override this in subclasses to define how each subcommand
should be displayed.
Parameters:
(Command): parsed information about a command or subcommand
Returns:
String representation of this subcommand.
str: the string representation of this subcommand
"""
assert cmd.optionals # we should always at least have -h, --help
assert not (cmd.positionals and cmd.subcommands) # one or the other
# We only care about the arguments/flags, not the help messages
positionals: Tuple[str, ...] = ()
positionals = []
if cmd.positionals:
positionals, _ = zip(*cmd.positionals)
optionals, _, _ = zip(*cmd.optionals)
subcommands: Tuple[str, ...] = ()
subcommands = []
if cmd.subcommands:
_, subcommands = zip(*cmd.subcommands)
@@ -396,73 +315,71 @@ def format(self, cmd: Command) -> str:
+ self.end_function(cmd.prog)
)
def start_function(self, prog: str) -> str:
"""Return the syntax needed to begin a function definition.
def start_function(self, prog):
"""Returns the syntax needed to begin a function definition.
Args:
prog: Program name.
Parameters:
prog (str): the command name
Returns:
Function definition beginning.
str: the function definition beginning
"""
name = prog.replace("-", "_").replace(" ", "_")
return "\n_{0}() {{".format(name)
def end_function(self, prog: str) -> str:
"""Return the syntax needed to end a function definition.
def end_function(self, prog=None):
"""Returns the syntax needed to end a function definition.
Args:
prog: Program name
Parameters:
prog (str or None): the command name
Returns:
Function definition ending.
str: the function definition ending
"""
return "}\n"
def body(
self, positionals: Sequence[str], optionals: Sequence[str], subcommands: Sequence[str]
) -> str:
"""Return the body of the function.
def body(self, positionals, optionals, subcommands):
"""Returns the body of the function.
Args:
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
Parameters:
positionals (list): list of positional arguments
optionals (list): list of optional arguments
subcommands (list): list of subcommand parsers
Returns:
Function body.
str: the function body
"""
return ""
def positionals(self, positionals: Sequence[str]) -> str:
"""Return the syntax for reporting positional arguments.
def positionals(self, positionals):
"""Returns the syntax for reporting positional arguments.
Args:
positionals: List of positional arguments.
Parameters:
positionals (list): list of positional arguments
Returns:
Syntax for positional arguments.
str: the syntax for positional arguments
"""
return ""
def optionals(self, optionals: Sequence[str]) -> str:
"""Return the syntax for reporting optional flags.
def optionals(self, optionals):
"""Returns the syntax for reporting optional flags.
Args:
optionals: List of optional arguments.
Parameters:
optionals (list): list of optional arguments
Returns:
Syntax for optional flags.
str: the syntax for optional flags
"""
return ""
def subcommands(self, subcommands: Sequence[str]) -> str:
"""Return the syntax for reporting subcommands.
def subcommands(self, subcommands):
"""Returns the syntax for reporting subcommands.
Args:
subcommands: List of subcommand parsers.
Parameters:
subcommands (list): list of subcommand parsers
Returns:
Syntax for subcommand parsers
str: the syntax for subcommand parsers
"""
return ""
View File
@@ -402,7 +402,7 @@ def groupid_to_group(x):
os.remove(backup_filename)
class FileFilter:
class FileFilter(object):
"""Convenience class for calling ``filter_file`` a lot."""
def __init__(self, *filenames):
@@ -610,8 +610,6 @@ def chgrp(path, group, follow_symlinks=True):
gid = grp.getgrnam(group).gr_gid
else:
gid = group
if os.stat(path).st_gid == gid:
return
if follow_symlinks:
os.chown(path, -1, gid)
else:
@@ -1338,7 +1336,7 @@ def lexists_islink_isdir(path):
return True, is_link, is_dir
class BaseDirectoryVisitor:
class BaseDirectoryVisitor(object):
"""Base class and interface for :py:func:`visit_directory_tree`."""
def visit_file(self, root, rel_path, depth):
@@ -1892,7 +1890,7 @@ class HeaderList(FileList):
include_regex = re.compile(r"(.*?)(\binclude\b)(.*)")
def __init__(self, files):
super().__init__(files)
super(HeaderList, self).__init__(files)
self._macro_definitions = []
self._directories = None
@@ -1918,7 +1916,7 @@ def _default_directories(self):
"""Default computation of directories based on the list of
header files.
"""
dir_list = super().directories
dir_list = super(HeaderList, self).directories
values = []
for d in dir_list:
# If the path contains a subdirectory named 'include' then stop
@@ -2354,7 +2352,7 @@ def find_all_libraries(root, recursive=False):
)
class WindowsSimulatedRPath:
class WindowsSimulatedRPath(object):
"""Class representing Windows filesystem rpath analog
One instance of this class is associated with a package (only on Windows)
View File
@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import division
import collections.abc
import contextlib
import functools
@@ -766,10 +768,10 @@ def pretty_seconds(seconds):
class RequiredAttributeError(ValueError):
def __init__(self, message):
super().__init__(message)
super(RequiredAttributeError, self).__init__(message)
class ObjectWrapper:
class ObjectWrapper(object):
"""Base class that wraps an object. Derived classes can add new behavior
while staying undercover.
@@ -796,7 +798,7 @@ def __init__(self, wrapped_object):
self.__dict__ = wrapped_object.__dict__
class Singleton:
class Singleton(object):
"""Simple wrapper for lazily initialized singleton objects."""
def __init__(self, factory):
@@ -843,7 +845,7 @@ def __repr__(self):
return repr(self.instance)
class LazyReference:
class LazyReference(object):
"""Lazily evaluated reference to part of a singleton."""
def __init__(self, ref_function):
@@ -941,7 +943,7 @@ def _wrapper(args):
return _wrapper
class Devnull:
class Devnull(object):
"""Null stream with less overhead than ``os.devnull``.
See https://stackoverflow.com/a/2929954.
@@ -1058,7 +1060,7 @@ def __str__(self):
return str(self.data)
class GroupedExceptionHandler:
class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self):
@@ -1089,7 +1091,7 @@ def grouped_message(self, with_tracebacks: bool = True) -> str:
return "due to the following failures:\n{0}".format("\n".join(each_exception_message))
class GroupedExceptionForwarder:
class GroupedExceptionForwarder(object):
"""A contextmanager to capture exceptions and forward them to a
GroupedExceptionHandler."""
@@ -1109,7 +1111,7 @@ def __exit__(self, exc_type, exc_value, tb):
return True
class classproperty:
class classproperty(object):
"""Non-data descriptor to evaluate a class-level property. The function that performs
the evaluation is injected at creation time and take an instance (could be None) and
an owner (i.e. the class that originated the instance)
View File
@@ -5,6 +5,8 @@
"""LinkTree class for setting up trees of symbolic links."""
from __future__ import print_function
import filecmp
import os
import shutil
@@ -285,7 +287,7 @@ def visit_symlinked_file(self, root, rel_path, depth):
self.visit_file(root, rel_path, depth)
class LinkTree:
class LinkTree(object):
"""Class to create trees of symbolic links from a source directory.
LinkTree objects are constructed with a source root. Their
@@ -430,12 +432,12 @@ class MergeConflictError(Exception):
class ConflictingSpecsError(MergeConflictError):
def __init__(self, spec_1, spec_2):
super().__init__(spec_1, spec_2)
super(MergeConflictError, self).__init__(spec_1, spec_2)
class SingleMergeConflictError(MergeConflictError):
def __init__(self, path):
super().__init__("Package merge blocked by file: %s" % path)
super(MergeConflictError, self).__init__("Package merge blocked by file: %s" % path)
class MergeConflictSummary(MergeConflictError):
@@ -450,4 +452,4 @@ def __init__(self, conflicts):
msg += "\n `{0}` and `{1}` both project to `{2}`".format(
conflict.src_a, conflict.src_b, conflict.dst
)
super().__init__(msg)
super(MergeConflictSummary, self).__init__(msg)
View File
@@ -39,7 +39,7 @@
true_fn = lambda: True
class OpenFile:
class OpenFile(object):
"""Record for keeping track of open lockfiles (with reference counting).
There's really only one ``OpenFile`` per inode, per process, but we record the
@@ -53,7 +53,7 @@ def __init__(self, fh):
self.refs = 0
class OpenFileTracker:
class OpenFileTracker(object):
"""Track open lockfiles, to minimize number of open file descriptors.
The ``fcntl`` locks that Spack uses are associated with an inode and a process.
@@ -169,7 +169,7 @@ def _attempts_str(wait_time, nattempts):
return " after {} and {}".format(pretty_seconds(wait_time), attempts)
class LockType:
class LockType(object):
READ = 0
WRITE = 1
@@ -192,7 +192,7 @@ def is_valid(op):
return op == LockType.READ or op == LockType.WRITE
class Lock:
class Lock(object):
"""This is an implementation of a filesystem lock using Python's lockf.
In Python, ``lockf`` actually calls ``fcntl``, so this should work with
@@ -681,7 +681,7 @@ def _status_msg(self, locktype, status):
)
class LockTransaction:
class LockTransaction(object):
"""Simple nested transaction context manager that uses a file lock.
Arguments:
@@ -770,7 +770,7 @@ class LockDowngradeError(LockError):
def __init__(self, path):
msg = "Cannot downgrade lock from write to read on file: %s" % path
super().__init__(msg)
super(LockDowngradeError, self).__init__(msg)
class LockLimitError(LockError):
@@ -782,7 +782,7 @@ class LockTimeoutError(LockError):
def __init__(self, lock_type, path, time, attempts):
fmt = "Timed out waiting for a {} lock after {}.\n Made {} {} on file: {}"
super().__init__(
super(LockTimeoutError, self).__init__(
fmt.format(
lock_type,
pretty_seconds(time),
@@ -798,7 +798,7 @@ class LockUpgradeError(LockError):
def __init__(self, path):
msg = "Cannot upgrade lock from read to write on file: %s" % path
super().__init__(msg)
super(LockUpgradeError, self).__init__(msg)
class LockPermissionError(LockError):
@@ -810,7 +810,7 @@ class LockROFileError(LockPermissionError):
def __init__(self, path):
msg = "Can't take write lock on read-only file: %s" % path
super().__init__(msg)
super(LockROFileError, self).__init__(msg)
class CantCreateLockError(LockPermissionError):
@@ -819,4 +819,4 @@ class CantCreateLockError(LockPermissionError):
def __init__(self, path):
msg = "cannot create lock '%s': " % path
msg += "file does not exist and location is not writable"
super().__init__(msg)
super(LockError, self).__init__(msg)
View File
@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import unicode_literals
import contextlib
import io
import os
View File
@@ -6,6 +6,8 @@
"""
Routines for printing columnar output. See ``colify()`` for more information.
"""
from __future__ import division, unicode_literals
import io
import os
import sys
View File
@@ -59,6 +59,8 @@
To output an @, use '@@'. To output a } inside braces, use '}}'.
"""
from __future__ import unicode_literals
import re
import sys
from contextlib import contextmanager
@@ -68,7 +70,7 @@ class ColorParseError(Exception):
"""Raised when a color format fails to parse."""
def __init__(self, message):
super().__init__(message)
super(ColorParseError, self).__init__(message)
# Text styles for ansi codes
@@ -203,7 +205,7 @@ def color_when(value):
set_color_when(old_value)
class match_to_ansi:
class match_to_ansi(object):
def __init__(self, color=True, enclose=False):
self.color = _color_when_value(color)
self.enclose = enclose
@@ -319,7 +321,7 @@ def cescape(string):
return string
class ColorStream:
class ColorStream(object):
def __init__(self, stream, color=None):
self._stream = stream
self._color = color
View File
@@ -5,6 +5,8 @@
"""Utility classes for logging the output of blocks of code.
"""
from __future__ import unicode_literals
import atexit
import ctypes
import errno
@@ -65,7 +67,7 @@ def _strip(line):
return _escape.sub("", line)
class keyboard_input:
class keyboard_input(object):
"""Context manager to disable line editing and echoing.
Use this with ``sys.stdin`` for keyboard input, e.g.::
@@ -242,7 +244,7 @@ def __exit__(self, exc_type, exception, traceback):
signal.signal(signum, old_handler)
class Unbuffered:
class Unbuffered(object):
"""Wrapper for Python streams that forces them to be unbuffered.
This is implemented by forcing a flush after each write.
@@ -287,7 +289,7 @@ def _file_descriptors_work(*streams):
return False
class FileWrapper:
class FileWrapper(object):
"""Represents a file. Can be an open stream, a path to a file (not opened
yet), or neither. When unwrapped, it returns an open file (or file-like)
object.
@@ -329,7 +331,7 @@ def close(self):
self.file.close()
class MultiProcessFd:
class MultiProcessFd(object):
"""Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
@@ -429,7 +431,7 @@ def log_output(*args, **kwargs):
return nixlog(*args, **kwargs)
class nixlog:
class nixlog(object):
"""
Under the hood, we spawn a daemon and set up a pipe between this
process and the daemon. The daemon writes our output to both the
@@ -750,7 +752,7 @@ def close(self):
os.close(self.saved_stream)
class winlog:
class winlog(object):
"""
Similar to nixlog, with underlying
functionality ported to support Windows.
View File
@@ -13,6 +13,8 @@
Note: The functionality in this module is unsupported on Windows
"""
from __future__ import print_function
import multiprocessing
import os
import re
@@ -34,7 +36,7 @@
pass
class ProcessController:
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
This allows one process (the controller) to drive another (the
@@ -155,7 +157,7 @@ def wait_running(self):
self.wait(lambda: "T" not in self.proc_status())
class PseudoShell:
class PseudoShell(object):
"""Sets up controller and minion processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some
View File
@@ -13,7 +13,7 @@
from spack.util.executable import Executable, ProcessError
class ABI:
class ABI(object):
"""This class provides methods to test ABI compatibility between specs.
The current implementation is rather rough and could be improved."""
View File
@@ -60,7 +60,7 @@ def _search_duplicate_compilers(error_cls):
GROUPS = collections.defaultdict(list)
class Error:
class Error(object):
"""Information on an error reported in a test."""
def __init__(self, summary, details):
View File
@@ -80,14 +80,14 @@ def __init__(self, errors):
else:
err = errors[0]
self.message = "{0}: {1}".format(err.__class__.__name__, str(err))
super().__init__(self.message)
super(FetchCacheError, self).__init__(self.message)
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class BinaryCacheIndex:
class BinaryCacheIndex(object):
"""
The BinaryCacheIndex tracks what specs are available on (usually remote)
binary caches.
@@ -517,7 +517,9 @@ class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
super(NoOverwriteException, self).__init__(
f"Refusing to overwrite the following file: {file_path}"
)
class NoGpgException(spack.error.SpackError):
@@ -526,7 +528,7 @@ class NoGpgException(spack.error.SpackError):
"""
def __init__(self, msg):
super().__init__(msg)
super(NoGpgException, self).__init__(msg)
class NoKeyException(spack.error.SpackError):
@@ -535,7 +537,7 @@ class NoKeyException(spack.error.SpackError):
"""
def __init__(self, msg):
super().__init__(msg)
super(NoKeyException, self).__init__(msg)
class PickKeyException(spack.error.SpackError):
@@ -546,7 +548,7 @@ class PickKeyException(spack.error.SpackError):
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
super(PickKeyException, self).__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
@@ -563,7 +565,7 @@ class NoChecksumException(spack.error.SpackError):
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
super(NoChecksumException, self).__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
@@ -576,7 +578,7 @@ class NewLayoutException(spack.error.SpackError):
"""
def __init__(self, msg):
super().__init__(msg)
super(NewLayoutException, self).__init__(msg)
class UnsignedPackageException(spack.error.SpackError):
@@ -758,12 +760,13 @@ def hashes_to_prefixes(spec):
}
def get_buildinfo_dict(spec):
def get_buildinfo_dict(spec, rel=False):
"""Create metadata for a tarball"""
manifest = get_buildfile_manifest(spec)
return {
"sbang_install_path": spack.hooks.sbang.sbang_install_path(),
"relative_rpaths": rel,
"buildpath": spack.store.layout.root,
"spackprefix": spack.paths.prefix,
"relative_prefix": os.path.relpath(spec.prefix, spack.store.layout.root),
@@ -1206,6 +1209,9 @@ class PushOptions(NamedTuple):
#: Overwrite existing tarball/metadata files in buildcache
force: bool = False
#: Whether to use relative RPATHs
relative: bool = False
#: Allow absolute paths to package prefixes when creating a tarball
allow_root: bool = False
@@ -1275,17 +1281,41 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
raise NoOverwriteException(url_util.format(remote_specfile_path))
pkg_dir = os.path.basename(spec.prefix.rstrip(os.path.sep))
workdir = os.path.join(stage_dir, pkg_dir)
binaries_dir = spec.prefix
# TODO: We generally don't want to mutate any files, but when using relative
# mode, Spack unfortunately *does* mutate rpaths and links ahead of time.
# For now, we only make a full copy of the spec prefix when in relative mode.
if options.relative:
# tarfile is used because it preserves hardlink etc best.
binaries_dir = workdir
temp_tarfile_name = tarball_name(spec, ".tar")
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, "w")) as tar:
tar.add(name="%s" % spec.prefix, arcname=".")
with closing(tarfile.open(temp_tarfile_path, "r")) as tar:
tar.extractall(workdir)
os.remove(temp_tarfile_path)
else:
binaries_dir = spec.prefix
# create info for later relocation and create tar
buildinfo = get_buildinfo_dict(spec)
buildinfo = get_buildinfo_dict(spec, options.relative)
if not options.allow_root:
# optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
if options.relative:
make_package_relative(workdir, spec, buildinfo, options.allow_root)
elif not options.allow_root:
ensure_package_relocatable(buildinfo, binaries_dir)
_do_create_tarball(tarfile_path, binaries_dir, pkg_dir, buildinfo)
# remove copy of install directory
if options.relative:
shutil.rmtree(workdir)
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
@@ -1306,6 +1336,7 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
# This will be used to determine is the directory layout has changed.
buildinfo = {}
buildinfo["relative_prefix"] = os.path.relpath(spec.prefix, spack.store.layout.root)
buildinfo["relative_rpaths"] = options.relative
spec_dict["buildinfo"] = buildinfo
with open(specfile_path, "w") as outfile:
@@ -1565,6 +1596,35 @@ def download_tarball(spec, unsigned=False, mirrors_for_spec=None):
return None
def make_package_relative(workdir, spec, buildinfo, allow_root):
"""
Change paths in binaries to relative paths. Change absolute symlinks
to relative symlinks.
"""
prefix = spec.prefix
old_layout_root = buildinfo["buildpath"]
orig_path_names = list()
cur_path_names = list()
for filename in buildinfo["relocate_binaries"]:
orig_path_names.append(os.path.join(prefix, filename))
cur_path_names.append(os.path.join(workdir, filename))
platform = spack.platforms.by_name(spec.platform)
if "macho" in platform.binary_formats:
relocate.make_macho_binaries_relative(cur_path_names, orig_path_names, old_layout_root)
if "elf" in platform.binary_formats:
relocate.make_elf_binaries_relative(cur_path_names, orig_path_names, old_layout_root)
allow_root or relocate.ensure_binaries_are_relocatable(cur_path_names)
orig_path_names = list()
cur_path_names = list()
for linkname in buildinfo.get("relocate_links", []):
orig_path_names.append(os.path.join(prefix, linkname))
cur_path_names.append(os.path.join(workdir, linkname))
relocate.make_link_relative(cur_path_names, orig_path_names)
def ensure_package_relocatable(buildinfo, binaries_dir):
"""Check if package binaries are relocatable."""
binaries = [os.path.join(binaries_dir, f) for f in buildinfo["relocate_binaries"]]
@@ -2335,7 +2395,7 @@ def download_single_spec(concrete_spec, destination, mirror_url=None):
return download_buildcache_entry(files_to_fetch, mirror_url)
class BinaryCacheQuery:
class BinaryCacheQuery(object):
"""Callable object to query if a spec is in a binary cache"""
def __init__(self, all_architectures):
View File
@@ -175,12 +175,12 @@ def black_root_spec() -> str:
def flake8_root_spec() -> str:
"""Return the root spec used to bootstrap flake8"""
return _root_spec("py-flake8@3.8.2:")
return _root_spec("py-flake8")
def pytest_root_spec() -> str:
"""Return the root spec used to bootstrap flake8"""
return _root_spec("py-pytest@6.2.4:")
return _root_spec("py-pytest")
def ensure_environment_dependencies() -> None:
View File
@@ -148,7 +148,7 @@ class MakeExecutable(Executable):
def __init__(self, name, jobs, **kwargs):
supports_jobserver = kwargs.pop("supports_jobserver", True)
super().__init__(name, **kwargs)
super(MakeExecutable, self).__init__(name, **kwargs)
self.supports_jobserver = supports_jobserver
self.jobs = jobs
@@ -175,7 +175,7 @@ def __call__(self, *args, **kwargs):
if jobs_env_jobs is not None:
kwargs["extra_env"] = {jobs_env: str(jobs_env_jobs)}
return super().__call__(*args, **kwargs)
return super(MakeExecutable, self).__call__(*args, **kwargs)
def _on_cray():
@@ -1332,7 +1332,7 @@ class ChildError(InstallError):
build_errors = [("spack.util.executable", "ProcessError")]
def __init__(self, msg, module, classname, traceback_string, log_name, log_type, context):
super().__init__(msg)
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
@@ -1373,7 +1373,7 @@ def long_message(self):
test_log = join_path(os.path.dirname(self.log_name), spack_install_test_log)
if os.path.isfile(test_log):
out.write("\nSee test log for details:\n")
out.write(" {0}\n".format(test_log))
out.write(" {0}n".format(test_log))
return out.getvalue()
View File
@@ -2,7 +2,6 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections.abc
import os
from typing import Tuple
@@ -14,24 +13,21 @@
from .cmake import CMakeBuilder, CMakePackage
def cmake_cache_path(name, value, comment="", force=False):
def cmake_cache_path(name, value, comment=""):
"""Generate a string for a cmake cache variable"""
force_str = " FORCE" if force else ""
return 'set({0} "{1}" CACHE PATH "{2}"{3})\n'.format(name, value, comment, force_str)
return 'set({0} "{1}" CACHE PATH "{2}")\n'.format(name, value, comment)
def cmake_cache_string(name, value, comment="", force=False):
def cmake_cache_string(name, value, comment=""):
"""Generate a string for a cmake cache variable"""
force_str = " FORCE" if force else ""
return 'set({0} "{1}" CACHE STRING "{2}"{3})\n'.format(name, value, comment, force_str)
return 'set({0} "{1}" CACHE STRING "{2}")\n'.format(name, value, comment)
def cmake_cache_option(name, boolean_value, comment="", force=False):
def cmake_cache_option(name, boolean_value, comment=""):
"""Generate a string for a cmake configuration option"""
value = "ON" if boolean_value else "OFF"
force_str = " FORCE" if force else ""
return 'set({0} {1} CACHE BOOL "{2}"{3})\n'.format(name, value, comment, force_str)
return 'set({0} {1} CACHE BOOL "{2}")\n'.format(name, value, comment)
class CachedCMakeBuilder(CMakeBuilder):
@@ -67,34 +63,6 @@ def cache_name(self):
def cache_path(self):
return os.path.join(self.pkg.stage.source_path, self.cache_name)
# Implement a version of the define_from_variant for Cached packages
def define_cmake_cache_from_variant(self, cmake_var, variant=None, comment=""):
"""Return a Cached CMake field from the given variant's value.
See define_from_variant in lib/spack/spack/build_systems/cmake.py package
"""
if variant is None:
variant = cmake_var.lower()
if variant not in self.pkg.variants:
raise KeyError('"{0}" is not a variant of "{1}"'.format(variant, self.pkg.name))
if variant not in self.pkg.spec.variants:
return ""
value = self.pkg.spec.variants[variant].value
field = None
if isinstance(value, bool):
field = cmake_cache_option(cmake_var, value, comment)
else:
if isinstance(value, collections.abc.Sequence) and not isinstance(value, str):
value = ";".join(str(v) for v in value)
else:
value = str(value)
field = cmake_cache_string(cmake_var, value, comment)
return field
def initconfig_compiler_entries(self):
# This will tell cmake to use the Spack compiler wrappers when run
# through Spack, but use the underlying compiler when run outside of
@@ -162,17 +130,6 @@ def initconfig_compiler_entries(self):
libs_string = libs_format_string.format(lang)
entries.append(cmake_cache_string(libs_string, libs_flags))
# Set the generator in the cached config
if self.spec.satisfies("generator=make"):
entries.append(cmake_cache_string("CMAKE_GENERATOR", "Unix Makefiles"))
if self.spec.satisfies("generator=ninja"):
entries.append(cmake_cache_string("CMAKE_GENERATOR", "Ninja"))
entries.append(
cmake_cache_string(
"CMAKE_MAKE_PROGRAM", "{0}/ninja".format(spec["ninja"].prefix.bin)
)
)
return entries
def initconfig_mpi_entries(self):
@@ -238,58 +195,26 @@ def initconfig_hardware_entries(self):
"#------------------{0}\n".format("-" * 60),
]
# Provide standard CMake arguments for dependent CachedCMakePackages
if spec.satisfies("^cuda"):
entries.append("#------------------{0}".format("-" * 30))
entries.append("# Cuda")
entries.append("#------------------{0}\n".format("-" * 30))
cudatoolkitdir = spec["cuda"].prefix
entries.append(cmake_cache_path("CUDAToolkit_ROOT", cudatoolkitdir))
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER", "${CUDAToolkit_ROOT}/bin/nvcc"))
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER", "${CMAKE_CXX_COMPILER}"))
# Include the deprecated CUDA_TOOLKIT_ROOT_DIR for supporting BLT packages
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir))
archs = spec.variants["cuda_arch"].value
if archs[0] != "none":
arch_str = ";".join(archs)
entries.append(
cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", "{0}".format(arch_str))
)
if "+rocm" in spec:
entries.append("#------------------{0}".format("-" * 30))
entries.append("# ROCm")
entries.append("#------------------{0}\n".format("-" * 30))
# Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary
entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix)))
entries.append(
cmake_cache_path("HIP_CXX_COMPILER", "{0}".format(self.spec["hip"].hipcc))
)
archs = self.spec.variants["amdgpu_target"].value
if archs[0] != "none":
arch_str = ";".join(archs)
entries.append(
cmake_cache_string("CMAKE_HIP_ARCHITECTURES", "{0}".format(arch_str))
)
entries.append(cmake_cache_string("AMDGPU_TARGETS", "{0}".format(arch_str)))
entries.append(cmake_cache_string("GPU_TARGETS", "{0}".format(arch_str)))
cudacompiler = "${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc"
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER", cudacompiler))
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER", "${CMAKE_CXX_COMPILER}"))
return entries
def std_initconfig_entries(self):
cmake_prefix_path_env = os.environ["CMAKE_PREFIX_PATH"]
cmake_prefix_path = cmake_prefix_path_env.replace(os.pathsep, ";")
return [
"#------------------{0}".format("-" * 60),
"# !!!! This is a generated file, edit at own risk !!!!",
"#------------------{0}".format("-" * 60),
"# CMake executable path: {0}".format(self.pkg.spec["cmake"].command.path),
"#------------------{0}\n".format("-" * 60),
cmake_cache_path("CMAKE_PREFIX_PATH", cmake_prefix_path),
self.define_cmake_cache_from_variant("CMAKE_BUILD_TYPE", "build_type"),
]
def initconfig_package_entries(self):
@@ -312,7 +237,7 @@ def initconfig(self, pkg, spec, prefix):
@property
def std_cmake_args(self):
args = super().std_cmake_args
args = super(CachedCMakeBuilder, self).std_cmake_args
args.extend(["-C", self.cache_path])
return args

View File

@@ -5,7 +5,6 @@
import collections.abc
import inspect
import os
import pathlib
import platform
import re
import sys
@@ -16,6 +15,7 @@
import spack.build_environment
import spack.builder
import spack.package_base
import spack.util.path
from spack.directives import build_system, conflicts, depends_on, variant
from spack.multimethod import when
@@ -271,7 +271,7 @@ def std_args(pkg, generator=None):
args = [
"-G",
generator,
define("CMAKE_INSTALL_PREFIX", pathlib.Path(pkg.prefix).as_posix()),
define("CMAKE_INSTALL_PREFIX", pkg.prefix),
define("CMAKE_BUILD_TYPE", build_type),
define("BUILD_TESTING", pkg.run_tests),
]

View File

@@ -102,10 +102,11 @@ def cuda_flags(arch_list):
depends_on("cuda@11.0:", when="cuda_arch=80")
depends_on("cuda@11.1:", when="cuda_arch=86")
depends_on("cuda@11.4:", when="cuda_arch=87")
depends_on("cuda@11.8:", when="cuda_arch=89")
depends_on("cuda@12.0:", when="cuda_arch=90")
depends_on("cuda@11.4:", when="cuda_arch=87")
depends_on("cuda@11.8:", when="cuda_arch=89")
depends_on("cuda@11.8:", when="cuda_arch=90")
# From the NVIDIA install guide we know of conflicts for particular
# platforms (linux, darwin), architectures (x86, powerpc) and compilers

View File

@@ -121,7 +121,7 @@ def setup_run_environment(self, env):
$ source {prefix}/{component}/{version}/env/vars.sh
"""
# Only if environment modifications are desired (default is +envmods)
if "~envmods" not in self.spec:
if "+envmods" in self.spec:
env.extend(
EnvironmentModifications.from_sourcing_file(
join_path(self.component_prefix, "env", "vars.sh")
@@ -175,7 +175,7 @@ def libs(self):
return find_libraries("*", root=lib_path, shared=True, recursive=True)
class IntelOneApiStaticLibraryList:
class IntelOneApiStaticLibraryList(object):
"""Provides ld_flags when static linking is needed
Oneapi puts static and dynamic libraries in the same directory, so

View File

@@ -23,7 +23,6 @@
import spack.store
from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError, SpecError
from spack.install_test import test_part
from spack.version import Version
from ._checks import BaseBuilder, execute_install_time_tests
@@ -168,65 +167,18 @@ def remove_files_from_view(self, view, merge_map):
view.remove_files(to_remove)
def test_imports(self):
def test(self):
"""Attempts to import modules of the installed package."""
# Make sure we are importing the installed modules,
# not the ones in the source directory
python = inspect.getmodule(self).python.path
for module in self.import_modules:
with test_part(
self,
f"test_imports_{module}",
purpose=f"checking import of {module}",
self.run_test(
inspect.getmodule(self).python.path,
["-c", "import {0}".format(module)],
purpose="checking import of {0}".format(module),
work_dir="spack-test",
):
python("-c", f"import {module}")
def update_external_dependencies(self, extendee_spec=None):
"""
Ensure all external python packages have a python dependency
If another package in the DAG depends on python, we use that
python for the dependency of the external. If not, we assume
that the external PythonPackage is installed into the same
directory as the python it depends on.
"""
# TODO: Include this in the solve, rather than instantiating post-concretization
if "python" not in self.spec:
if extendee_spec:
python = extendee_spec
elif "python" in self.spec.root:
python = self.spec.root["python"]
else:
python = self.get_external_python_for_prefix()
if not python.concrete:
repo = spack.repo.path.repo_for_pkg(python)
python.namespace = repo.namespace
# Ensure architecture information is present
if not python.architecture:
host_platform = spack.platforms.host()
host_os = host_platform.operating_system("default_os")
host_target = host_platform.target("default_target")
python.architecture = spack.spec.ArchSpec(
(str(host_platform), str(host_os), str(host_target))
)
else:
if not python.architecture.platform:
python.architecture.platform = spack.platforms.host()
if not python.architecture.os:
python.architecture.os = "default_os"
if not python.architecture.target:
python.architecture.target = archspec.cpu.host().family.name
# Ensure compiler information is present
if not python.compiler:
python.compiler = self.spec.compiler
python.external_path = self.spec.external_path
python._mark_concrete()
self.spec.add_dependency_edge(python, deptypes=("build", "link", "run"), virtuals=())
)
class PythonPackage(PythonExtension):
@@ -273,6 +225,51 @@ def list_url(cls):
name = cls.pypi.split("/")[0]
return "https://pypi.org/simple/" + name + "/"
def update_external_dependencies(self, extendee_spec=None):
"""
Ensure all external python packages have a python dependency
If another package in the DAG depends on python, we use that
python for the dependency of the external. If not, we assume
that the external PythonPackage is installed into the same
directory as the python it depends on.
"""
# TODO: Include this in the solve, rather than instantiating post-concretization
if "python" not in self.spec:
if extendee_spec:
python = extendee_spec
elif "python" in self.spec.root:
python = self.spec.root["python"]
else:
python = self.get_external_python_for_prefix()
if not python.concrete:
repo = spack.repo.path.repo_for_pkg(python)
python.namespace = repo.namespace
# Ensure architecture information is present
if not python.architecture:
host_platform = spack.platforms.host()
host_os = host_platform.operating_system("default_os")
host_target = host_platform.target("default_target")
python.architecture = spack.spec.ArchSpec(
(str(host_platform), str(host_os), str(host_target))
)
else:
if not python.architecture.platform:
python.architecture.platform = spack.platforms.host()
if not python.architecture.os:
python.architecture.os = "default_os"
if not python.architecture.target:
python.architecture.target = archspec.cpu.host().family.name
# Ensure compiler information is present
if not python.compiler:
python.compiler = self.spec.compiler
python.external_path = self.spec.external_path
python._mark_concrete()
self.spec.add_dependency_edge(python, deptypes=("build", "link", "run"))
def get_external_python_for_prefix(self):
"""
For an external package that extends python, find the most likely spec for the python

View File

@@ -7,7 +7,7 @@
import llnl.util.lang as lang
from spack.directives import extends
from spack.directives import extends, maintainers
from .generic import GenericBuilder, Package
@@ -71,6 +71,8 @@ class RPackage(Package):
GenericBuilder = RBuilder
maintainers("glennpj")
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "RPackage"

View File

@@ -10,7 +10,6 @@
from llnl.util.filesystem import find, join_path, working_dir
import spack.builder
import spack.install_test
import spack.package_base
from spack.directives import build_system, depends_on, extends
from spack.multimethod import when
@@ -31,8 +30,8 @@ class SIPPackage(spack.package_base.PackageBase):
#: Name of private sip module to install alongside package
sip_module = "sip"
#: Callback names for install-time testing
install_time_test_callbacks = ["test_imports"]
#: Callback names for install-time test
install_time_test_callbacks = ["test"]
#: Legacy buildsystem attribute used to deserialize and install old specs
legacy_buildsystem = "sip"
@@ -88,20 +87,18 @@ def python(self, *args, **kwargs):
"""The python ``Executable``."""
inspect.getmodule(self).python(*args, **kwargs)
def test_imports(self):
def test(self):
"""Attempts to import modules of the installed package."""
# Make sure we are importing the installed modules,
# not the ones in the source directory
python = inspect.getmodule(self).python
for module in self.import_modules:
with spack.install_test.test_part(
self,
"test_imports_{0}".format(module),
self.run_test(
inspect.getmodule(self).python.path,
["-c", "import {0}".format(module)],
purpose="checking import of {0}".format(module),
work_dir="spack-test",
):
python("-c", "import {0}".format(module))
)
@spack.builder.builder("sip")

View File

@@ -63,7 +63,7 @@ def create(pkg):
return _BUILDERS[id(pkg)]
class _PhaseAdapter:
class _PhaseAdapter(object):
def __init__(self, builder, phase_fn):
self.builder = builder
self.phase_fn = phase_fn
@@ -115,7 +115,7 @@ class hierarchy (look at AspellDictPackage for an example of that)
# package. The semantics should be the same as if the method in the base builder were still
# present in the base class of the package.
class _ForwardToBaseBuilder:
class _ForwardToBaseBuilder(object):
def __init__(self, wrapped_pkg_object, root_builder):
self.wrapped_package_object = wrapped_pkg_object
self.root_builder = root_builder
@@ -188,7 +188,7 @@ def __init__(self, pkg):
# Attribute containing the package wrapped in dispatcher with a `__getattr__`
# method that will forward certain calls to the default builder.
self.pkg_with_dispatcher = _ForwardToBaseBuilder(pkg, root_builder=self)
super().__init__(pkg)
super(Adapter, self).__init__(pkg)
# These two methods don't follow the (self, spec, prefix) signature of phases nor
# the (self) signature of methods, so they are added explicitly to avoid using a
@@ -388,7 +388,7 @@ def __new__(mcs, name, bases, attr_dict):
return super(_PackageAdapterMeta, mcs).__new__(mcs, name, bases, attr_dict)
class InstallationPhase:
class InstallationPhase(object):
"""Manages a single phase of the installation.
This descriptor stores at creation time the name of the method it should
@@ -530,9 +530,9 @@ def setup_build_environment(self, env):
modifications to be applied when the package is built. Package authors
can call methods on it to alter the build environment.
"""
if not hasattr(super(), "setup_build_environment"):
if not hasattr(super(Builder, self), "setup_build_environment"):
return
super().setup_build_environment(env)
super(Builder, self).setup_build_environment(env)
def setup_dependent_build_environment(self, env, dependent_spec):
"""Sets up the build environment of packages that depend on this one.
@@ -563,9 +563,9 @@ def setup_dependent_build_environment(self, env, dependent_spec):
the dependent's state. Note that *this* package's spec is
available as ``self.spec``
"""
if not hasattr(super(), "setup_dependent_build_environment"):
if not hasattr(super(Builder, self), "setup_dependent_build_environment"):
return
super().setup_dependent_build_environment(env, dependent_spec)
super(Builder, self).setup_dependent_build_environment(env, dependent_spec)
def __getitem__(self, idx):
key = self.phases[idx]

View File

@@ -58,7 +58,7 @@ def _fetch_cache():
return spack.fetch_strategy.FsCache(path)
class MirrorCache:
class MirrorCache(object):
def __init__(self, root, skip_unstable_versions):
self.root = os.path.abspath(root)
self.skip_unstable_versions = skip_unstable_versions

View File

@@ -28,6 +28,7 @@
import spack
import spack.binary_distribution as bindist
import spack.compilers as compilers
import spack.config as cfg
import spack.environment as ev
import spack.main
@@ -57,7 +58,7 @@
PushResult = namedtuple("PushResult", "success url")
class TemporaryDirectory:
class TemporaryDirectory(object):
def __init__(self):
self.temporary_directory = tempfile.mkdtemp()
@@ -69,10 +70,17 @@ def __exit__(self, exc_type, exc_value, exc_traceback):
return False
def get_job_name(spec, osarch, build_group):
def _is_main_phase(phase_name):
return True if phase_name == "specs" else False
def get_job_name(phase, strip_compiler, spec, osarch, build_group):
"""Given the necessary parts, format the gitlab job name
Arguments:
phase (str): Either 'specs' for the main phase, or the name of a
bootstrapping phase
strip_compiler (bool): Should compiler be stripped from job name
spec (spack.spec.Spec): Spec job will build
osarch: Architecture TODO: (this is a spack.spec.ArchSpec,
but sphinx doesn't recognize the type and fails).
@@ -85,7 +93,12 @@ def get_job_name(spec, osarch, build_group):
format_str = ""
format_args = []
format_str += "{{{0}}}".format(item_idx)
if phase:
format_str += "({{{0}}})".format(item_idx)
format_args.append(phase)
item_idx += 1
format_str += " {{{0}}}".format(item_idx)
format_args.append(spec.name)
item_idx += 1
@@ -97,9 +110,10 @@ def get_job_name(spec, osarch, build_group):
format_args.append(spec.version)
item_idx += 1
format_str += " {{{0}}}".format(item_idx)
format_args.append(spec.compiler)
item_idx += 1
if _is_main_phase(phase) is True or strip_compiler is False:
format_str += " {{{0}}}".format(item_idx)
format_args.append(spec.compiler)
item_idx += 1
format_str += " {{{0}}}".format(item_idx)
format_args.append(osarch)
@@ -139,33 +153,49 @@ def _add_dependency(spec_label, dep_label, deps):
deps[spec_label].add(dep_label)
def _get_spec_dependencies(specs, deps, spec_labels):
spec_deps_obj = _compute_spec_deps(specs)
def _get_spec_dependencies(
specs, deps, spec_labels, check_index_only=False, mirrors_to_check=None
):
spec_deps_obj = _compute_spec_deps(
specs, check_index_only=check_index_only, mirrors_to_check=mirrors_to_check
)
if spec_deps_obj:
dependencies = spec_deps_obj["dependencies"]
specs = spec_deps_obj["specs"]
for entry in specs:
spec_labels[entry["label"]] = entry["spec"]
spec_labels[entry["label"]] = {
"spec": entry["spec"],
"needs_rebuild": entry["needs_rebuild"],
}
for entry in dependencies:
_add_dependency(entry["spec"], entry["depends"], deps)
def stage_spec_jobs(specs):
def stage_spec_jobs(specs, check_index_only=False, mirrors_to_check=None):
"""Take a set of release specs and generate a list of "stages", where the
jobs in any stage are dependent only on jobs in previous stages. This
allows us to maximize build parallelism within the gitlab-ci framework.
Arguments:
specs (Iterable): Specs to build
check_index_only (bool): Regardless of whether DAG pruning is enabled,
all configured mirrors are searched to see if binaries for specs
are up to date on those mirrors. This flag limits that search to
the binary cache indices on those mirrors to speed the process up,
even though there is no guarantee the index is up to date.
mirrors_to_check: Optional mapping giving mirrors to check instead of
any configured mirrors.
Returns: A tuple of information objects describing the specs, dependencies
and stages:
spec_labels: A dictionary mapping the spec labels (which are formatted
as pkg-name/hash-prefix) to concrete specs.
spec_labels: A dictionary mapping the spec labels which are made of
(pkg-name/hash-prefix), to objects containing "spec" and "needs_rebuild"
keys. The root spec is the spec of which this spec is a dependency
and the spec is the formatted spec string for this spec.
deps: A dictionary where the keys should also have appeared as keys in
the spec_labels dictionary, and the values are the set of
@@ -194,7 +224,13 @@ def _remove_satisfied_deps(deps, satisfied_list):
deps = {}
spec_labels = {}
_get_spec_dependencies(specs, deps, spec_labels)
_get_spec_dependencies(
specs,
deps,
spec_labels,
check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check,
)
# Save the original deps, as we need to return them at the end of the
# function. In the while loop below, the "dependencies" variable is
@@ -220,36 +256,24 @@ def _remove_satisfied_deps(deps, satisfied_list):
return spec_labels, deps, stages
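A toy illustration, not Spack code, of the staging shape described in the docstring above; the labels and hash prefixes are invented.
# Each stage contains only jobs whose dependencies appear in earlier stages.
spec_labels = {"zlib/abc1234": ..., "cmake/def5678": ..., "app/9abcde0": ...}
deps = {"cmake/def5678": {"zlib/abc1234"}, "app/9abcde0": {"cmake/def5678"}}
stages = [{"zlib/abc1234"}, {"cmake/def5678"}, {"app/9abcde0"}]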
def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions):
def _print_staging_summary(spec_labels, dependencies, stages):
if not stages:
return
mirrors = spack.mirror.MirrorCollection(mirrors=mirrors_to_check)
tty.msg("Checked the following mirrors for binaries:")
for m in mirrors.values():
tty.msg(" {0}".format(m.fetch_url))
tty.msg("Staging summary ([x] means a job needs rebuilding):")
tty.msg(" Staging summary ([x] means a job needs rebuilding):")
for stage_index, stage in enumerate(stages):
tty.msg(" stage {0} ({1} jobs):".format(stage_index, len(stage)))
tty.msg(" stage {0} ({1} jobs):".format(stage_index, len(stage)))
for job in sorted(stage):
s = spec_labels[job]
rebuild = rebuild_decisions[job].rebuild
reason = rebuild_decisions[job].reason
reason_msg = " ({0})".format(reason) if reason else ""
s = spec_labels[job]["spec"]
tty.msg(
" [{1}] {0} -> {2}{3}".format(
job, "x" if rebuild else " ", _get_spec_string(s), reason_msg
" [{1}] {0} -> {2}".format(
job, "x" if spec_labels[job]["needs_rebuild"] else " ", _get_spec_string(s)
)
)
if rebuild_decisions[job].mirrors:
tty.msg(" found on the following mirrors:")
for murl in rebuild_decisions[job].mirrors:
tty.msg(" {0}".format(murl))
def _compute_spec_deps(spec_list):
def _compute_spec_deps(spec_list, check_index_only=False, mirrors_to_check=None):
"""
Computes all the dependencies for the spec(s) and generates a JSON
object which provides both a list of unique spec names as well as a
@@ -313,8 +337,12 @@ def append_dep(s, d):
tty.msg("Will not stage external pkg: {0}".format(s))
continue
up_to_date_mirrors = bindist.get_mirrors_for_spec(
spec=s, mirrors_to_check=mirrors_to_check, index_only=check_index_only
)
skey = _spec_deps_key(s)
spec_labels[skey] = s
spec_labels[skey] = {"spec": s, "needs_rebuild": not up_to_date_mirrors}
for d in s.dependencies(deptype=all):
dkey = _spec_deps_key(d)
@@ -324,8 +352,14 @@ def append_dep(s, d):
append_dep(skey, dkey)
for spec_label, concrete_spec in spec_labels.items():
specs.append({"label": spec_label, "spec": concrete_spec})
for spec_label, spec_holder in spec_labels.items():
specs.append(
{
"label": spec_label,
"spec": spec_holder["spec"],
"needs_rebuild": spec_holder["needs_rebuild"],
}
)
deps_json_obj = {"specs": specs, "dependencies": dependencies}
@@ -337,17 +371,26 @@ def _spec_matches(spec, match_string):
def _format_job_needs(
dep_jobs, osname, build_group, prune_dag, rebuild_decisions, enable_artifacts_buildcache
phase_name,
strip_compilers,
dep_jobs,
osname,
build_group,
prune_dag,
stage_spec_dict,
enable_artifacts_buildcache,
):
needs_list = []
for dep_job in dep_jobs:
dep_spec_key = _spec_deps_key(dep_job)
rebuild = rebuild_decisions[dep_spec_key].rebuild
dep_spec_info = stage_spec_dict[dep_spec_key]
if not prune_dag or rebuild:
if not prune_dag or dep_spec_info["needs_rebuild"]:
needs_list.append(
{
"job": get_job_name(dep_job, dep_job.architecture, build_group),
"job": get_job_name(
phase_name, strip_compilers, dep_job, dep_job.architecture, build_group
),
"artifacts": enable_artifacts_buildcache,
}
)
@@ -447,12 +490,17 @@ def get_spec_filter_list(env, affected_pkgs, dependent_traverse_depth=None):
return affected_specs
def _build_jobs(spec_labels, stages):
for stage_jobs in stages:
for spec_label in stage_jobs:
release_spec = spec_labels[spec_label]
release_spec_dag_hash = release_spec.dag_hash()
yield release_spec, release_spec_dag_hash
def _build_jobs(phases, staged_phases):
for phase in phases:
phase_name = phase["name"]
spec_labels, dependencies, stages = staged_phases[phase_name]
for stage_jobs in stages:
for spec_label in stage_jobs:
spec_record = spec_labels[spec_label]
release_spec = spec_record["spec"]
release_spec_dag_hash = release_spec.dag_hash()
yield release_spec, release_spec_dag_hash
def _noop(x):
@@ -471,21 +519,14 @@ def _unpack_script(script_section, op=_noop):
return script
class RebuildDecision:
def __init__(self):
self.rebuild = True
self.mirrors = []
self.reason = ""
class SpackCI:
"""Spack CI object used to generate intermediate representation
used by the CI generator(s).
"""
def __init__(self, ci_config, spec_labels, stages):
def __init__(self, ci_config, phases, staged_phases):
"""Given the information from the ci section of the config
and the staged jobs, set up meta data needed for generating Spack
and the job phases, set up metadata needed for generating Spack
CI IR.
"""
@@ -500,6 +541,9 @@ def __init__(self, ci_config, spec_labels, stages):
"enable-artifacts-buildcache": self.ci_config.get(
"enable-artifacts-buildcache", False
),
"bootstrap": self.ci_config.get(
"bootstrap", []
), # This is deprecated and should be removed
"rebuild-index": self.ci_config.get("rebuild-index", True),
"broken-specs-url": self.ci_config.get("broken-specs-url", None),
"broken-tests-packages": self.ci_config.get("broken-tests-packages", []),
@@ -507,7 +551,7 @@ def __init__(self, ci_config, spec_labels, stages):
}
jobs = self.ir["jobs"]
for spec, dag_hash in _build_jobs(spec_labels, stages):
for spec, dag_hash in _build_jobs(phases, staged_phases):
jobs[dag_hash] = self.__init_job(spec)
for name in self.named_jobs:
@@ -707,7 +751,7 @@ def generate_gitlab_ci_yaml(
env.concretize()
env.write()
yaml_root = env.manifest[ev.TOP_LEVEL_KEY]
yaml_root = ev.config_dict(env.manifest)
# Get the joined "ci" config with all of the current scopes resolved
ci_config = cfg.get("ci")
@@ -829,6 +873,25 @@ def generate_gitlab_ci_yaml(
if "temporary-storage-url-prefix" in ci_config:
temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
bootstrap_specs = []
phases = []
if "bootstrap" in ci_config:
for phase in ci_config["bootstrap"]:
try:
phase_name = phase.get("name")
strip_compilers = phase.get("compiler-agnostic")
except AttributeError:
phase_name = phase
strip_compilers = False
phases.append({"name": phase_name, "strip-compilers": strip_compilers})
for bs in env.spec_lists[phase_name]:
bootstrap_specs.append(
{"spec": bs, "phase-name": phase_name, "strip-compilers": strip_compilers}
)
phases.append({"name": "specs", "strip-compilers": False})
# If a remote mirror override (alternate buildcache destination) was
# specified, add it here in case it has already built hashes we might
# generate.
@@ -883,7 +946,7 @@ def generate_gitlab_ci_yaml(
# Add config scopes to environment
env_includes = env_yaml_root["spack"].get("include", [])
cli_scopes = [
os.path.relpath(s.path, concrete_env_dir)
os.path.abspath(s.path)
for s in cfg.scopes().values()
if type(s) == cfg.ImmutableConfigScope
and s.path not in env_includes
@@ -930,13 +993,39 @@ def generate_gitlab_ci_yaml(
except bindist.FetchCacheError as e:
tty.warn(e)
spec_labels, dependencies, stages = stage_spec_jobs(
[
concrete
for abstract, concrete in env.concretized_specs()
if abstract in env.spec_lists["specs"]
]
)
staged_phases = {}
try:
for phase in phases:
phase_name = phase["name"]
if phase_name == "specs":
# Anything in the "specs" of the environment is already
# concretized by the block at the top of this method, so we
# only need to find the concrete versions, and then avoid
# re-concretizing them needlessly later on.
concrete_phase_specs = [
concrete
for abstract, concrete in env.concretized_specs()
if abstract in env.spec_lists[phase_name]
]
else:
# Any spec lists in other definitions (but not in the
# "specs") of the environment are not yet concretized so we
# have to concretize them explicitly here.
concrete_phase_specs = env.spec_lists[phase_name]
with spack.concretize.disable_compiler_existence_check():
for phase_spec in concrete_phase_specs:
phase_spec.concretize()
staged_phases[phase_name] = stage_spec_jobs(
concrete_phase_specs,
check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check,
)
finally:
# Clean up remote mirror override if enabled
if remote_mirror_override:
spack.mirror.remove("ci_pr_mirror", cfg.default_modify_scope())
if spack_pipeline_type == "spack_pull_request":
spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
all_job_names = []
output_object = {}
@@ -959,212 +1048,276 @@ def generate_gitlab_ci_yaml(
else:
broken_spec_urls = web_util.list_url(broken_specs_url)
spack_ci = SpackCI(ci_config, spec_labels, stages)
spack_ci = SpackCI(ci_config, phases, staged_phases)
spack_ci_ir = spack_ci.generate_ir()
rebuild_decisions = {}
for phase in phases:
phase_name = phase["name"]
strip_compilers = phase["strip-compilers"]
for stage_jobs in stages:
stage_name = "stage-{0}".format(stage_id)
stage_names.append(stage_name)
stage_id += 1
spec_labels, dependencies, stages = staged_phases[phase_name]
for spec_label in stage_jobs:
release_spec = spec_labels[spec_label]
release_spec_dag_hash = release_spec.dag_hash()
for stage_jobs in stages:
stage_name = "stage-{0}".format(stage_id)
stage_names.append(stage_name)
stage_id += 1
spec_record = RebuildDecision()
rebuild_decisions[spec_label] = spec_record
for spec_label in stage_jobs:
spec_record = spec_labels[spec_label]
release_spec = spec_record["spec"]
release_spec_dag_hash = release_spec.dag_hash()
if prune_untouched_packages:
if release_spec not in affected_specs:
spec_record.rebuild = False
spec_record.reason = "Pruned, untouched by change."
if prune_untouched_packages:
if release_spec not in affected_specs:
tty.debug(
"Pruning {0}/{1}, untouched by change.".format(
release_spec.name, release_spec.dag_hash()[:7]
)
)
spec_record["needs_rebuild"] = False
continue
job_object = spack_ci_ir["jobs"][release_spec_dag_hash]["attributes"]
if not job_object:
tty.warn("No match found for {0}, skipping it".format(release_spec))
continue
up_to_date_mirrors = bindist.get_mirrors_for_spec(
spec=release_spec, mirrors_to_check=mirrors_to_check, index_only=check_index_only
)
if spack_pipeline_type is not None:
# For spack pipelines "public" and "protected" are reserved tags
job_object["tags"] = _remove_reserved_tags(job_object.get("tags", []))
if spack_pipeline_type == "spack_protected_branch":
job_object["tags"].extend(["protected"])
elif spack_pipeline_type == "spack_pull_request":
job_object["tags"].extend(["public"])
spec_record.rebuild = not up_to_date_mirrors
if up_to_date_mirrors:
spec_record.reason = "Pruned, found in mirrors"
spec_record.mirrors = [m["mirror_url"] for m in up_to_date_mirrors]
else:
spec_record.reason = "Scheduled, not found anywhere"
if "script" not in job_object:
raise AttributeError
job_object = spack_ci_ir["jobs"][release_spec_dag_hash]["attributes"]
def main_script_replacements(cmd):
return cmd.replace("{env_dir}", concrete_env_dir)
if not job_object:
tty.warn("No match found for {0}, skipping it".format(release_spec))
continue
job_object["script"] = _unpack_script(
job_object["script"], op=main_script_replacements
)
if spack_pipeline_type is not None:
# For spack pipelines "public" and "protected" are reserved tags
job_object["tags"] = _remove_reserved_tags(job_object.get("tags", []))
if spack_pipeline_type == "spack_protected_branch":
job_object["tags"].extend(["protected"])
elif spack_pipeline_type == "spack_pull_request":
job_object["tags"].extend(["public"])
if "before_script" in job_object:
job_object["before_script"] = _unpack_script(job_object["before_script"])
if "script" not in job_object:
raise AttributeError
if "after_script" in job_object:
job_object["after_script"] = _unpack_script(job_object["after_script"])
def main_script_replacements(cmd):
return cmd.replace("{env_dir}", rel_concrete_env_dir)
osname = str(release_spec.architecture)
job_name = get_job_name(
phase_name, strip_compilers, release_spec, osname, build_group
)
job_object["script"] = _unpack_script(
job_object["script"], op=main_script_replacements
)
compiler_action = "NONE"
if len(phases) > 1:
compiler_action = "FIND_ANY"
if _is_main_phase(phase_name):
compiler_action = "INSTALL_MISSING"
if "before_script" in job_object:
job_object["before_script"] = _unpack_script(job_object["before_script"])
job_vars = job_object.setdefault("variables", {})
job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec_dag_hash
job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
job_vars["SPACK_COMPILER_ACTION"] = compiler_action
if "after_script" in job_object:
job_object["after_script"] = _unpack_script(job_object["after_script"])
job_object["needs"] = []
if spec_label in dependencies:
if enable_artifacts_buildcache:
# Get dependencies transitively, so they're all
# available in the artifacts buildcache.
dep_jobs = [d for d in release_spec.traverse(deptype=all, root=False)]
else:
# In this case, "needs" is only used for scheduling
# purposes, so we only get the direct dependencies.
dep_jobs = []
for dep_label in dependencies[spec_label]:
dep_jobs.append(spec_labels[dep_label]["spec"])
osname = str(release_spec.architecture)
job_name = get_job_name(release_spec, osname, build_group)
job_vars = job_object.setdefault("variables", {})
job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec_dag_hash
job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
job_object["needs"] = []
if spec_label in dependencies:
if enable_artifacts_buildcache:
# Get dependencies transitively, so they're all
# available in the artifacts buildcache.
dep_jobs = [d for d in release_spec.traverse(deptype=all, root=False)]
else:
# In this case, "needs" is only used for scheduling
# purposes, so we only get the direct dependencies.
dep_jobs = []
for dep_label in dependencies[spec_label]:
dep_jobs.append(spec_labels[dep_label])
job_object["needs"].extend(
_format_job_needs(
dep_jobs,
osname,
build_group,
prune_dag,
rebuild_decisions,
enable_artifacts_buildcache,
job_object["needs"].extend(
_format_job_needs(
phase_name,
strip_compilers,
dep_jobs,
osname,
build_group,
prune_dag,
spec_labels,
enable_artifacts_buildcache,
)
)
)
rebuild_spec = spec_record.rebuild
rebuild_spec = spec_record["needs_rebuild"]
if not rebuild_spec and not copy_only_pipeline:
if prune_dag:
spec_record.reason = "Pruned, up-to-date"
# This next section helps gitlab make sure the right
# bootstrapped compiler exists in the artifacts buildcache by
# creating an artificial dependency between this spec and its
# compiler. So, if we are in the main phase, and if the
# compiler we are supposed to use is listed in any of the
# bootstrap spec lists, then we will add more dependencies to
# the job (that compiler and maybe its dependencies as well).
if _is_main_phase(phase_name):
spec_arch_family = release_spec.architecture.target.microarchitecture.family
compiler_pkg_spec = compilers.pkg_spec_for_compiler(release_spec.compiler)
for bs in bootstrap_specs:
c_spec = bs["spec"]
bs_arch = c_spec.architecture
bs_arch_family = bs_arch.target.microarchitecture.family
if (
c_spec.intersects(compiler_pkg_spec)
and bs_arch_family == spec_arch_family
):
# We found the bootstrap compiler this release spec
# should be built with, so for DAG scheduling
# purposes, we will at least add the compiler spec
# to the job's "needs". But if artifact buildcache
# is enabled, we'll have to add all transitive deps
# of the compiler as well.
# Here we check whether the bootstrapped compiler
# needs to be rebuilt. Until compilers are proper
# dependencies, we artificially force the spec to
# be rebuilt if the compiler targeted to build it
# needs to be rebuilt.
bs_specs, _, _ = staged_phases[bs["phase-name"]]
c_spec_key = _spec_deps_key(c_spec)
rbld_comp = bs_specs[c_spec_key]["needs_rebuild"]
rebuild_spec = rebuild_spec or rbld_comp
# Also update record so dependents do not fail to
# add this spec to their "needs"
spec_record["needs_rebuild"] = rebuild_spec
dep_jobs = [c_spec]
if enable_artifacts_buildcache:
dep_jobs = [d for d in c_spec.traverse(deptype=all)]
job_object["needs"].extend(
_format_job_needs(
bs["phase-name"],
bs["strip-compilers"],
dep_jobs,
str(bs_arch),
build_group,
prune_dag,
bs_specs,
enable_artifacts_buildcache,
)
)
else:
debug_msg = "".join(
[
"Considered compiler {0} for spec ",
"{1}, but rejected it either because it was ",
"not the compiler required by the spec, or ",
"because the target arch families of the ",
"spec and the compiler did not match",
]
).format(c_spec, release_spec)
tty.debug(debug_msg)
if prune_dag and not rebuild_spec and not copy_only_pipeline:
tty.debug(
"Pruning {0}/{1}, does not need rebuild.".format(
release_spec.name, release_spec.dag_hash()
)
)
continue
else:
# DAG pruning is disabled, force the spec to rebuild. The
# record still contains any mirrors on which the spec
# may have been found, so we can print them in the staging
# summary.
spec_record.rebuild = True
spec_record.reason = "Scheduled, DAG pruning disabled"
if broken_spec_urls is not None and release_spec_dag_hash in broken_spec_urls:
known_broken_specs_encountered.append(release_spec_dag_hash)
if broken_spec_urls is not None and release_spec_dag_hash in broken_spec_urls:
known_broken_specs_encountered.append(release_spec_dag_hash)
# Only keep track of these if we are copying rebuilt cache entries
if spack_buildcache_copy:
# TODO: This assumes signed version of the spec
buildcache_copies[release_spec_dag_hash] = [
{
"src": url_util.join(
buildcache_copy_src_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_name(release_spec, ".spec.json.sig"),
),
"dest": url_util.join(
buildcache_copy_dest_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_name(release_spec, ".spec.json.sig"),
),
},
{
"src": url_util.join(
buildcache_copy_src_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_path_name(release_spec, ".spack"),
),
"dest": url_util.join(
buildcache_copy_dest_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_path_name(release_spec, ".spack"),
),
},
]
if artifacts_root:
job_object["needs"].append(
{"job": generate_job_name, "pipeline": "{0}".format(parent_pipeline_id)}
)
# Let downstream jobs know whether the spec needed rebuilding, regardless
# of whether DAG pruning was enabled or not.
job_vars["SPACK_SPEC_NEEDS_REBUILD"] = str(rebuild_spec)
if cdash_handler:
cdash_handler.current_spec = release_spec
build_name = cdash_handler.build_name
all_job_names.append(build_name)
job_vars["SPACK_CDASH_BUILD_NAME"] = build_name
build_stamp = cdash_handler.build_stamp
job_vars["SPACK_CDASH_BUILD_STAMP"] = build_stamp
job_object["artifacts"] = spack.config.merge_yaml(
job_object.get("artifacts", {}),
{
"when": "always",
"paths": [
rel_job_log_dir,
rel_job_repro_dir,
rel_job_test_dir,
rel_user_artifacts_dir,
],
},
)
if enable_artifacts_buildcache:
bc_root = os.path.join(local_mirror_dir, "build_cache")
job_object["artifacts"]["paths"].extend(
[
os.path.join(bc_root, p)
for p in [
bindist.tarball_name(release_spec, ".spec.json"),
bindist.tarball_directory_name(release_spec),
]
# Only keep track of these if we are copying rebuilt cache entries
if spack_buildcache_copy:
# TODO: This assumes signed version of the spec
buildcache_copies[release_spec_dag_hash] = [
{
"src": url_util.join(
buildcache_copy_src_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_name(release_spec, ".spec.json.sig"),
),
"dest": url_util.join(
buildcache_copy_dest_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_name(release_spec, ".spec.json.sig"),
),
},
{
"src": url_util.join(
buildcache_copy_src_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_path_name(release_spec, ".spack"),
),
"dest": url_util.join(
buildcache_copy_dest_prefix,
bindist.build_cache_relative_path(),
bindist.tarball_path_name(release_spec, ".spack"),
),
},
]
if artifacts_root:
job_object["needs"].append(
{"job": generate_job_name, "pipeline": "{0}".format(parent_pipeline_id)}
)
job_vars["SPACK_SPEC_NEEDS_REBUILD"] = str(rebuild_spec)
if cdash_handler:
cdash_handler.current_spec = release_spec
build_name = cdash_handler.build_name
all_job_names.append(build_name)
job_vars["SPACK_CDASH_BUILD_NAME"] = build_name
build_stamp = cdash_handler.build_stamp
job_vars["SPACK_CDASH_BUILD_STAMP"] = build_stamp
job_object["artifacts"] = spack.config.merge_yaml(
job_object.get("artifacts", {}),
{
"when": "always",
"paths": [
rel_job_log_dir,
rel_job_repro_dir,
rel_job_test_dir,
rel_user_artifacts_dir,
],
},
)
job_object["stage"] = stage_name
job_object["retry"] = {"max": 2, "when": JOB_RETRY_CONDITIONS}
job_object["interruptible"] = True
if enable_artifacts_buildcache:
bc_root = os.path.join(local_mirror_dir, "build_cache")
job_object["artifacts"]["paths"].extend(
[
os.path.join(bc_root, p)
for p in [
bindist.tarball_name(release_spec, ".spec.json"),
bindist.tarball_directory_name(release_spec),
]
]
)
length_needs = len(job_object["needs"])
if length_needs > max_length_needs:
max_length_needs = length_needs
max_needs_job = job_name
job_object["stage"] = stage_name
job_object["retry"] = {"max": 2, "when": JOB_RETRY_CONDITIONS}
job_object["interruptible"] = True
if not copy_only_pipeline:
output_object[job_name] = job_object
job_id += 1
length_needs = len(job_object["needs"])
if length_needs > max_length_needs:
max_length_needs = length_needs
max_needs_job = job_name
if not copy_only_pipeline:
output_object[job_name] = job_object
job_id += 1
if print_summary:
_print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions)
# Clean up remote mirror override if enabled
if remote_mirror_override:
spack.mirror.remove("ci_pr_mirror", cfg.default_modify_scope())
if spack_pipeline_type == "spack_pull_request":
spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
for phase in phases:
phase_name = phase["name"]
tty.msg('Stages for phase "{0}"'.format(phase_name))
phase_stages = staged_phases[phase_name]
_print_staging_summary(*phase_stages)
tty.debug("{0} build jobs generated in {1} stages".format(job_id, stage_id))
@@ -1423,6 +1576,44 @@ def can_verify_binaries():
return len(gpg_util.public_keys()) >= 1
def configure_compilers(compiler_action, scope=None):
"""Depending on the compiler_action parameter, either turn on the
install_missing_compilers config option, or find spack compilers,
or do nothing. This is used from rebuild jobs in bootstrapping
pipelines, where in the bootstrapping phase we would pass
FIND_ANY in case of compiler-agnostic bootstrapping, while in the
spec building phase we would pass INSTALL_MISSING in order to get
spack to use the compiler which was built in the previous phase and
is now sitting in the binary mirror.
Arguments:
compiler_action (str): 'FIND_ANY', 'INSTALL_MISSING' have meanings
described above. Any other value essentially results in a no-op.
scope (spack.config.ConfigScope): Optional. The scope in which to look for
compilers, in case 'FIND_ANY' was provided.
"""
if compiler_action == "INSTALL_MISSING":
tty.debug("Make sure bootstrapped compiler will be installed")
config = cfg.get("config")
config["install_missing_compilers"] = True
cfg.set("config", config)
elif compiler_action == "FIND_ANY":
tty.debug("Just find any available compiler")
find_args = ["find"]
if scope:
find_args.extend(["--scope", scope])
output = spack_compiler(*find_args)
tty.debug("spack compiler find")
tty.debug(output)
output = spack_compiler("list")
tty.debug("spack compiler list")
tty.debug(output)
else:
tty.debug("No compiler action to be taken")
return None
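A small sketch of how a rebuild job might drive this helper, mirroring the ci_rebuild hunk further down; the import alias is an assumption.
import os

import spack.ci as spack_ci

# The generate step exports SPACK_COMPILER_ACTION into the job's variables;
# any value other than FIND_ANY or INSTALL_MISSING is effectively a no-op.
compiler_action = os.environ.get("SPACK_COMPILER_ACTION", "NONE")
spack_ci.configure_compilers(compiler_action)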
def _push_mirror_contents(input_spec, sign_binaries, mirror_url):
"""Unchecked version of the public API, for easier mocking"""
unsigned = not sign_binaries
@@ -2128,7 +2319,7 @@ def run_standalone_tests(**kwargs):
tty.debug("spack test exited {0}".format(exit_code))
class CDashHandler:
class CDashHandler(object):
"""
Class for managing CDash data and processing.
"""

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import os
import re
@@ -147,7 +149,7 @@ def get_command(cmd_name):
return getattr(get_module(cmd_name), pname)
class _UnquotedFlags:
class _UnquotedFlags(object):
"""Use a heuristic in `.extract()` to detect whether the user is trying to set
multiple flags like the docker ENV attribute allows (e.g. 'cflags=-Os -pipe').
@@ -545,7 +547,7 @@ class PythonNameError(spack.error.SpackError):
def __init__(self, name):
self.name = name
super().__init__("{0} is not a permissible Python name.".format(name))
super(PythonNameError, self).__init__("{0} is not a permissible Python name.".format(name))
class CommandNameError(spack.error.SpackError):
@@ -553,7 +555,9 @@ class CommandNameError(spack.error.SpackError):
def __init__(self, name):
self.name = name
super().__init__("{0} is not a permissible Spack command name.".format(name))
super(CommandNameError, self).__init__(
"{0} is not a permissible Spack command name.".format(name)
)
########################################

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import collections
import archspec.cpu

View File

@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os.path
import shutil
import tempfile

View File

@@ -43,6 +43,13 @@ def setup_parser(subparser):
subparsers = subparser.add_subparsers(help="buildcache sub-commands")
push = subparsers.add_parser("push", aliases=["create"], help=push_fn.__doc__)
# TODO: remove from Spack 0.21
push.add_argument(
"-r",
"--rel",
action="store_true",
help="make all rpaths relative before creating tarballs. (deprecated)",
)
push.add_argument("-f", "--force", action="store_true", help="overwrite tarball if it exists.")
push.add_argument(
"-u", "--unsigned", action="store_true", help="push unsigned buildcache tarballs"
@@ -56,7 +63,37 @@ def setup_parser(subparser):
push.add_argument(
"-k", "--key", metavar="key", type=str, default=None, help="Key for signing."
)
push.add_argument("mirror", type=str, help="Mirror name, path, or URL.")
output = push.add_mutually_exclusive_group(required=False)
# TODO: remove from Spack 0.21
output.add_argument(
"-d",
"--directory",
metavar="directory",
dest="mirror_flag",
type=arguments.mirror_directory,
help="local directory where buildcaches will be written. (deprecated)",
)
# TODO: remove from Spack 0.21
output.add_argument(
"-m",
"--mirror-name",
metavar="mirror-name",
dest="mirror_flag",
type=arguments.mirror_name,
help="name of the mirror where buildcaches will be written. (deprecated)",
)
# TODO: remove from Spack 0.21
output.add_argument(
"--mirror-url",
metavar="mirror-url",
dest="mirror_flag",
type=arguments.mirror_url,
help="URL of the mirror where buildcaches will be written. (deprecated)",
)
# Unfortunately we cannot add this to the mutually exclusive group above,
# because we have further positional arguments.
# TODO: require from Spack 0.21
push.add_argument("mirror", type=str, help="Mirror name, path, or URL.", nargs="?")
push.add_argument(
"--update-index",
"--rebuild-index",
@@ -90,6 +127,13 @@ def setup_parser(subparser):
install.add_argument(
"-m", "--multiple", action="store_true", help="allow all matching packages "
)
# TODO: remove from Spack 0.21
install.add_argument(
"-a",
"--allow-root",
action="store_true",
help="allow install root string in binary files after RPATH substitution. (deprecated)",
)
install.add_argument(
"-u",
"--unsigned",
@@ -224,21 +268,75 @@ def setup_parser(subparser):
# Sync buildcache entries from one mirror to another
sync = subparsers.add_parser("sync", help=sync_fn.__doc__)
sync.add_argument(
"--manifest-glob", help="A quoted glob pattern identifying copy manifest files"
"--manifest-glob",
default=None,
help="A quoted glob pattern identifying copy manifest files",
)
sync.add_argument(
source = sync.add_mutually_exclusive_group(required=False)
# TODO: remove in Spack 0.21
source.add_argument(
"--src-directory",
metavar="DIRECTORY",
dest="src_mirror_flag",
type=arguments.mirror_directory,
help="Source mirror as a local file path (deprecated)",
)
# TODO: remove in Spack 0.21
source.add_argument(
"--src-mirror-name",
metavar="MIRROR_NAME",
dest="src_mirror_flag",
type=arguments.mirror_name,
help="Name of the source mirror (deprecated)",
)
# TODO: remove in Spack 0.21
source.add_argument(
"--src-mirror-url",
metavar="MIRROR_URL",
dest="src_mirror_flag",
type=arguments.mirror_url,
help="URL of the source mirror (deprecated)",
)
# TODO: only support this in 0.21
source.add_argument(
"src_mirror",
metavar="source mirror",
type=arguments.mirror_name_or_url,
nargs="?",
help="Source mirror name, path, or URL",
nargs="?",
)
sync.add_argument(
dest = sync.add_mutually_exclusive_group(required=False)
# TODO: remove in Spack 0.21
dest.add_argument(
"--dest-directory",
metavar="DIRECTORY",
dest="dest_mirror_flag",
type=arguments.mirror_directory,
help="Destination mirror as a local file path (deprecated)",
)
# TODO: remove in Spack 0.21
dest.add_argument(
"--dest-mirror-name",
metavar="MIRROR_NAME",
type=arguments.mirror_name,
dest="dest_mirror_flag",
help="Name of the destination mirror (deprecated)",
)
# TODO: remove in Spack 0.21
dest.add_argument(
"--dest-mirror-url",
metavar="MIRROR_URL",
dest="dest_mirror_flag",
type=arguments.mirror_url,
help="URL of the destination mirror (deprecated)",
)
# TODO: only support this in 0.21
dest.add_argument(
"dest_mirror",
metavar="destination mirror",
type=arguments.mirror_name_or_url,
nargs="?",
help="Destination mirror name, path, or URL",
nargs="?",
)
sync.set_defaults(func=sync_fn)
@@ -246,8 +344,39 @@ def setup_parser(subparser):
update_index = subparsers.add_parser(
"update-index", aliases=["rebuild-index"], help=update_index_fn.__doc__
)
update_index.add_argument(
"mirror", type=arguments.mirror_name_or_url, help="Destination mirror name, path, or URL"
update_index_out = update_index.add_mutually_exclusive_group(required=True)
# TODO: remove in Spack 0.21
update_index_out.add_argument(
"-d",
"--directory",
metavar="directory",
dest="mirror_flag",
type=arguments.mirror_directory,
help="local directory where buildcaches will be written (deprecated)",
)
# TODO: remove in Spack 0.21
update_index_out.add_argument(
"-m",
"--mirror-name",
metavar="mirror-name",
dest="mirror_flag",
type=arguments.mirror_name,
help="name of the mirror where buildcaches will be written (deprecated)",
)
# TODO: remove in Spack 0.21
update_index_out.add_argument(
"--mirror-url",
metavar="mirror-url",
dest="mirror_flag",
type=arguments.mirror_url,
help="URL of the mirror where buildcaches will be written (deprecated)",
)
# TODO: require from Spack 0.21
update_index_out.add_argument(
"mirror",
type=arguments.mirror_name_or_url,
help="Destination mirror name, path, or URL",
nargs="?",
)
update_index.add_argument(
"-k",
@@ -307,12 +436,32 @@ def _concrete_spec_from_args(args):
def push_fn(args):
"""create a binary package and push it to a mirror"""
mirror = arguments.mirror_name_or_url(args.mirror)
if args.mirror_flag:
mirror = args.mirror_flag
elif not args.mirror:
raise ValueError("No mirror provided")
else:
mirror = arguments.mirror_name_or_url(args.mirror)
if args.mirror_flag:
tty.warn(
"Using flags to specify mirrors is deprecated and will be removed in "
"Spack 0.21, use positional arguments instead."
)
if args.rel:
tty.warn("The --rel flag is deprecated and will be removed in Spack 0.21")
# TODO: remove this in 0.21. If we have mirror_flag, the first
# spec is in the positional mirror arg due to argparse limitations.
input_specs = args.specs
if args.mirror_flag and args.mirror:
input_specs.insert(0, args.mirror)
url = mirror.push_url
specs = bindist.specs_to_be_packaged(
_matching_specs(args.specs, args.spec_file),
_matching_specs(input_specs, args.spec_file),
root="package" in args.things_to_install,
dependencies="dependencies" in args.things_to_install,
)
@@ -337,6 +486,7 @@ def push_fn(args):
url,
bindist.PushOptions(
force=args.force,
relative=args.rel,
unsigned=args.unsigned,
allow_root=args.allow_root,
key=args.key,
@@ -374,6 +524,9 @@ def install_fn(args):
if not args.specs:
tty.die("a spec argument is required to install from a buildcache")
if args.allow_root:
tty.warn("The --allow-root flag is deprecated and will be removed in Spack 0.21")
query = bindist.BinaryCacheQuery(all_architectures=args.otherarch)
matches = spack.store.find(args.specs, multiple=args.multiple, query_fn=query)
for match in matches:
@@ -557,11 +710,21 @@ def sync_fn(args):
manifest_copy(glob.glob(args.manifest_glob))
return 0
if args.src_mirror is None or args.dest_mirror is None:
tty.die("Provide mirrors to sync from and to.")
# If no manifest_glob, require a source and dest mirror.
# TODO: Simplify in Spack 0.21
if not (args.src_mirror_flag or args.src_mirror) or not (
args.dest_mirror_flag or args.dest_mirror
):
raise ValueError("Source and destination mirror are required.")
src_mirror = args.src_mirror
dest_mirror = args.dest_mirror
if args.src_mirror_flag or args.dest_mirror_flag:
tty.warn(
"Using flags to specify mirrors is deprecated and will be removed in "
"Spack 0.21, use positional arguments instead."
)
src_mirror = args.src_mirror_flag if args.src_mirror_flag else args.src_mirror
dest_mirror = args.dest_mirror_flag if args.dest_mirror_flag else args.dest_mirror
src_mirror_url = src_mirror.fetch_url
dest_mirror_url = dest_mirror.push_url
@@ -640,7 +803,13 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
def update_index_fn(args):
"""Update a buildcache index."""
update_index(args.mirror, update_keys=args.keys)
if args.mirror_flag:
tty.warn(
"Using flags to specify mirrors is deprecated and will be removed in "
"Spack 0.21, use positional arguments instead."
)
mirror = args.mirror_flag if args.mirror_flag else args.mirror
update_index(mirror, update_keys=args.keys)
def buildcache(parser, args):

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import sys

View File

@@ -228,7 +228,7 @@ def ci_reindex(args):
Use the active, gitlab-enabled environment to rebuild the buildcache
index for the associated mirror."""
env = spack.cmd.require_active_env(cmd_name="ci rebuild-index")
yaml_root = env.manifest[ev.TOP_LEVEL_KEY]
yaml_root = ev.config_dict(env.manifest)
if "mirrors" not in yaml_root or len(yaml_root["mirrors"].values()) < 1:
tty.die("spack ci rebuild-index requires an env containing a mirror")
@@ -274,6 +274,7 @@ def ci_rebuild(args):
signing_key = os.environ.get("SPACK_SIGNING_KEY")
job_spec_pkg_name = os.environ.get("SPACK_JOB_SPEC_PKG_NAME")
job_spec_dag_hash = os.environ.get("SPACK_JOB_SPEC_DAG_HASH")
compiler_action = os.environ.get("SPACK_COMPILER_ACTION")
spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE")
remote_mirror_override = os.environ.get("SPACK_REMOTE_MIRROR_OVERRIDE")
remote_mirror_url = os.environ.get("SPACK_REMOTE_MIRROR_URL")
@@ -294,6 +295,7 @@ def ci_rebuild(args):
tty.debug("pipeline_artifacts_dir = {0}".format(pipeline_artifacts_dir))
tty.debug("remote_mirror_url = {0}".format(remote_mirror_url))
tty.debug("job_spec_pkg_name = {0}".format(job_spec_pkg_name))
tty.debug("compiler_action = {0}".format(compiler_action))
# Query the environment manifest to find out whether we're reporting to a
# CDash instance, and if so, gather some information from the manifest to
@@ -409,6 +411,14 @@ def ci_rebuild(args):
if signing_key:
spack_ci.import_signing_key(signing_key)
# Depending on the specifics of this job, we might need to turn on the
# "config:install_missing compilers" option (to build this job spec
# with a bootstrapped compiler), or possibly run "spack compiler find"
# (to build a bootstrap compiler or one of its deps in a
# compiler-agnostic way), or maybe do nothing at all (to build a spec
# using a compiler already installed on the target system).
spack_ci.configure_compilers(compiler_action)
# Write this job's spec json into the reproduction directory, and it will
# also be used in the generated "spack install" command to install the spec
tty.debug("job concrete spec path: {0}".format(job_spec_json_path))

View File

@@ -3,22 +3,17 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import copy
import os
import re
import sys
from argparse import ArgumentParser, Namespace
from typing import IO, Any, Callable, Dict, Sequence, Set
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.argparsewriter import (
ArgparseCompletionWriter,
ArgparseRstWriter,
ArgparseWriter,
Command,
)
from llnl.util.argparsewriter import ArgparseCompletionWriter, ArgparseRstWriter, ArgparseWriter
from llnl.util.tty.colify import colify
import spack.cmd
@@ -32,12 +27,12 @@
#: list of command formatters
formatters: Dict[str, Callable[[Namespace, IO], None]] = {}
formatters = {}
#: standard arguments for updating completion scripts
#: we iterate through these when called with --update-completion
update_completion_args: Dict[str, Dict[str, Any]] = {
update_completion_args = {
"bash": {
"aliases": True,
"format": "bash",
@@ -47,25 +42,13 @@
}
def formatter(func: Callable[[Namespace, IO], None]) -> Callable[[Namespace, IO], None]:
"""Decorator used to register formatters.
Args:
func: Formatting function.
Returns:
The same function.
"""
def formatter(func):
"""Decorator used to register formatters"""
formatters[func.__name__] = func
return func
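An illustrative, hypothetical formatter registered through the decorator above; it would also need a matching entry in the --format choices to be reachable from the CLI.
@formatter
def plain(args, out):
    # Registered under its function name in the formatters dict above.
    for cmd in spack.cmd.all_commands():
        out.write(cmd + "\n")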
def setup_parser(subparser: ArgumentParser) -> None:
"""Set up the argument parser.
Args:
subparser: Preliminary argument parser.
"""
def setup_parser(subparser):
subparser.add_argument(
"--update-completion",
action="store_true",
@@ -108,34 +91,18 @@ class SpackArgparseRstWriter(ArgparseRstWriter):
def __init__(
self,
prog: str,
out: IO = sys.stdout,
aliases: bool = False,
documented_commands: Set[str] = set(),
rst_levels: Sequence[str] = ["-", "-", "^", "~", ":", "`"],
prog,
out=None,
aliases=False,
documented_commands=[],
rst_levels=["-", "-", "^", "~", ":", "`"],
):
"""Initialize a new SpackArgparseRstWriter instance.
Args:
prog: Program name.
out: File object to write to.
aliases: Whether or not to include subparsers for aliases.
documented_commands: Set of commands with additional documentation.
rst_levels: List of characters for rst section headings.
"""
super().__init__(prog, out, aliases, rst_levels)
out = sys.stdout if out is None else out
super(SpackArgparseRstWriter, self).__init__(prog, out, aliases, rst_levels)
self.documented = documented_commands
def usage(self, usage: str) -> str:
"""Example usage of a command.
Args:
usage: Command usage.
Returns:
Usage of a command.
"""
string = super().usage(usage)
def usage(self, *args):
string = super(SpackArgparseRstWriter, self).usage(*args)
cmd = self.parser.prog.replace(" ", "-")
if cmd in self.documented:
@@ -145,21 +112,11 @@ def usage(self, usage: str) -> str:
class SubcommandWriter(ArgparseWriter):
"""Write argparse output as a list of subcommands."""
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of this subcommand.
"""
def format(self, cmd):
return " " * self.level + cmd.prog + "\n"
_positional_to_subroutine: Dict[str, str] = {
_positional_to_subroutine = {
"package": "_all_packages",
"spec": "_all_packages",
"filter": "_all_packages",
@@ -181,19 +138,7 @@ def format(self, cmd: Command) -> str:
class BashCompletionWriter(ArgparseCompletionWriter):
"""Write argparse output as bash programmable tab completion."""
def body(
self, positionals: Sequence[str], optionals: Sequence[str], subcommands: Sequence[str]
) -> str:
"""Return the body of the function.
Args:
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
Returns:
Function body.
"""
def body(self, positionals, optionals, subcommands):
if positionals:
return """
if $list_options
@@ -223,15 +168,7 @@ def body(
self.optionals(optionals)
)
def positionals(self, positionals: Sequence[str]) -> str:
"""Return the syntax for reporting positional arguments.
Args:
positionals: List of positional arguments.
Returns:
Syntax for positional arguments.
"""
def positionals(self, positionals):
# If match found, return function name
for positional in positionals:
for key, value in _positional_to_subroutine.items():
@@ -241,49 +178,22 @@ def positionals(self, positionals: Sequence[str]) -> str:
# If no matches found, return empty list
return 'SPACK_COMPREPLY=""'
def optionals(self, optionals: Sequence[str]) -> str:
"""Return the syntax for reporting optional flags.
Args:
optionals: List of optional arguments.
Returns:
Syntax for optional flags.
"""
def optionals(self, optionals):
return 'SPACK_COMPREPLY="{0}"'.format(" ".join(optionals))
def subcommands(self, subcommands: Sequence[str]) -> str:
"""Return the syntax for reporting subcommands.
Args:
subcommands: List of subcommand parsers.
Returns:
Syntax for subcommand parsers
"""
def subcommands(self, subcommands):
return 'SPACK_COMPREPLY="{0}"'.format(" ".join(subcommands))
@formatter
def subcommands(args: Namespace, out: IO) -> None:
"""Hierarchical tree of subcommands.
args:
args: Command-line arguments.
out: File object to write to.
"""
def subcommands(args, out):
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
writer = SubcommandWriter(parser.prog, out, args.aliases)
writer.write(parser)
def rst_index(out: IO) -> None:
"""Generate an index of all commands.
Args:
out: File object to write to.
"""
def rst_index(out):
out.write("\n")
index = spack.main.index_commands()
@@ -311,19 +221,13 @@ def rst_index(out: IO) -> None:
@formatter
def rst(args: Namespace, out: IO) -> None:
"""ReStructuredText documentation of subcommands.
args:
args: Command-line arguments.
out: File object to write to.
"""
def rst(args, out):
# create a parser with all commands
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
# extract cross-refs of the form `_cmd-spack-<cmd>:` from rst files
documented_commands: Set[str] = set()
documented_commands = set()
for filename in args.rst_files:
with open(filename) as f:
for line in f:
@@ -341,13 +245,7 @@ def rst(args: Namespace, out: IO) -> None:
@formatter
def names(args: Namespace, out: IO) -> None:
"""Simple list of top-level commands.
args:
args: Command-line arguments.
out: File object to write to.
"""
def names(args, out):
commands = copy.copy(spack.cmd.all_commands())
if args.aliases:
@@ -357,13 +255,7 @@ def names(args: Namespace, out: IO) -> None:
@formatter
def bash(args: Namespace, out: IO) -> None:
"""Bash tab-completion script.
args:
args: Command-line arguments.
out: File object to write to.
"""
def bash(args, out):
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
@@ -371,13 +263,7 @@ def bash(args: Namespace, out: IO) -> None:
writer.write(parser)
def prepend_header(args: Namespace, out: IO) -> None:
"""Prepend header text at the beginning of a file.
Args:
args: Command-line arguments.
out: File object to write to.
"""
def prepend_header(args, out):
if not args.header:
return
@@ -385,14 +271,10 @@ def prepend_header(args: Namespace, out: IO) -> None:
out.write(header.read())
def _commands(parser: ArgumentParser, args: Namespace) -> None:
def _commands(parser, args):
"""This is the 'regular' command, which can be called multiple times.
See ``commands()`` below for ``--update-completion`` handling.
Args:
parser: Argument parser.
args: Command-line arguments.
"""
formatter = formatters[args.format]
@@ -414,15 +296,12 @@ def _commands(parser: ArgumentParser, args: Namespace) -> None:
formatter(args, sys.stdout)
def update_completion(parser: ArgumentParser, args: Namespace) -> None:
def update_completion(parser, args):
"""Iterate through the shells and update the standard completion files.
This is a convenience method to avoid calling this command many
times, and to simplify completion update for developers.
Args:
parser: Argument parser.
args: Command-line arguments.
"""
for shell, shell_args in update_completion_args.items():
for attr, value in shell_args.items():
@@ -430,20 +309,14 @@ def update_completion(parser: ArgumentParser, args: Namespace) -> None:
_commands(parser, args)
def commands(parser: ArgumentParser, args: Namespace) -> None:
"""Main function that calls formatter functions.
Args:
parser: Argument parser.
args: Command-line arguments.
"""
def commands(parser, args):
if args.update_completion:
if args.format != "names" or any([args.aliases, args.update, args.header]):
tty.die("--update-completion can only be specified alone.")
# this runs the command multiple times with different arguments
update_completion(parser, args)
return update_completion(parser, args)
else:
# run commands normally
_commands(parser, args)
return _commands(parser, args)
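The hunk above keeps a "formatters" registry that the @formatter decorator fills in and that _commands() later dispatches on via args.format. A minimal standalone sketch of that registry-decorator pattern (the registry name and decorator come from the hunk; the sample formatter and the dispatch call are illustrative):

import sys

formatters = {}

def formatter(func):
    """Register an output formatter under its function name."""
    formatters[func.__name__] = func
    return func

@formatter
def names(args, out):
    out.write("one line per top-level command\n")

formatters["names"](None, sys.stdout)  # dispatch by the requested format name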

View File

@@ -36,10 +36,7 @@ def shell_init_instructions(cmd, equivalent):
" source %s/setup-env.fish" % spack.paths.share_path,
"",
color.colorize("@*c{For Windows batch:}"),
" %s\\spack_cmd.bat" % spack.paths.bin_path,
"",
color.colorize("@*c{For PowerShell:}"),
" %s\\setup-env.ps1" % spack.paths.share_path,
" source %s/spack_cmd.bat" % spack.paths.share_path,
"",
"Or, if you do not want to use shell support, run "
+ ("one of these" if shell_specific else "this")
@@ -53,7 +50,6 @@ def shell_init_instructions(cmd, equivalent):
equivalent.format(sh_arg="--csh ") + " # csh/tcsh",
equivalent.format(sh_arg="--fish") + " # fish",
equivalent.format(sh_arg="--bat ") + " # batch",
equivalent.format(sh_arg="--pwsh") + " # powershell",
]
else:
msg += [" " + equivalent]

View File

@@ -349,7 +349,7 @@ def install_status():
"-I",
"--install-status",
action="store_true",
default=True,
default=False,
help="show install status of packages. packages can be: "
"installed [+], missing and needed by an installed package [-], "
"installed in and upstream instance [^], "
@@ -357,17 +357,6 @@ def install_status():
)
@arg
def no_install_status():
return Args(
"--no-install-status",
dest="install_status",
action="store_false",
default=True,
help="do not show install status annotations",
)
@arg
def no_checksum():
return Args(
@@ -479,7 +468,7 @@ def __init__(
# substituting '_' for ':'.
dest = dest.replace(":", "_")
super().__init__(
super(ConfigSetAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
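The removed --no-install-status block above used the paired-flag idiom: a store_true and a store_false option writing to the same dest, so whichever flag appears last wins. A small self-contained sketch of that idiom (the option names are reused from the hunk; the parser itself is hypothetical, not Spack's):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-I", "--install-status", dest="install_status",
                    action="store_true", default=True)
parser.add_argument("--no-install-status", dest="install_status",
                    action="store_false")

print(parser.parse_args([]).install_status)                       # True (default)
print(parser.parse_args(["--no-install-status"]).install_status)  # False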

View File

@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import os

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import sys
@@ -51,7 +53,7 @@ def setup_parser(subparser):
"--scope",
choices=scopes,
metavar=scopes_metavar,
default=None,
default=spack.config.default_modify_scope("compilers"),
help="configuration scope to modify",
)
@@ -104,21 +106,19 @@ def compiler_find(args):
def compiler_remove(args):
compiler_spec = spack.spec.CompilerSpec(args.compiler_spec)
candidate_compilers = spack.compilers.compilers_for_spec(compiler_spec, scope=args.scope)
if not candidate_compilers:
tty.die("No compilers match spec %s" % compiler_spec)
if not args.all and len(candidate_compilers) > 1:
tty.error(f"Multiple compilers match spec {compiler_spec}. Choose one:")
colify(reversed(sorted([c.spec.display_str for c in candidate_compilers])), indent=4)
cspec = spack.spec.CompilerSpec(args.compiler_spec)
compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
tty.die("No compilers match spec %s" % cspec)
elif not args.all and len(compilers) > 1:
tty.error("Multiple compilers match spec %s. Choose one:" % cspec)
colify(reversed(sorted([c.spec.display_str for c in compilers])), indent=4)
tty.msg("Or, use `spack compiler remove -a` to remove all of them.")
sys.exit(1)
for current_compiler in candidate_compilers:
spack.compilers.remove_compiler_from_config(current_compiler.spec, scope=args.scope)
tty.msg(f"{current_compiler.spec.display_str} has been removed")
for compiler in compilers:
spack.compilers.remove_compiler_from_config(compiler.spec, scope=args.scope)
tty.msg("Removed compiler %s" % compiler.spec.display_str)
def compiler_info(args):

View File

@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import collections
import os
import shutil

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import re
import urllib.parse
@@ -69,7 +71,7 @@ class {class_name}({base_class_name}):
'''
class BundlePackageTemplate:
class BundlePackageTemplate(object):
"""
Provides the default values to be used for a bundle package file template.
"""
@@ -120,7 +122,7 @@ def install(self, spec, prefix):
url_line = ' url = "{url}"'
def __init__(self, name, url, versions):
super().__init__(name, versions)
super(PackageTemplate, self).__init__(name, versions)
self.url_def = self.url_line.format(url=url)
@@ -198,7 +200,7 @@ def __init__(self, name, url, *args, **kwargs):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to lua-{0}".format(name))
name = "lua-{0}".format(name)
super().__init__(name, url, *args, **kwargs)
super(LuaPackageTemplate, self).__init__(name, url, *args, **kwargs)
class MesonPackageTemplate(PackageTemplate):
@@ -306,7 +308,7 @@ def __init__(self, name, url, *args, **kwargs):
tty.msg("Changing package name from {0} to rkt-{0}".format(name))
name = "rkt-{0}".format(name)
self.body_def = self.body_def.format(name[4:])
super().__init__(name, url, *args, **kwargs)
super(RacketPackageTemplate, self).__init__(name, url, *args, **kwargs)
class PythonPackageTemplate(PackageTemplate):
@@ -398,7 +400,7 @@ def __init__(self, name, url, *args, **kwargs):
+ self.url_line
)
super().__init__(name, url, *args, **kwargs)
super(PythonPackageTemplate, self).__init__(name, url, *args, **kwargs)
class RPackageTemplate(PackageTemplate):
@@ -437,7 +439,7 @@ def __init__(self, name, url, *args, **kwargs):
if bioc:
self.url_line = ' url = "{0}"\n' ' bioc = "{1}"'.format(url, r_name)
super().__init__(name, url, *args, **kwargs)
super(RPackageTemplate, self).__init__(name, url, *args, **kwargs)
class PerlmakePackageTemplate(PackageTemplate):
@@ -464,7 +466,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to perl-{0}".format(name))
name = "perl-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(PerlmakePackageTemplate, self).__init__(name, *args, **kwargs)
class PerlbuildPackageTemplate(PerlmakePackageTemplate):
@@ -497,7 +499,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to octave-{0}".format(name))
name = "octave-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(OctavePackageTemplate, self).__init__(name, *args, **kwargs)
class RubyPackageTemplate(PackageTemplate):
@@ -525,7 +527,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to ruby-{0}".format(name))
name = "ruby-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(RubyPackageTemplate, self).__init__(name, *args, **kwargs)
class MakefilePackageTemplate(PackageTemplate):
@@ -570,7 +572,7 @@ def __init__(self, name, *args, **kwargs):
tty.msg("Changing package name from {0} to py-{0}".format(name))
name = "py-{0}".format(name)
super().__init__(name, *args, **kwargs)
super(SIPPackageTemplate, self).__init__(name, *args, **kwargs)
templates = {
@@ -713,7 +715,7 @@ def __call__(self, stage, url):
output = tar("--exclude=*/*/*", "-tf", stage.archive_file, output=str)
except ProcessError:
output = ""
lines = output.splitlines()
lines = output.split("\n")
# Determine the build system based on the files contained
# in the archive.
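The one-line change above swaps output.split("\n") for output.splitlines(); the two are not interchangeable, since split("\n") keeps a trailing empty element and leaves carriage returns attached. A quick illustration:

text = "pkg-1.0/configure\r\npkg-1.0/Makefile.am\n"
print(text.split("\n"))   # ['pkg-1.0/configure\r', 'pkg-1.0/Makefile.am', '']
print(text.splitlines())  # ['pkg-1.0/configure', 'pkg-1.0/Makefile.am']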

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import platform
import re

View File

@@ -13,6 +13,8 @@
It is up to the user to ensure binary compatibility between the deprecated
installation and its deprecator.
"""
from __future__ import print_function
import argparse
import os

View File

@@ -86,13 +86,6 @@ def env_activate_setup_parser(subparser):
const="bat",
help="print bat commands to activate the environment",
)
shells.add_argument(
"--pwsh",
action="store_const",
dest="shell",
const="pwsh",
help="print powershell commands to activate environment",
)
view_options = subparser.add_mutually_exclusive_group()
view_options.add_argument(
@@ -418,7 +411,7 @@ def env_list(args):
colify(color_names, indent=4)
class ViewAction:
class ViewAction(object):
regenerate = "regenerate"
enable = "enable"
disable = "disable"

View File

@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import errno
import os
@@ -77,12 +79,6 @@ def setup_parser(subparser):
read_cray_manifest.add_argument(
"--directory", default=None, help="specify a directory storing a group of manifest files"
)
read_cray_manifest.add_argument(
"--ignore-default-dir",
action="store_true",
default=False,
help="ignore the default directory of manifest files",
)
read_cray_manifest.add_argument(
"--dry-run",
action="store_true",
@@ -181,16 +177,11 @@ def external_read_cray_manifest(args):
manifest_directory=args.directory,
dry_run=args.dry_run,
fail_on_error=args.fail_on_error,
ignore_default_dir=args.ignore_default_dir,
)
def _collect_and_consume_cray_manifest_files(
manifest_file=None,
manifest_directory=None,
dry_run=False,
fail_on_error=False,
ignore_default_dir=False,
manifest_file=None, manifest_directory=None, dry_run=False, fail_on_error=False
):
manifest_files = []
if manifest_file:
@@ -200,7 +191,7 @@ def _collect_and_consume_cray_manifest_files(
if manifest_directory:
manifest_dirs.append(manifest_directory)
if not ignore_default_dir and os.path.isdir(cray_manifest.default_path):
if os.path.isdir(cray_manifest.default_path):
tty.debug(
"Cray manifest path {0} exists: collecting all files to read.".format(
cray_manifest.default_path

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import copy
import sys

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import textwrap
from itertools import zip_longest
@@ -71,7 +73,7 @@ def variant(s):
return spack.spec.enabled_variant_color + s + plain_format
class VariantFormatter:
class VariantFormatter(object):
def __init__(self, variants):
self.variants = variants
self.headers = ("Name [Default]", "When", "Allowed values", "Description")

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import re
from collections import defaultdict
@@ -100,7 +102,7 @@ def list_files(args):
]
class LicenseError:
class LicenseError(object):
def __init__(self):
self.error_counts = defaultdict(int)

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import division, print_function
import argparse
import fnmatch
import json

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import llnl.util.tty as tty

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
from collections import defaultdict

View File

@@ -49,8 +49,9 @@ def setup_parser(subparser):
"-g",
"--git-installer-verbosity",
default="",
choices=["SILENT", "VERYSILENT"],
help="Level of verbosity provided by bundled Git Installer. Default is fully verbose",
choices=set(["SILENT", "VERYSILENT"]),
help="Level of verbosity provided by bundled Git Installer.\
Default is fully verbose",
required=False,
action="store",
dest="git_verbosity",

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import sys
from llnl.util import tty

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import itertools
import os

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import code
import os

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import sys

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import llnl.util.tty as tty

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import re
import sys
@@ -42,11 +44,7 @@ def setup_parser(subparser):
)
# Below are arguments w.r.t. spec display (like spack spec)
arguments.add_common_arguments(subparser, ["long", "very_long"])
install_status_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(install_status_group, ["install_status", "no_install_status"])
arguments.add_common_arguments(subparser, ["long", "very_long", "install_status"])
subparser.add_argument(
"-y",
"--yaml",

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import sys
import llnl.util.lang as lang
@@ -29,11 +31,7 @@ def setup_parser(subparser):
for further documentation regarding the spec syntax, see:
spack help --spec
"""
arguments.add_common_arguments(subparser, ["long", "very_long"])
install_status_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(install_status_group, ["install_status", "no_install_status"])
arguments.add_common_arguments(subparser, ["long", "very_long", "install_status"])
format_group = subparser.add_mutually_exclusive_group()
format_group.add_argument(
"-y",

View File

@@ -60,7 +60,7 @@ def is_package(f):
#: decorator for adding tools to the list
class tool:
class tool(object):
def __init__(self, name, required=False):
self.name = name
self.required = required

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import fnmatch
import os

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os.path
import shutil

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import sys
from typing import Dict, List, Optional

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import division, print_function
import argparse
import collections
import io

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import division, print_function
import urllib.parse
from collections import defaultdict
@@ -288,7 +290,7 @@ def url_stats(args):
# dictionary of issue type -> package -> descriptions
issues = defaultdict(lambda: defaultdict(lambda: []))
class UrlStats:
class UrlStats(object):
def __init__(self):
self.total = 0
self.schemes = defaultdict(lambda: 0)

View File

@@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import llnl.util.tty as tty

View File

@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import sys
import llnl.util.tty as tty

View File

@@ -189,7 +189,7 @@ def in_system_subdirectory(path):
return any(path_contains_subdirectory(path, x) for x in system_dirs)
class Compiler:
class Compiler(object):
"""This class encapsulates a Spack "compiler", which includes C,
C++, and Fortran compilers. Subclasses should implement
support for specific compilers, their possible names, arguments,
@@ -673,17 +673,17 @@ class CompilerAccessError(spack.error.SpackError):
def __init__(self, compiler, paths):
msg = "Compiler '%s' has executables that are missing" % compiler.spec
msg += " or are not executable: %s" % paths
super().__init__(msg)
super(CompilerAccessError, self).__init__(msg)
class InvalidCompilerError(spack.error.SpackError):
def __init__(self):
super().__init__("Compiler has no executables.")
super(InvalidCompilerError, self).__init__("Compiler has no executables.")
class UnsupportedCompilerFlag(spack.error.SpackError):
def __init__(self, compiler, feature, flag_name, ver_string=None):
super().__init__(
super(UnsupportedCompilerFlag, self).__init__(
"{0} ({1}) does not support {2} (as compiler.{3}).".format(
compiler.name, ver_string if ver_string else compiler.version, feature, flag_name
),
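Much of the churn in this file replaces super(ClassName, self) calls with the zero-argument form; in Python 3 the two spellings are equivalent, as a tiny illustrative example shows:

class Base(Exception):
    pass

class OldStyle(Base):
    def __init__(self, msg):
        super(OldStyle, self).__init__(msg)   # explicit, Python 2 compatible

class NewStyle(Base):
    def __init__(self, msg):
        super().__init__(msg)                 # implicit, Python 3 only

print(str(OldStyle("x")) == str(NewStyle("x")))  # True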

View File

@@ -37,6 +37,7 @@
"implicit_rpaths",
"extra_rpaths",
]
_cache_config_file = []
# TODO: Caches at module level make it difficult to mock configurations in
# TODO: unit tests. It might be worth reworking their implementation.
@@ -111,26 +112,36 @@ def _to_dict(compiler):
def get_compiler_config(scope=None, init_config=True):
"""Return the compiler configuration for the specified architecture."""
config = spack.config.get("compilers", scope=scope) or []
if config or not init_config:
return config
def init_compiler_config():
"""Compiler search used when Spack has no compilers."""
compilers = find_compilers()
compilers_dict = []
for compiler in compilers:
compilers_dict.append(_to_dict(compiler))
spack.config.set("compilers", compilers_dict, scope=scope)
merged_config = spack.config.get("compilers")
if merged_config:
return config
_init_compiler_config(scope=scope)
config = spack.config.get("compilers", scope=scope)
return config
def _init_compiler_config(*, scope):
"""Compiler search used when Spack has no compilers."""
compilers = find_compilers()
compilers_dict = []
for compiler in compilers:
compilers_dict.append(_to_dict(compiler))
spack.config.set("compilers", compilers_dict, scope=scope)
# Update the configuration if there are currently no compilers
# configured. Avoid updating automatically if there ARE site
# compilers configured but no user ones.
if not config and init_config:
if scope is None:
# We know no compilers were configured in any scope.
init_compiler_config()
config = spack.config.get("compilers", scope=scope)
elif scope == "user":
# Check the site config and update the user config if
# nothing is configured at the site level.
site_config = spack.config.get("compilers", scope="site")
sys_config = spack.config.get("compilers", scope="system")
if not site_config and not sys_config:
init_compiler_config()
config = spack.config.get("compilers", scope=scope)
return config
elif config:
return config
else:
return [] # Return empty list which we will later append to.
def compiler_config_files():
@@ -154,65 +165,52 @@ def add_compilers_to_config(compilers, scope=None, init_config=True):
compiler_config = get_compiler_config(scope, init_config)
for compiler in compilers:
compiler_config.append(_to_dict(compiler))
global _cache_config_file
_cache_config_file = compiler_config
spack.config.set("compilers", compiler_config, scope=scope)
@_auto_compiler_spec
def remove_compiler_from_config(compiler_spec, scope=None):
"""Remove compilers from configuration by spec.
If scope is None, all the scopes are searched for removal.
"""Remove compilers from the config, by spec.
Arguments:
compiler_spec: compiler to be removed
scope: configuration scope to modify
compiler_specs: a list of CompilerSpec objects.
scope: configuration scope to modify.
"""
candidate_scopes = [scope]
if scope is None:
candidate_scopes = spack.config.config.scopes.keys()
# Need a better way for this
global _cache_config_file
removal_happened = False
for current_scope in candidate_scopes:
removal_happened |= _remove_compiler_from_scope(compiler_spec, scope=current_scope)
return removal_happened
def _remove_compiler_from_scope(compiler_spec, scope):
"""Removes a compiler from a specific configuration scope.
Args:
compiler_spec: compiler to be removed
scope: configuration scope under consideration
Returns:
True if one or more compiler entries were actually removed, False otherwise
"""
assert scope is not None, "a specific scope is needed when calling this function"
compiler_config = get_compiler_config(scope)
config_length = len(compiler_config)
filtered_compiler_config = [
compiler_entry
for compiler_entry in compiler_config
comp
for comp in compiler_config
if not spack.spec.parse_with_version_concrete(
compiler_entry["compiler"]["spec"], compiler=True
comp["compiler"]["spec"], compiler=True
).satisfies(compiler_spec)
]
if len(filtered_compiler_config) == len(compiler_config):
return False
# We need to preserve the YAML type for comments, hence we are copying the
# items in the list that has just been retrieved
compiler_config[:] = filtered_compiler_config
spack.config.set("compilers", compiler_config, scope=scope)
return True
# Update the cache for changes
_cache_config_file = filtered_compiler_config
if len(filtered_compiler_config) == config_length: # No items removed
CompilerSpecInsufficientlySpecificError(compiler_spec)
spack.config.set("compilers", filtered_compiler_config, scope=scope)
def all_compilers_config(scope=None, init_config=True):
"""Return a set of specs for all the compiler versions currently
available to build with. These are instances of CompilerSpec.
"""
return get_compiler_config(scope, init_config)
# Get compilers for this architecture.
# Create a cache of the config file so we don't load all the time.
global _cache_config_file
if not _cache_config_file:
_cache_config_file = get_compiler_config(scope, init_config)
return _cache_config_file
else:
return _cache_config_file
def all_compiler_specs(scope=None, init_config=True):
@@ -235,38 +233,25 @@ def find_compilers(path_hints=None):
path_hints = get_path("PATH")
default_paths = fs.search_paths_for_executables(*path_hints)
# To detect the version of the compilers, we dispatch a certain number
# of function calls to different workers. Here we construct the list
# of arguments for each call.
arguments = []
for o in all_os_classes():
search_paths = getattr(o, "compiler_search_paths", default_paths)
arguments.extend(arguments_to_detect_version_fn(o, search_paths))
pkg_cls_to_check = [spack.repo.path.get_pkg_class(pkg) for pkg in ["apple-clang"]]
detected_compilers = spack.detection.by_executable(pkg_cls_to_check, path_hints=default_paths)
# Here we map the function arguments to the corresponding calls
tp = multiprocessing.pool.ThreadPool()
try:
detected_versions = tp.map(detect_version, arguments)
finally:
tp.close()
platform = spack.platforms.host()
operating_system = platform.operating_system("default_os")
target = platform.target("default_target")
def valid_version(item):
value, error = item
if error is None:
return True
try:
# This will fail on Python 2.6 if a non ascii
# character is in the error
tty.debug(error)
except UnicodeEncodeError:
pass
return False
compilers = []
for name, detected in detected_compilers.items():
for entry in detected:
compiler_cls = spack.compilers.class_for_compiler_name(name)
spec = spack.spec.CompilerSpec(entry.spec.name, f"={entry.spec.versions}")
paths = [
entry.spec.extra_attributes["compilers"].get(x, None)
for x in ("c", "cxx", "f77", "fc")
]
compilers.append(compiler_cls(spec, str(operating_system), str(target), paths))
def remove_errors(item):
value, _ = item
return value
return make_compiler_list(map(remove_errors, filter(valid_version, detected_versions)))
return compilers
def find_new_compilers(path_hints=None, scope=None):
@@ -369,7 +354,7 @@ def compiler_specs_for_arch(arch_spec, scope=None):
return [c.spec for c in compilers_for_arch(arch_spec, scope)]
class CacheReference:
class CacheReference(object):
"""This acts as a hashable reference to any object (regardless of whether
the object itself is hashable) and also prevents the object from being
garbage-collected (so if two CacheReference objects are equal, they
@@ -820,7 +805,7 @@ def name_matches(name, name_list):
class InvalidCompilerConfigurationError(spack.error.SpackError):
def __init__(self, compiler_spec):
super().__init__(
super(InvalidCompilerConfigurationError, self).__init__(
'Invalid configuration for [compiler "%s"]: ' % compiler_spec,
"Compiler configuration must contain entries for all compilers: %s"
% _path_instance_vars,
@@ -829,17 +814,19 @@ def __init__(self, compiler_spec):
class NoCompilersError(spack.error.SpackError):
def __init__(self):
super().__init__("Spack could not find any compilers!")
super(NoCompilersError, self).__init__("Spack could not find any compilers!")
class UnknownCompilerError(spack.error.SpackError):
def __init__(self, compiler_name):
super().__init__("Spack doesn't support the requested compiler: {0}".format(compiler_name))
super(UnknownCompilerError, self).__init__(
"Spack doesn't support the requested compiler: {0}".format(compiler_name)
)
class NoCompilerForSpecError(spack.error.SpackError):
def __init__(self, compiler_spec, target):
super().__init__(
super(NoCompilerForSpecError, self).__init__(
"No compilers for operating system %s satisfy spec %s" % (target, compiler_spec)
)
@@ -858,9 +845,11 @@ def __init__(self, compiler_spec, arch_spec):
+ " in the following files:\n\t"
+ "\n\t".join(duplicate_msg(x, y) for x, y in duplicate_table)
)
super().__init__(msg)
super(CompilerDuplicateError, self).__init__(msg)
class CompilerSpecInsufficientlySpecificError(spack.error.SpackError):
def __init__(self, compiler_spec):
super().__init__("Multiple compilers satisfy spec %s" % compiler_spec)
super(CompilerSpecInsufficientlySpecificError, self).__init__(
"Multiple compilers satisfy spec %s" % compiler_spec
)
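The hunk above drops the module-level _cache_config_file list that the compiler configuration code filled lazily. A rough sketch of that caching pattern and its main drawback, namely that the cached value silently survives later configuration changes (the names below are illustrative, not Spack's):

_cache = []

def get_config(loader):
    global _cache
    if not _cache:              # fill once, keep for the life of the process
        _cache = loader()
    return _cache

print(get_config(lambda: ["gcc@12.2.0"]))    # loads and caches
print(get_config(lambda: ["clang@15.0.0"]))  # still returns ['gcc@12.2.0']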

View File

@@ -132,7 +132,7 @@ def setup_custom_environment(self, pkg, env):
the 'DEVELOPER_DIR' environment variables to cause the xcrun and
related tools to use this Xcode.app.
"""
super().setup_custom_environment(pkg, env)
super(AppleClang, self).setup_custom_environment(pkg, env)
if not pkg.use_xcode:
# if we do it for all packages, we get into big troubles with MPI:

View File

@@ -12,7 +12,7 @@ class Cce(Compiler):
"""Cray compiler environment compiler."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
super(Cce, self).__init__(*args, **kwargs)
# For old cray compilers on module based systems we replace
# ``version_argument`` with the old value. Cannot be a property
# as the new value is used in classmethods for path-based detection

View File

@@ -77,7 +77,7 @@ class Msvc(Compiler):
def __init__(self, *args, **kwargs):
new_pth = [pth if pth else get_valid_fortran_pth(args[0].version) for pth in args[3]]
args[3][:] = new_pth
super().__init__(*args, **kwargs)
super(Msvc, self).__init__(*args, **kwargs)
if os.getenv("ONEAPI_ROOT"):
# If this found, it sets all the vars
self.setvarsfile = os.path.join(os.getenv("ONEAPI_ROOT"), "setvars.bat")
@@ -151,11 +151,7 @@ def setup_custom_environment(self, pkg, env):
arch = arch.replace("-", "_")
# vcvars can target specific sdk versions, force it to pick up concretized sdk
# version, if needed by spec
sdk_ver = (
""
if "win-sdk" not in pkg.spec or pkg.name == "win-sdk"
else pkg.spec["win-sdk"].version.string + ".0"
)
sdk_ver = "" if "win-sdk" not in pkg.spec else pkg.spec["win-sdk"].version.string + ".0"
# provide vcvars with msvc version selected by concretization,
# not whatever it happens to pick up on the system (highest available version)
out = subprocess.check_output( # novermin

View File

@@ -14,6 +14,8 @@
TODO: make this customizable and allow users to configure
concretization policies.
"""
from __future__ import print_function
import functools
import platform
import tempfile
@@ -48,7 +50,7 @@
@functools.total_ordering
class reverse_order:
class reverse_order(object):
"""Helper for creating key functions.
This is a wrapper that inverts the sense of the natural
@@ -65,7 +67,7 @@ def __lt__(self, other):
return other.value < self.value
class Concretizer:
class Concretizer(object):
"""You can subclass this class to override some of the default
concretization strategies, or you can override all of them.
"""
@@ -792,7 +794,9 @@ def __init__(self, arch, available_os_targets):
" operating systems and targets:\n\t" + "\n\t".join(available_os_target_strs)
)
super().__init__(err_msg, "Run 'spack compiler find' to add compilers.")
super(NoCompilersForArchError, self).__init__(
err_msg, "Run 'spack compiler find' to add compilers."
)
class UnavailableCompilerVersionError(spack.error.SpackError):
@@ -804,7 +808,7 @@ def __init__(self, compiler_spec, arch=None):
if arch:
err_msg += " for operating system {0} and target {1}.".format(arch.os, arch.target)
super().__init__(
super(UnavailableCompilerVersionError, self).__init__(
err_msg,
"Run 'spack compiler find' to add compilers or "
"'spack compilers' to see which compilers are already recognized"
@@ -817,7 +821,7 @@ class NoValidVersionError(spack.error.SpackError):
particular spec."""
def __init__(self, spec):
super().__init__(
super(NoValidVersionError, self).__init__(
"There are no valid versions for %s that match '%s'" % (spec.name, spec.versions)
)
@@ -828,7 +832,7 @@ class InsufficientArchitectureInfoError(spack.error.SpackError):
system"""
def __init__(self, spec, archs):
super().__init__(
super(InsufficientArchitectureInfoError, self).__init__(
"Cannot determine necessary architecture information for '%s': %s"
% (spec.name, str(archs))
)
@@ -844,4 +848,4 @@ def __init__(self, spec):
"The spec\n '%s'\n is configured as not buildable, "
"and no matching external installs were found"
)
super().__init__(msg % spec)
super(NoBuildError, self).__init__(msg % spec)
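The reverse_order helper near the top of this hunk wraps a value so that its comparisons are inverted, which lets it serve as a sort key that flips the ordering of one field. A minimal working sketch along the same lines:

import functools

@functools.total_ordering
class reverse_order:
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return other.value == self.value
    def __lt__(self, other):
        return other.value < self.value   # inverted: larger values sort first

print(sorted([3, 1, 2], key=reverse_order))  # [3, 2, 1]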

View File

@@ -81,7 +81,7 @@
# Same as above, but including keys for environments
# this allows us to unify config reading between configs and environments
all_schemas = copy.deepcopy(section_schemas)
all_schemas.update({spack.schema.env.TOP_LEVEL_KEY: spack.schema.env.schema})
all_schemas.update(dict((key, spack.schema.env.schema) for key in spack.schema.env.keys))
#: Path to the default configuration
configuration_defaults_path = ("defaults", os.path.join(spack.paths.etc_path, "defaults"))
@@ -111,7 +111,15 @@
overrides_base_name = "overrides-"
class ConfigScope:
def first_existing(dictionary, keys):
"""Get the value of the first key in keys that is in the dictionary."""
try:
return next(k for k in keys if k in dictionary)
except StopIteration:
raise KeyError("None of %s is in dict!" % str(keys))
class ConfigScope(object):
"""This class represents a configuration scope.
A scope is one directory containing named configuration files.
@@ -182,7 +190,7 @@ def __init__(self, name, path, schema, yaml_path=None):
config:
install_tree: $spack/opt/spack
"""
super().__init__(name, path)
super(SingleFileScope, self).__init__(name, path)
self._raw_data = None
self.schema = schema
self.yaml_path = yaml_path or []
@@ -310,7 +318,7 @@ class InternalConfigScope(ConfigScope):
"""
def __init__(self, name, data=None):
super().__init__(name, None)
super(InternalConfigScope, self).__init__(name, None)
self.sections = syaml.syaml_dict()
if data:
@@ -382,7 +390,7 @@ def _method(self, *args, **kwargs):
return _method
class Configuration:
class Configuration(object):
"""A full Spack configuration, from a hierarchy of config files.
This class makes it easy to add a new scope on top of an existing one.
@@ -830,10 +838,12 @@ def _config():
def add_from_file(filename, scope=None):
"""Add updates to a config from a filename"""
# Extract internal attributes, if we are dealing with an environment
import spack.environment as ev
# Get file as config dict
data = read_config_file(filename)
if spack.schema.env.TOP_LEVEL_KEY in data:
data = data[spack.schema.env.TOP_LEVEL_KEY]
if any(k in data for k in spack.schema.env.keys):
data = ev.config_dict(data)
# update all sections from config dict
# We have to iterate on keys to keep overrides from the file
@@ -1343,11 +1353,17 @@ def use_configuration(*scopes_or_paths):
configuration = _config_from(scopes_or_paths)
config.clear_caches(), configuration.clear_caches()
# Save and clear the current compiler cache
saved_compiler_cache = spack.compilers._cache_config_file
spack.compilers._cache_config_file = []
saved_config, config = config, configuration
try:
yield configuration
finally:
# Restore previous config files
spack.compilers._cache_config_file = saved_compiler_cache
config = saved_config
@@ -1495,7 +1511,7 @@ def __init__(self, validation_error, data, filename=None, line=None):
location += ":%d" % line
message = "%s: %s" % (location, validation_error.message)
super().__init__(message)
super(ConfigError, self).__init__(message)
def _get_mark(self, validation_error, data):
"""Get the file/line mark fo a validation error from a Spack YAML file."""

View File

@@ -37,7 +37,7 @@ def validate(configuration_file):
config = syaml.load(f)
# Ensure we have a "container" attribute with sensible defaults set
env_dict = config[ev.TOP_LEVEL_KEY]
env_dict = ev.config_dict(config)
env_dict.setdefault(
"container", {"format": "docker", "images": {"os": "ubuntu:22.04", "spack": "develop"}}
)

View File

@@ -17,7 +17,7 @@
"template": "container/fedora_38.dockerfile",
"image": "docker.io/fedora:38"
},
"os_package_manager": "dnf",
"os_package_manager": "yum",
"build": "spack/fedora38",
"build_tags": {
"develop": "latest"
@@ -31,7 +31,7 @@
"template": "container/fedora_37.dockerfile",
"image": "docker.io/fedora:37"
},
"os_package_manager": "dnf",
"os_package_manager": "yum",
"build": "spack/fedora37",
"build_tags": {
"develop": "latest"
@@ -45,7 +45,7 @@
"template": "container/rockylinux_9.dockerfile",
"image": "docker.io/rockylinux:9"
},
"os_package_manager": "dnf_epel",
"os_package_manager": "yum",
"build": "spack/rockylinux9",
"build_tags": {
"develop": "latest"
@@ -59,7 +59,7 @@
"template": "container/rockylinux_8.dockerfile",
"image": "docker.io/rockylinux:8"
},
"os_package_manager": "dnf_epel",
"os_package_manager": "yum",
"build": "spack/rockylinux8",
"build_tags": {
"develop": "latest"
@@ -73,7 +73,7 @@
"template": "container/almalinux_9.dockerfile",
"image": "quay.io/almalinux/almalinux:9"
},
"os_package_manager": "dnf_epel",
"os_package_manager": "yum",
"build": "spack/almalinux9",
"build_tags": {
"develop": "latest"
@@ -87,7 +87,7 @@
"template": "container/almalinux_8.dockerfile",
"image": "quay.io/almalinux/almalinux:8"
},
"os_package_manager": "dnf_epel",
"os_package_manager": "yum",
"build": "spack/almalinux8",
"build_tags": {
"develop": "latest"
@@ -101,7 +101,7 @@
"template": "container/centos_stream.dockerfile",
"image": "quay.io/centos/centos:stream"
},
"os_package_manager": "dnf_epel",
"os_package_manager": "yum",
"build": "spack/centos-stream",
"final": {
"image": "quay.io/centos/centos:stream"
@@ -185,16 +185,6 @@
"install": "apt-get -yqq install",
"clean": "rm -rf /var/lib/apt/lists/*"
},
"dnf": {
"update": "dnf update -y",
"install": "dnf install -y",
"clean": "rm -rf /var/cache/dnf && dnf clean all"
},
"dnf_epel": {
"update": "dnf update -y && dnf install -y epel-release && dnf update -y",
"install": "dnf install -y",
"clean": "rm -rf /var/cache/dnf && dnf clean all"
},
"yum": {
"update": "yum update -y && yum install -y epel-release && yum update -y",
"install": "yum install -y",

Some files were not shown because too many files have changed in this diff