Compare commits


1 Commit

Author          SHA1        Message                                                      Date
Gregory Becker  dd668047fb  prototype: check commits against the tip of known branches  2022-03-03 10:35:15 -08:00
7646 changed files with 177036 additions and 241251 deletions

.flake8

@@ -1,25 +1,43 @@
# -*- conf -*-
# flake8 settings for Spack.
# flake8 settings for Spack core files.
#
# These exceptions are for Spack core files. We're slightly more lenient
# with packages. See .flake8_packages for that.
#
# This is the only flake8 rule Spack violates somewhat flagrantly
# E1: Indentation
# - E129: visually indented line with same indent as next logical line
#
# E2: Whitespace
# - E221: multiple spaces before operator
# - E241: multiple spaces after ','
# - E272: multiple spaces before keyword
#
# E7: Statement
# - E731: do not assign a lambda expression, use a def
#
# This is the only flake8 exception needed when using Black.
# - E203: white space around slice operators can be required, ignore : warn
# W5: Line break warning
# - W503: line break before binary operator
# - W504: line break after binary operator
#
# We still allow these in packages (Would like to get rid of them or rely on mypy
# in the future)
# - F403: from/import * used; unable to detect undefined names
# These are required to get the package.py files to test clean:
# - F999: syntax error in doctest
#
# N8: PEP8-naming
# - N801: class names should use CapWords convention
# - N813: camelcase imported as lowercase
# - N814: camelcase imported as constant
#
# F4: pyflakes import checks, these are now checked by mypy more precisely
# - F403: from module import *
# - F405: undefined name or from *
# - F821: undefined name (needed with from/import *)
#
# Black ignores, these are incompatible with black style and do not follow PEP-8
# - E203: white space around slice operators can be required, ignore : warn
# - W503: see above, already ignored for line-breaks
#
[flake8]
#ignore = E129,,W503,W504,F999,N801,N813,N814,F403,F405,E203
extend-ignore = E731,E203
max-line-length = 99
ignore = E129,E221,E241,E272,E731,W503,W504,F999,N801,N813,N814,F403,F405
max-line-length = 88
# F4: Import
# - F405: `name` may be undefined, or undefined from star imports: `module`
@@ -28,7 +46,7 @@ max-line-length = 99
# - F821: undefined name `name`
#
per-file-ignores =
var/spack/repos/*/package.py:F403,F405,F821
var/spack/repos/*/package.py:F405,F821
# exclude things we usually do not want linting for.
# These still get linted when passed explicitly, as when spack flake8 passes
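
As context for the Black-related exceptions noted in the comments above (this snippet is editorial, not part of the diff; the names are illustrative):
```python
# Black formats slices whose bounds are expressions with spaces around ":",
# which is exactly the spacing that flake8's E203 ("whitespace before ':'")
# flags, so E203 must be ignored when Black formats the code.
ham = list(range(10))
lower, upper, offset = 1, 5, 2
print(ham[lower + offset : upper + offset])  # Black-style; triggers E203 unless ignored
```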

@@ -1,3 +0,0 @@
# .git-blame-ignore-revs
# Formatted entire codebase with black
f52f6e99dbf1131886a80112b8c79dfc414afb7c

@@ -12,7 +12,6 @@ on:
# built-in repository or documentation
- 'var/spack/repos/builtin/**'
- '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
- '!var/spack/repos/builtin/packages/clingo/**'
- '!var/spack/repos/builtin/packages/python/**'
- '!var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/docs/**'
@@ -20,16 +19,11 @@ on:
# nightly at 2:16 AM
- cron: '16 2 * * *'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
fedora-clingo-sources:
runs-on: ubuntu-latest
container: "fedora:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -37,25 +31,19 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap untrust github-actions-v0.2
spack bootstrap untrust github-actions
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -63,7 +51,6 @@ jobs:
ubuntu-clingo-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -74,25 +61,19 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap untrust github-actions-v0.2
spack bootstrap untrust github-actions
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -100,7 +81,6 @@ jobs:
ubuntu-clingo-binaries-and-patchelf:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -110,20 +90,14 @@ jobs:
apt-get install -y \
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
@@ -131,10 +105,10 @@ jobs:
spack -d solve zlib
tree ~/.spack/bootstrap/store/
opensuse-clingo-sources:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -144,55 +118,47 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-fortran tar git gpg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup repo
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap untrust github-actions-v0.2
spack bootstrap untrust github-actions
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
macos-clingo-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
export PATH=/usr/local/opt/bison@2.7/bin:$PATH
spack bootstrap untrust github-actions-v0.2
spack bootstrap untrust github-actions
spack external find --not-buildable cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
macos-clingo-binaries:
runs-on: ${{ matrix.macos-version }}
runs-on: macos-latest
strategy:
matrix:
python-version: ['3.6', '3.7', '3.8', '3.9', '3.10']
macos-version: ['macos-11', 'macos-12']
if: github.repository == 'spack/spack'
python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
steps:
- name: Install dependencies
run: |
brew install tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Bootstrap clingo
@@ -206,15 +172,13 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10']
if: github.repository == 'spack/spack'
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9']
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Setup repo
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
@@ -229,7 +193,6 @@ jobs:
ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -239,20 +202,14 @@ jobs:
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
@@ -264,7 +221,6 @@ jobs:
ubuntu-gnupg-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -275,40 +231,32 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap untrust github-actions-v0.2
spack bootstrap untrust github-actions
spack -d gpg list
tree ~/.spack/bootstrap/store/
macos-gnupg-binaries:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -318,27 +266,17 @@ jobs:
macos-gnupg-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install gawk tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap untrust github-actions-v0.2
spack bootstrap untrust github-actions
spack -d gpg list
tree ~/.spack/bootstrap/store/
# [1] Distros that have patched git to resolve CVE-2022-24765 (e.g. Ubuntu patching v2.25.1)
# introduce breaking behavior, so we have to set `safe.directory` in gitconfig ourselves.
# See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog

@@ -13,16 +13,10 @@ on:
paths:
- '.github/workflows/build-containers.yml'
- 'share/spack/docker/*'
- 'share/templates/container/*'
- 'lib/spack/spack/container/*'
# Let's also build & tag Spack containers on releases.
release:
types: [published]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
deploy-images:
runs-on: ubuntu-latest
@@ -35,22 +29,15 @@ jobs:
# A matrix of Dockerfile paths, associated tags, and which architectures
# they support.
matrix:
# Meaning of the various items in the matrix list
# 0: Container name (e.g. ubuntu-bionic)
# 1: Platforms to build for
# 2: Base image (e.g. ubuntu:18.04)
dockerfile: [[amazon-linux, 'linux/amd64,linux/arm64', 'amazonlinux:2'],
[centos7, 'linux/amd64,linux/arm64,linux/ppc64le', 'centos:7'],
[centos-stream, 'linux/amd64,linux/arm64,linux/ppc64le', 'centos:stream'],
[leap15, 'linux/amd64,linux/arm64,linux/ppc64le', 'opensuse/leap:15'],
[ubuntu-bionic, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:18.04'],
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04']]
dockerfile: [[amazon-linux, amazonlinux-2.dockerfile, 'linux/amd64,linux/arm64'],
[centos7, centos-7.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le'],
[leap15, leap-15.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le'],
[ubuntu-xenial, ubuntu-1604.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le'],
[ubuntu-bionic, ubuntu-1804.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -65,54 +52,40 @@ jobs:
versioned="${{matrix.dockerfile[0]}}:${GITHUB_REF##*/}"
echo "versioned=${versioned}" >> $GITHUB_ENV
- name: Generate the Dockerfile
env:
SPACK_YAML_OS: "${{ matrix.dockerfile[2] }}"
- name: Check ${{ matrix.dockerfile[1] }} Exists
run: |
.github/workflows/generate_spack_yaml_containerize.sh
. share/spack/setup-env.sh
mkdir -p dockerfiles/${{ matrix.dockerfile[0] }}
spack containerize --last-stage=bootstrap | tee dockerfiles/${{ matrix.dockerfile[0] }}/Dockerfile
printf "Preparing to build ${{ env.container }} from dockerfiles/${{ matrix.dockerfile[0] }}/Dockerfile"
if [ ! -f "dockerfiles/${{ matrix.dockerfile[0] }}/Dockerfile" ]; then
printf "dockerfiles/${{ matrix.dockerfile[0] }}/Dockerfile does not exist"
printf "Preparing to build ${{ env.container }} from ${{ matrix.dockerfile[1] }}"
if [ ! -f "share/spack/docker/${{ matrix.dockerfile[1]}}" ]; then
printf "Dockerfile ${{ matrix.dockerfile[0]}} does not exist"
exit 1;
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with:
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # @v1
uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # @v1
uses: docker/setup-buildx-action@94ab11c41e45d028884a99163086648e898eed25 # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@c84f38281176d4c9cdb1626ffafcd6b3911b5d94 # @v2
- name: Build & Deploy ${{ matrix.dockerfile[1] }}
uses: docker/build-push-action@7f9d37fa544684fb73bfe4835ed7214c255ce02b # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}
file: share/spack/docker/${{matrix.dockerfile[1]}}
platforms: ${{ matrix.dockerfile[2] }}
push: ${{ github.event_name != 'pull_request' }}
cache-from: type=gha
cache-to: type=gha,mode=max
tags: |
spack/${{ env.container }}
spack/${{ env.versioned }}

@@ -1,7 +0,0 @@
$ proc = Start-Process ${{ env.spack_installer }}\spack.exe "/install /quiet" -Passthru
$handle = $proc.Handle # cache proc.Handle
$proc.WaitForExit();
if ($proc.ExitCode -ne 0) {
Write-Warning "$_ exited with status code $($proc.ExitCode)"
}

@@ -1,9 +0,0 @@
#!/bin/bash
(echo "spack:" \
&& echo " specs: []" \
&& echo " container:" \
&& echo " format: docker" \
&& echo " images:" \
&& echo " os: \"${SPACK_YAML_OS}\"" \
&& echo " spack:" \
&& echo " ref: ${GITHUB_REF}") > spack.yaml

.github/workflows/macos_python.yml (new file)

@@ -0,0 +1,64 @@
# These are nightly package tests for macOS
# focus areas:
# - initial user experience
# - scientific python stack
name: macOS builds nightly
on:
schedule:
# nightly at 1 AM
- cron: '0 1 * * *'
pull_request:
branches:
- develop
paths:
# Run if we modify this yaml file
- '.github/workflows/macos_python.yml'
# TODO: run if we touch any of the recipes involved in this
# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
jobs:
install_gcc:
name: gcc with clang
runs-on: macos-latest
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
# 9.2.0 is the latest version on which we apply homebrew patch
spack install -v --fail-fast gcc@11.2.0 %apple-clang
install_jupyter_clang:
name: jupyter
runs-on: macos-latest
timeout-minutes: 700
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-jupyterlab %apple-clang
install_scipy_clang:
name: scipy, mpl, pd
runs-on: macos-latest
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-scipy %apple-clang
spack install -v --fail-fast py-matplotlib %apple-clang
spack install -v --fail-fast py-pandas %apple-clang

@@ -1,12 +0,0 @@
# (c) 2021 Lawrence Livermore National Laboratory
Set-Location spack
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
git config --global core.longpaths true
if ($(git branch --show-current) -ne "develop")
{
git branch develop origin/develop
}

@@ -1,4 +0,0 @@
param ($systemFolder, $shortcut)
$start = [System.Environment]::GetFolderPath("$systemFolder")
Invoke-Item "$start\Programs\Spack\$shortcut"

@@ -9,39 +9,34 @@ on:
branches:
- develop
- releases/**
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
# Validate that the code can be run on all the Python versions
# supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: '3.10'
python-version: 3.9
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.5- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv var/spack/repos
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.5- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: '3.10'
python-version: 3.9
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools types-six
@@ -62,7 +57,7 @@ jobs:
packages: ${{ steps.filter.outputs.packages }}
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
@@ -101,18 +96,20 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10']
python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9]
concretizer: ['clingo']
include:
- python-version: 2.7
concretizer: original
- python-version: 3.6
concretizer: original
- python-version: 3.9
concretizer: original
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -128,7 +125,7 @@ jobs:
# ensure style checks are not skipped in unit tests for python >= 3.6
# note that true/false (i.e., 1/0) are opposite in conditions in python and bash
if python -c 'import sys; sys.exit(not sys.version_info >= (3, 6))'; then
pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click==8.0.4" "black<=21.12b0"
pip install --upgrade flake8 isort>=4.3.5 mypy>=0.900 black
fi
- name: Pin pathlib for Python 2.7
if: ${{ matrix.python-version == 2.7 }}
@@ -165,7 +162,7 @@ jobs:
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,${{ matrix.concretizer }}
@@ -174,12 +171,12 @@ jobs:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: '3.10'
python-version: 3.9
- name: Install System packages
run: |
sudo apt-get -y update
@@ -203,7 +200,7 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: shelltests,linux
@@ -221,7 +218,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -240,12 +237,12 @@ jobs:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: '3.10'
python-version: 3.9
- name: Install System packages
run: |
sudo apt-get -y update
@@ -277,7 +274,7 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,clingo
@@ -289,10 +286,10 @@ jobs:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -323,7 +320,7 @@ jobs:
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
fi
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
files: ./coverage.xml
@@ -334,10 +331,10 @@ jobs:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@0ebf233433c08fb9061af664d501c3f3ff0e9e20 # @v2
with:
python-version: '3.10'
python-version: 3.9
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2
@@ -348,12 +345,12 @@ jobs:
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
- name: Package audits (wwithout coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits

@@ -1,193 +0,0 @@
name: windows tests
on:
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
defaults:
run:
shell:
powershell Invoke-Expression -Command ".\share\spack\qa\windows_test_setup.ps1"; {0}
jobs:
validate:
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python Packages
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.6-' -v spack/lib/spack/spack/ spack/lib/spack/llnl/ spack/bin/
- name: vermin (Repositories)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.6-' -v spack/var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six setuptools flake8 "isort>=4.3.5" "mypy>=0.800" "click==8.0.4" "black<=21.12b0" pywin32 types-python-dateutil
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Run style tests
run: |
spack style
- name: Verify license headers
run: |
python spack\bin\spack license verify
unittest:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Unit Test
run: |
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
spack unit-test --verbose --ignore=lib/spack/spack/test/cmd
unittest-cmd:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Command Unit Test
run: |
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
spack unit-test lib/spack/spack/test/cmd --verbose
buildtest:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Build Test
run: |
spack compiler find
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
spack external find cmake
spack external find ninja
spack install abseil-cpp
generate-installer-test:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- name: Disable Windows Symlinks
run: |
git config --global core.symlinks false
shell:
powershell
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Add Light and Candle to Path
run: |
$env:WIX >> $GITHUB_PATH
- name: Run Installer
run: |
.\spack\share\spack\qa\setup_spack.ps1
spack make-installer -s spack -g SILENT pkg
echo "installer_root=$((pwd).Path)" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
env:
ProgressPreference: SilentlyContinue
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with:
name: Windows Spack Installer Bundle
path: ${{ env.installer_root }}\pkg\Spack.exe
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with:
name: Windows Spack Installer
path: ${{ env.installer_root}}\pkg\Spack.msi
execute-installer:
needs: generate-installer-test
runs-on: windows-latest
defaults:
run:
shell: pwsh
steps:
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Setup installer directory
run: |
mkdir -p spack_installer
echo "spack_installer=$((pwd).Path)\spack_installer" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
- uses: actions/download-artifact@v3
with:
name: Windows Spack Installer Bundle
path: ${{ env.spack_installer }}
- name: Execute Bundled Installer
run: |
$proc = Start-Process ${{ env.spack_installer }}\spack.exe "/install /quiet" -Passthru
$handle = $proc.Handle # cache proc.Handle
$proc.WaitForExit();
$LASTEXITCODE
env:
ProgressPreference: SilentlyContinue
- uses: actions/download-artifact@v3
with:
name: Windows Spack Installer
path: ${{ env.spack_installer }}
- name: Execute MSI
run: |
$proc = Start-Process ${{ env.spack_installer }}\spack.msi "/quiet" -Passthru
$handle = $proc.Handle # cache proc.Handle
$proc.WaitForExit();
$LASTEXITCODE

@@ -1,248 +1,3 @@
# v0.18.1 (2022-07-19)
### Spack Bugfixes
* Fix several bugs related to bootstrapping (#30834,#31042,#31180)
* Fix a regression that was causing spec hashes to differ between
Python 2 and Python 3 (#31092)
* Fixed compiler flags for oneAPI and DPC++ (#30856)
* Fixed several issues related to concretization (#31142,#31153,#31170,#31226)
* Improved support for Cray manifest file and `spack external find` (#31144,#31201,#31173,#31186)
* Assign a version to openSUSE Tumbleweed according to the GLIBC version
in the system (#19895)
* Improved Dockerfile generation for `spack containerize` (#29741,#31321)
* Fixed a few bugs related to concurrent execution of commands (#31509,#31493,#31477)
### Package updates
* WarpX: add v22.06, fixed libs property (#30866,#31102)
* openPMD: add v0.14.5, update recipe for @develop (#29484,#31023)
# v0.18.0 (2022-05-28)
`v0.18.0` is a major feature release.
## Major features in this release
1. **Concretizer now reuses by default**
`spack install --reuse` was introduced in `v0.17.0`, and `--reuse`
is now the default concretization mode. Spack will try hard to
resolve dependencies using installed packages or binaries (#30396).
To avoid reuse and to use the latest package configurations, (the
old default), you can use `spack install --fresh`, or add
configuration like this to your environment or `concretizer.yaml`:
```yaml
concretizer:
reuse: false
```
2. **Finer-grained hashes**
Spack hashes now include `link`, `run`, *and* `build` dependencies,
as well as a canonical hash of package recipes. Previously, hashes
only included `link` and `run` dependencies (though `build`
dependencies were stored by environments). We coarsened the hash to
reduce churn in user installations, but the new default concretizer
behavior mitigates this concern and gets us reuse *and* provenance.
You will be able to see the build dependencies of new installations
with `spack find`. Old installations will not change and their
hashes will not be affected. (#28156, #28504, #30717, #30861)
3. **Improved error messages**
Error handling with the new concretizer is now done with
optimization criteria rather than with unsatisfiable cores, and
Spack reports many more details about conflicting constraints.
(#30669)
4. **Unify environments when possible**
Environments have thus far supported `concretization: together` or
`concretization: separately`. These have been replaced by a new
preference in `concretizer.yaml`:
```yaml
concretizer:
unify: [true|false|when_possible]
```
`concretizer:unify:when_possible` will *try* to resolve a fully
unified environment, but if it cannot, it will create multiple
configurations of some packages where it has to. For large
environments that previously had to be concretized separately, this
can result in a huge speedup (40-50x). (#28941)
5. **Automatically find externals on Cray machines**
Spack can now automatically discover installed packages in the Cray
Programming Environment by running `spack external find` (or `spack
external read-cray-manifest` to *only* query the PE). Packages from
the PE (e.g., `cray-mpich`) are added to the database with full
dependency information, and compilers from the PE are added to
`compilers.yaml`. Available with the June 2022 release of the Cray
Programming Environment. (#24894, #30428)
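A usage sketch of the two commands named above (output omitted):
```console
$ spack external find                 # also reads the Cray PE manifest when present
$ spack external read-cray-manifest   # query only the Programming Environment
```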
6. **New binary format and hardened signing**
Spack now has an updated binary format, with improvements for
security. The new format has a detached signature file, and Spack
verifies the signature before untarring or decompressing the binary
package. The previous format embedded the signature in a `tar`
file, which required the client to run `tar` *before* verifying
(#30750). Spack can still install from build caches using the old
format, but we encourage users to switch to the new format going
forward.
Production GitLab pipelines have been hardened to securely sign
binaries. There is now a separate signing stage so that signing
keys are never exposed to build system code, and signing keys are
ephemeral and only live as long as the signing pipeline stage.
(#30753)
7. **Bootstrap mirror generation**
The `spack bootstrap mirror` command can automatically create a
mirror for bootstrapping the concretizer and other needed
dependencies in an air-gapped environment. (#28556)
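A hedged sketch of the command; the target directory is illustrative, and exact options should be confirmed with `spack bootstrap mirror --help`:
```console
$ spack bootstrap mirror /tmp/bootstrap-mirror   # copy this directory to the air-gapped machine
```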
8. **Nascent Windows support**
Spack now has initial support for Windows. Spack core has been
refactored to run in the Windows environment, and a small number of
packages can now build for Windows. More details are
[in the documentation](https://spack.rtfd.io/en/latest/getting_started.html#spack-on-windows)
(#27021, #28385, many more)
9. **Makefile generation**
`spack env depfile` can be used to generate a `Makefile` from an
environment, which can be used to build the packages in the environment
in parallel on a single node, e.g.:
```console
spack -e myenv env depfile > Makefile
make
```
Spack propagates `gmake` jobserver information to builds so that
their jobs can share cores. (#30039, #30254, #30302, #30526)
10. **New variant features**
In addition to being conditional themselves, variants can now have
[conditional *values*](https://spack.readthedocs.io/en/latest/packaging_guide.html#conditional-possible-values)
that are only possible for certain configurations of a package. (#29530)
Variants can be
[declared "sticky"](https://spack.readthedocs.io/en/latest/packaging_guide.html#sticky-variants),
which prevents them from being enabled or disabled by the
concretizer. Sticky variants must be set explicitly by users
on the command line or in `packages.yaml`. (#28630)
* Allow conditional possible values in variants
* Add a "sticky" property to variants
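A hypothetical recipe sketch of both features, following the packaging-guide links above; the package name, versions, checksums, variant names, and the import line (which varies across Spack versions) are all illustrative:
```python
# Hypothetical recipe, a sketch only: nothing here comes from the Spack repositories.
from spack.package import *  # older Spack versions used `from spack import *`


class Example(Package):
    """Demo of conditional variant values (#29530) and sticky variants (#28630)."""

    homepage = "https://example.com"
    url = "https://example.com/example-1.0.tar.gz"

    version("2.0", sha256="0" * 64)  # placeholder checksum
    version("1.0", sha256="1" * 64)  # placeholder checksum

    # Conditional *value*: "gui" is only a possible value at v2.0 or newer.
    variant(
        "frontend",
        default="cli",
        values=("cli", conditional("gui", when="@2.0:")),
        description="User interface to build",
    )

    # Sticky variant: the concretizer will not flip it; only an explicit
    # request on the command line or in packages.yaml changes it.
    variant("vendored-deps", default=True, sticky=True,
            description="Build against bundled third-party sources")
```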
## Other new features of note
* Environment views can optionally link only `run` dependencies
with `link:run` (#29336)
* `spack external find --all` finds library-only packages in
addition to build dependencies (#28005)
* Customizable `config:license_dir` option (#30135)
* `spack external find --path PATH` takes a custom search path (#30479)
* `spack spec` has a new `--format` argument like `spack find` (#27908)
* `spack concretize --quiet` skips printing concretized specs (#30272)
* `spack info` now has cleaner output and displays test info (#22097)
* Package-level submodule option for git commit versions (#30085, #30037)
* Using `/hash` syntax to refer to concrete specs in an environment
now works even if `/hash` is not installed. (#30276)
## Major internal refactors
* full hash (see above)
* new develop versioning scheme `0.19.0-dev0`
* Allow for multiple dependencies/dependents from the same package (#28673)
* Splice differing virtual packages (#27919)
## Performance Improvements
* Concretization of large environments with `unify: when_possible` is
much faster than concretizing separately (#28941, see above)
* Single-pass view generation algorithm is 2.6x faster (#29443)
## Archspec improvements
* `oneapi` and `dpcpp` flag support (#30783)
* better support for `M1` and `a64fx` (#30683)
## Removals and Deprecations
* Spack no longer supports Python `2.6` (#27256)
* Removed deprecated `--run-tests` option of `spack install`;
use `spack test` (#30461)
* Removed deprecated `spack flake8`; use `spack style` (#27290)
* Deprecate `spack:concretization` config option; use
`concretizer:unify` (#30038)
* Deprecate top-level module configuration; use module sets (#28659)
* `spack activate` and `spack deactivate` are deprecated in favor of
environments; will be removed in `0.19.0` (#29430; see also `link:run`
in #29336 above)
## Notable Bugfixes
* Fix bug that broke locks with many parallel builds (#27846)
* Many bugfixes and consistency improvements for the new concretizer
and `--reuse` (#30357, #30092, #29835, #29933, #28605, #29694, #28848)
## Packages
* `CMakePackage` uses `CMAKE_INSTALL_RPATH_USE_LINK_PATH` (#29703)
* Refactored `lua` support: `lua-lang` virtual supports both
`lua` and `luajit` via new `LuaPackage` build system(#28854)
* PythonPackage: now installs packages with `pip` (#27798)
* Python: improve site_packages_dir handling (#28346)
* Extends: support spec, not just package name (#27754)
* `find_libraries`: search for both .so and .dylib on macOS (#28924)
* Use stable URLs and `?full_index=1` for all github patches (#29239)
## Spack community stats
* 6,416 total packages, 458 new since `v0.17.0`
* 219 new Python packages
* 60 new R packages
* 377 people contributed to this release
* 337 committers to packages
* 85 committers to core
# v0.17.3 (2022-07-14)
### Spack bugfixes
* Fix missing chgrp on symlinks in package installations (#30743)
* Allow having non-existing upstreams (#30744, #30746)
* Fix `spack stage` with custom paths (#30448)
* Fix failing call for `spack buildcache save-specfile` (#30637)
* Fix globbing in compiler wrapper (#30699)
# v0.17.2 (2022-04-13)
### Spack bugfixes
* Fix --reuse with upstreams set in an environment (#29680)
* config add: fix parsing of validator error to infer type from oneOf (#29475)
* Fix spack -C command_line_scope used in conjunction with other flags (#28418)
* Use Spec.constrain to construct spec lists for stacks (#28783)
* Fix bug occurring when searching for inherited patches in packages (#29574)
* Fixed a few bugs when manipulating symlinks (#28318, #29515, #29636)
* Fixed a few minor bugs affecting command prompt, terminal title and argument completion (#28279, #28278, #28939, #29405, #29070, #29402)
* Fixed a few bugs affecting the spack ci command (#29518, #29419)
* Fix handling of Intel compiler environment (#29439)
* Fix a few edge cases when reindexing the DB (#28764)
* Remove "Known issues" from documentation (#29664)
* Other miscellaneous bugfixes (0b72e070583fc5bcd016f5adc8a84c99f2b7805f, #28403, #29261)
# v0.17.1 (2021-12-23)
### Spack Bugfixes

@@ -2,10 +2,10 @@
[![Unit Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Bootstrapping](https://github.com/spack/spack/actions/workflows/bootstrap.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/bootstrap.yml)
[![macOS Builds (nightly)](https://github.com/spack/spack/workflows/macOS%20builds%20nightly/badge.svg?branch=develop)](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
[![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
[![Containers](https://github.com/spack/spack/actions/workflows/build-containers.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/build-containers.yml)
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Slack](https://slack.spack.io/badge.svg)](https://slack.spack.io)
Spack is a multi-platform package manager that builds and installs

@@ -1,18 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import subprocess
import sys
def getpywin():
try:
import win32con # noqa: F401
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "-q", "install", "--upgrade", "pip"])
subprocess.check_call([sys.executable, "-m", "pip", "-q", "install", "pywin32"])
if __name__ == "__main__":
getpywin()

@@ -77,23 +77,21 @@ if "ruamel" in sys.modules:
try:
import argparse
except ImportError:
argparse_pyc = os.path.join(spack_external_libs, "argparse.pyc")
argparse_pyc = os.path.join(spack_external_libs, 'argparse.pyc')
if not os.path.exists(argparse_pyc):
raise
try:
os.remove(argparse_pyc)
import argparse # noqa: F401
import argparse # noqa
except Exception:
msg = (
"The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. "
"Either delete it manually or ask some administrator to "
"delete it for you."
)
msg = ('The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. '
'Either delete it manually or ask some administrator to '
'delete it for you.')
print(msg.format(argparse_pyc))
sys.exit(1)
import spack.main # noqa: E402
import spack.main # noqa
# Once we've set up the system path, run the spack main method
if __name__ == "__main__":

@@ -1,223 +0,0 @@
:: Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
:: Spack Project Developers. See the top-level COPYRIGHT file for details.
::
:: SPDX-License-Identifier: (Apache-2.0 OR MIT)
::#######################################################################
::
:: This file is part of Spack and sets up the spack environment for batch,
:: This includes environment modules and lmod support,
:: and it also puts spack in your path. The script also checks that at least
:: module support exists, and provides suggestions if it doesn't. Source
:: it like this:
::
:: . /path/to/spack/install/spack_cmd.bat
::
@echo off
set spack=%SPACK_ROOT%\bin\spack
::#######################################################################
:: This is a wrapper around the spack command that forwards calls to
:: 'spack load' and 'spack unload' to shell functions. This in turn
:: allows them to be used to invoke environment modules functions.
::
:: 'spack load' is smarter than just 'load' because it converts its
:: arguments into a unique Spack spec that is then passed to module
:: commands. This allows the user to use packages without knowing all
:: their installation details.
::
:: e.g., rather than requiring a full spec for libelf, the user can type:
::
:: spack load libelf
::
:: This will first find the available libelf module file and use a
:: matching one. If there are two versions of libelf, the user would
:: need to be more specific, e.g.:
::
:: spack load libelf@0.8.13
::
:: This is very similar to how regular spack commands work and it
:: avoids the need to come up with a user-friendly naming scheme for
:: spack module files.
::#######################################################################
:_sp_shell_wrapper
set "_sp_flags="
set "_sp_args="
set "_sp_subcommand="
setlocal enabledelayedexpansion
:: commands have the form '[flags] [subcommand] [args]'
:: flags will always start with '-', e.g. --help or -V
:: subcommands will never start with '-'
:: everything after the subcommand is an arg
for %%x in (%*) do (
set t="%%~x"
if "!t:~0,1!" == "-" (
if defined _sp_subcommand (
:: We already have a subcommand, processing args now
set "_sp_args=!_sp_args! !t!"
) else (
set "_sp_flags=!_sp_flags! !t!"
shift
)
) else if not defined _sp_subcommand (
set "_sp_subcommand=!t!"
shift
) else (
set "_sp_args=!_sp_args! !t!"
shift
)
)
:: --help, -h and -V flags don't require further output parsing.
:: If we encounter, execute and exit
if defined _sp_flags (
if NOT "%_sp_flags%"=="%_sp_flags:-h=%" (
python "%spack%" %_sp_flags%
exit /B 0
) else if NOT "%_sp_flags%"=="%_sp_flags:--help=%" (
python "%spack%" %_sp_flags%
exit /B 0
) else if NOT "%_sp_flags%"=="%_sp_flags:-V=%" (
python "%spack%" %_sp_flags%
exit /B 0
)
)
:: pass parsed variables outside of local scope. Need to do
:: this because delayedexpansion can only be set by setlocal
echo %_sp_flags%>flags
echo %_sp_args%>args
echo %_sp_subcommand%>subcmd
endlocal
set /p _sp_subcommand=<subcmd
set /p _sp_flags=<flags
set /p _sp_args=<args
set str_subcommand=%_sp_subcommand:"='%
set str_flags=%_sp_flags:"='%
set str_args=%_sp_args:"='%
if "%str_subcommand%"=="ECHO is off." (set "_sp_subcommand=")
if "%str_flags%"=="ECHO is off." (set "_sp_flags=")
if "%str_args%"=="ECHO is off." (set "_sp_args=")
del subcmd
del flags
del args
:: Filter out some commands. For any others, just run the command.
if "%_sp_subcommand%" == "cd" (
goto :case_cd
) else if "%_sp_subcommand%" == "env" (
goto :case_env
) else if "%_sp_subcommand%" == "load" (
goto :case_load
) else if "%_sp_subcommand%" == "unload" (
goto :case_load
) else (
goto :default_case
)
::#######################################################################
:case_cd
:: Check for --help or -h
:: TODO: This is not exactly the same as setup-env.
:: In setup-env, '--help' or '-h' must follow the cd
:: Here, they may be anywhere in the args
if defined _sp_args (
if NOT "%_sp_args%"=="%_sp_args:--help=%" (
python "%spack%" cd -h
goto :end_switch
) else if NOT "%_sp_args%"=="%_sp_args:-h=%" (
python "%spack%" cd -h
goto :end_switch
)
)
for /F "tokens=* USEBACKQ" %%F in (
`python "%spack%" location %_sp_args%`) do (
set "LOC=%%F"
)
for %%Z in ("%LOC%") do if EXIST %%~sZ\NUL (cd /d "%LOC%")
goto :end_switch
:case_env
:: If no args or args contain --bat or -h/--help: just execute.
if NOT defined _sp_args (
goto :default_case
)else if NOT "%_sp_args%"=="%_sp_args:--help=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args: -h=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args:--bat=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args:deactivate=%" (
for /f "tokens=* USEBACKQ" %%I in (
`call python "%spack%" %_sp_flags% env deactivate --bat %_sp_args:deactivate=%`
) do %%I
) else if NOT "%_sp_args%"=="%_sp_args:activate=%" (
for /f "tokens=* USEBACKQ" %%I in (
`call python "%spack%" %_sp_flags% env activate --bat %_sp_args:activate=%`
) do %%I
) else (
goto :default_case
)
goto :end_switch
:case_load
:: If args contain --sh, --csh, or -h/--help: just execute.
if defined _sp_args (
if NOT "%_sp_args%"=="%_sp_args:--help=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args: -h=%" (
goto :default_case
) else if NOT "%_sp_args%"=="%_sp_args:--bat=%" (
goto :default_case
)
)
for /f "tokens=* USEBACKQ" %%I in (
`python "%spack%" %_sp_flags% %_sp_subcommand% --bat %_sp_args%`) do %%I
)
goto :end_switch
:case_unload
goto :case_load
:default_case
python "%spack%" %_sp_flags% %_sp_subcommand% %_sp_args%
goto :end_switch
:end_switch
exit /B %ERRORLEVEL%
::########################################################################
:: Prepends directories to path, if they exist.
:: pathadd /path/to/dir # add to PATH
:: or pathadd OTHERPATH /path/to/dir # add to OTHERPATH
::########################################################################
:_spack_pathadd
set "_pa_varname=PATH"
set "_pa_new_path=%~1"
if NOT "%~2" == "" (
set "_pa_varname=%~1"
set "_pa_new_path=%~2"
)
set "_pa_oldvalue=%_pa_varname%"
for %%Z in ("%_pa_new_path%") do if EXIST %%~sZ\NUL (
if defined %_pa_oldvalue% (
set "_pa_varname=%_pa_new_path%:%_pa_oldvalue%"
) else (
set "_pa_varname=%_pa_new_path%"
)
)
exit /b 0
:: set module system roots
:_sp_multi_pathadd
for %%I in (%~2) do (
for %%Z in (%_sp_compatible_sys_types%) do (
:pathadd "%~1" "%%I\%%Z"
)
)
exit /B %ERRORLEVEL%

@@ -1,72 +0,0 @@
@ECHO OFF
setlocal EnableDelayedExpansion
:: (c) 2021 Lawrence Livermore National Laboratory
:: To use this file independently of Spack's installer, execute this script in its directory, or add the
:: associated bin directory to your PATH. Invoke to launch Spack Shell.
::
:: source_dir/spack/bin/spack_cmd.bat
::
pushd %~dp0..
set SPACK_ROOT=%CD%
pushd %CD%\..
set spackinstdir=%CD%
popd
:: Check if Python is on the PATH
if not defined python_pf_ver (
(for /f "delims=" %%F in ('where python.exe') do (
set "python_pf_ver=%%F"
goto :found_python
) ) 2> NUL
)
:found_python
if not defined python_pf_ver (
:: If not, look for Python from the Spack installer
:get_builtin
(for /f "tokens=*" %%g in ('dir /b /a:d "!spackinstdir!\Python*"') do (
set "python_ver=%%g")) 2> NUL
if not defined python_ver (
echo Python was not found on your system.
echo Please install Python or add Python to your PATH.
) else (
set "py_path=!spackinstdir!\!python_ver!"
set "py_exe=!py_path!\python.exe"
)
goto :exitpoint
) else (
:: Python is already on the path
set "py_exe=!python_pf_ver!"
(for /F "tokens=* USEBACKQ" %%F in (
`"!py_exe!" --version`) do (set "output=%%F")) 2>NUL
if not "!output:Microsoft Store=!"=="!output!" goto :get_builtin
goto :exitpoint
)
:exitpoint
set "PATH=%SPACK_ROOT%\bin\;%PATH%"
if defined py_path (
set "PATH=%py_path%;%PATH%"
)
if defined py_exe (
"%py_exe%" "%SPACK_ROOT%\bin\haspywin.py"
"%py_exe%" "%SPACK_ROOT%\bin\spack" external find python >NUL
)
set "EDITOR=notepad"
DOSKEY spacktivate=spack env activate $*
@echo **********************************************************************
@echo ** Spack Package Manager
@echo **********************************************************************
IF "%1"=="" GOTO CONTINUE
set
GOTO:EOF
:continue
set PROMPT=[spack] %PROMPT%
%comspec% /k

@@ -1,10 +0,0 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
$Env:SPACK_PS1_PATH="$PSScriptRoot\..\share\spack\setup-env.ps1"
& (Get-Process -Id $pid).Path -NoExit {
. $Env:SPACK_PS1_PATH ;
Push-Location $ENV:SPACK_ROOT
}

@@ -6,17 +6,27 @@ bootstrap:
# by Spack is installed in a "store" subfolder of this root directory
root: $user_cache_path/bootstrap
# Methods that can be used to bootstrap software. Each method may or
# may not be able to bootstrap all the software that Spack needs,
# may not be able to bootstrap all of the software that Spack needs,
# depending on its type.
sources:
- name: 'github-actions-v0.2'
metadata: $spack/share/spack/bootstrap/github-actions-v0.2
- name: 'github-actions-v0.1'
metadata: $spack/share/spack/bootstrap/github-actions-v0.1
- name: 'spack-install'
metadata: $spack/share/spack/bootstrap/spack-install
- name: 'github-actions'
type: buildcache
description: |
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.1
homepage: https://github.com/alalazo/spack-bootstrap-mirrors
releases: https://github.com/alalazo/spack-bootstrap-mirrors/releases
# This method is just Spack bootstrapping the software it needs from sources.
# It has been added here so that users can selectively disable bootstrapping
# from sources by "untrusting" it.
- name: spack-install
type: install
description: |
Specs built from sources by Spack. May take a long time.
trusted:
# By default we trust bootstrapping from sources and from binaries
# produced on Github via the workflow
github-actions-v0.2: true
github-actions: true
spack-install: true
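
These `trusted` keys correspond to the `spack bootstrap trust`/`untrust` subcommands exercised in the workflows above, e.g. (sketch):
```console
$ spack bootstrap untrust github-actions   # stop using the binary buildcache source
$ spack bootstrap trust github-actions     # turn it back on
```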

@@ -14,23 +14,4 @@ concretizer:
# concretizing specs. If `true`, we'll try to use as many installs/binaries
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization.
reuse: true
# Options that tune which targets are considered for concretization. The
# concretization process is very sensitive to the number targets, and the time
# needed to reach a solution increases noticeably with the number of targets
# considered.
targets:
# Determine whether we want to target specific or generic microarchitectures.
# An example of the first kind might be for instance "skylake" or "bulldozer",
# while generic microarchitectures are for instance "aarch64" or "x86_64_v4".
granularity: microarchitectures
# If "false" allow targets that are incompatible with the current host (for
# instance concretize with target "icelake" while running on "haswell").
# If "true" only allow targets that are compatible with the host.
host_compatible: true
# When "true" concretize root specs of environments together, so that each unique
# package in an environment corresponds to one concrete spec. This ensures
# environments can always be activated. When "false" perform concretization separately
# on each root spec, allowing different versions and variants of the same package in
# an environment.
unify: false
reuse: false
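# Either default can be overridden for a single installation on the command
# line, for example:
#
#   spack install --reuse mpich   # prefer already-installed dependencies
#   spack install --fresh mpich   # force a fresh concretization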

View File

@@ -33,9 +33,6 @@ config:
template_dirs:
- $spack/share/spack/templates
# Directory where licenses should be located
license_dir: $spack/etc/spack/licenses
# Temporary locations Spack can try to use for builds.
#
# Recommended options are given below.

View File

@@ -13,4 +13,9 @@
# Per-user settings (overrides default and site settings):
# ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules: {}
modules:
prefix_inspections:
lib:
- LD_LIBRARY_PATH
lib64:
- LD_LIBRARY_PATH

View File

@@ -13,4 +13,9 @@
# Per-user settings (overrides default and site settings):
# ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules: {}
modules:
prefix_inspections:
lib:
- LD_LIBRARY_PATH
lib64:
- LD_LIBRARY_PATH
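# Sketch of the effect: once a module generated with these prefix inspections
# is loaded, LD_LIBRARY_PATH picks up the package's lib/ and lib64/
# directories, e.g.
#
#   % module load mpich
#   % echo $LD_LIBRARY_PATH
#   /path/to/mpich/lib:/path/to/mpich/lib64:...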

View File

@@ -35,10 +35,13 @@ modules:
# These are configurations for the module set named "default"
default:
# These values are defaulted in the code. They are not defaulted here so
# that we can enable backwards compatibility with the old syntax more
# easily (old value is in the config yaml, config:module_roots)
# Where to install modules
roots:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
# roots:
# tcl: $spack/share/spack/modules
# lmod: $spack/share/spack/lmod
# What type of modules to use
enable:
- tcl

View File

@@ -25,19 +25,17 @@ packages:
fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame]
fuse: [libfuse]
gl: [glx, osmesa]
gl: [mesa+opengl, mesa18, opengl]
glu: [mesa-glu, openglu]
glx: [mesa+glx, mesa18+glx, opengl]
golang: [gcc]
iconv: [libiconv]
ipp: [intel-ipp]
java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libglx: [mesa+glx, mesa18+glx]
libllvm: [llvm]
libosmesa: [mesa+osmesa, mesa18+osmesa]
lua-lang: [lua, lua-luajit-openresty, lua-luajit]
luajit: [lua-luajit-openresty, lua-luajit]
libllvm: [llvm, llvm-amdgpu]
lua-lang: [lua, lua-luajit]
mariadb-client: [mariadb-c-client, mariadb]
mkl: [intel-mkl]
mpe: [mpe2]
@@ -45,6 +43,7 @@ packages:
mysql-client: [mysql, mariadb-c-client]
opencl: [pocl]
onedal: [intel-oneapi-dal]
osmesa: [mesa+osmesa, mesa18+osmesa]
pbs: [openpbs, torque]
pil: [py-pillow]
pkgconfig: [pkgconf, pkg-config]
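# These provider lists are ordered by preference and can be overridden in a
# user's own packages.yaml, e.g. to prefer MVAPICH2 as the mpi provider
# (a sketch):
#
#   packages:
#     all:
#       providers:
#         mpi: [mvapich2, openmpi]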

View File

@@ -1,5 +0,0 @@
config:
locks: false
concretizer: original
build_stage::
- '$spack/.staging'

View File

@@ -192,32 +192,32 @@ you can use them to customize an installation in :ref:`sec-specs`.
Reusing installed dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, when you run ``spack install``, Spack tries hard to reuse existing installations
as dependencies, either from a local store or from remote buildcaches if configured.
This minimizes unwanted rebuilds of common dependencies, in particular if
you update Spack frequently.
.. warning::
In case you want the latest versions and configurations to be installed instead,
you can add the ``--fresh`` option:
The ``--reuse`` option described here will become the default installation
method in the next Spack version, and you will be able to get the current
behavior by using ``spack install --fresh``.
By default, when you run ``spack install``, Spack tries to build a new
version of the package you asked for, along with updated versions of
its dependencies. This gets you the latest versions and configurations,
but it can result in unwanted rebuilds if you update Spack frequently.
If you want Spack to try hard to reuse existing installations as dependencies,
you can add the ``--reuse`` option:
.. code-block:: console
$ spack install --fresh mpich
$ spack install --reuse mpich
Reusing installations in this mode is "accidental", and happens only if
there's a match between existing installations and what Spack would have installed
anyhow.
You can use the ``spack spec -I mpich`` command to see what
This will not do anything if ``mpich`` is already installed. If ``mpich``
is not installed, but dependencies like ``hwloc`` and ``libfabric`` are,
``mpich`` will be built with the installed versions, if possible.
You can use the :ref:`spack spec -I <cmd-spack-spec>` command to see what
will be reused and what will be built before you install.
You can configure Spack to use the ``--fresh`` behavior by default in
``concretizer.yaml``:
.. code-block:: yaml
concretizer:
reuse: false
You can configure Spack to use the ``--reuse`` behavior by default in
``concretizer.yaml``.
.. _cmd-spack-uninstall:
@@ -896,8 +896,8 @@ your path:
$ which mpicc
~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
These commands will add appropriate directories to your ``PATH``
and ``MANPATH`` according to the
These commands will add appropriate directories to your ``PATH``,
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH`` according to the
:ref:`prefix inspections <customize-env-modifications>` defined in your
modules configuration.
When you no longer want to use a package, you can type unload or
@@ -1093,8 +1093,6 @@ could depend on ``mpich@1.2:`` if it can only build with version
Below are more details about the specifiers that you can add to specs.
.. _version-specifier:
^^^^^^^^^^^^^^^^^
Version specifier
^^^^^^^^^^^^^^^^^
@@ -1110,37 +1108,6 @@ set of arbitrary versions, such as ``@1.0,1.5,1.7`` (``1.0``, ``1.5``,
or ``1.7``). When you supply such a specifier to ``spack install``,
it constrains the set of versions that Spack will install.
For packages with a ``git`` attribute, ``git`` references
may be specified instead of a numerical version i.e. branches, tags
and commits. Spack will stage and build based off the ``git``
reference provided. Acceptable syntaxes for this are:
.. code-block:: sh
# branches and tags
foo@git.develop # use the develop branch
foo@git.0.19 # use the 0.19 tag
# commit hashes
foo@abcdef1234abcdef1234abcdef1234abcdef1234 # 40 character hashes are automatically treated as git commits
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234
Spack versions from git reference either have an associated version supplied by the user,
or infer a relationship to known versions from the structure of the git repository. If an
associated version is supplied by the user, Spack treats the git version as equivalent to that
version for all version comparisons in the package logic (e.g. ``depends_on('foo', when='@1.5')``).
The associated version can be assigned with ``[git ref]=[version]`` syntax, with the caveat that the specified version must be known to Spack from either the package definition or the configuration preferences (i.e. ``packages.yaml``).
.. code-block:: sh
foo@git.my_ref=3.2 # use the my_ref tag or branch, but treat it as version 3.2 for version comparisons
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234=develop # use the given commit, but treat it as develop for version comparisons
If an associated version is not supplied then the tags in the git repo are used to determine
the most recent previous version known to Spack. Details about how versions are compared
and how Spack determines if one version is less than another are discussed in the developer guide.
If the version spec is not provided, then Spack will choose one
according to policies set for the particular spack installation. If
the spec is ambiguous, i.e. it could match multiple versions, Spack
@@ -1756,8 +1723,8 @@ Activating Extensions in a View
Another way to use extensions is to create a view, which merges the
python installation along with the extensions into a single prefix.
See :ref:`configuring_environment_views` for a more in-depth description
of views.
See :ref:`filesystem-views` for a more in-depth description of views and
:ref:`cmd-spack-view` for usage of the ``spack view`` command.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Activating Extensions Globally

View File

@@ -50,13 +50,6 @@ build cache files for the "ninja" spec:
Note that the targeted spec must already be installed. Once you have a build cache,
you can add it as a mirror, discussed next.
.. warning::
Spack improved the format used for binary caches in v0.18. The entire v0.18 series
will be able to verify and install binary caches both in the new and in the old format.
Support for using the old format is expected to end in v0.19, so we advise users to
recreate relevant buildcaches using Spack v0.18 or higher.
---------------------------------------
Finding or installing build cache files
---------------------------------------

View File

@@ -1,160 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _bootstrapping:
=============
Bootstrapping
=============
In the :ref:`Getting started <getting_started>` Section we already mentioned that
Spack can bootstrap some of its dependencies, including ``clingo``. In fact, there
is an entire command dedicated to the management of every aspect of bootstrapping:
.. command-output:: spack bootstrap --help
The first thing to understand about bootstrapping in Spack is that each of
Spack's dependencies is bootstrapped lazily, i.e. the first time it is needed and
can't be found. You can readily check if any prerequisite for using Spack
is missing by running:
.. code-block:: console
% spack bootstrap status
Spack v0.17.1 - python@3.8
[FAIL] Core Functionalities
[B] MISSING "clingo": required to concretize specs
[FAIL] Binary packages
[B] MISSING "gpg2": required to sign/verify buildcaches
Spack will take care of bootstrapping any missing dependency marked as [B]. Dependencies marked as [-] are instead required to be found on the system.
In the case of the output shown above, Spack detected that both ``clingo`` and ``gnupg``
are missing, and it gives detailed information on why they are needed and whether
they can be bootstrapped. Running a command that concretizes a spec, like:
.. code-block:: console
% spack solve zlib
==> Bootstrapping clingo from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.1/build_cache/darwin-catalina-x86_64/apple-clang-12.0.0/clingo-bootstrap-spack/darwin-catalina-x86_64-apple-clang-12.0.0-clingo-bootstrap-spack-p5on7i4hejl775ezndzfdkhvwra3hatn.spack
==> Installing "clingo-bootstrap@spack%apple-clang@12.0.0~docs~ipo+python build_type=Release arch=darwin-catalina-x86_64" from a buildcache
[ ... ]
triggers the bootstrapping of clingo from pre-built binaries as expected.
-----------------------
The Bootstrapping store
-----------------------
The software installed for bootstrapping purposes is deployed in a separate store.
Its location can be checked with the following command:
.. code-block:: console
% spack bootstrap root
It can also be changed with the same command, by specifying the desired new path:
.. code-block:: console
% spack bootstrap root /opt/spack/bootstrap
You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
clingo-bootstrap@spack libassuan@2.5.5 libgpg-error@1.42 libksba@1.5.1 pinentry@1.1.1 zlib@1.2.11
gnupg@2.3.1 libgcrypt@1.9.3 libiconv@1.16 npth@1.6 python@3.8
If needed, you can remove all the software in the current bootstrap store with:
.. code-block:: console
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
--------------------------------------------
Enabling and disabling bootstrapping methods
--------------------------------------------
Bootstrapping is always performed by trying the methods listed by:
.. command-output:: spack bootstrap list
in the order they appear, from top to bottom. By default Spack is
configured to try bootstrapping from pre-built binaries first, and to
fall back to bootstrapping from sources if that fails.
If need be, you can disable bootstrapping altogether by running:
.. code-block:: console
% spack bootstrap disable
in which case it's your responsibility to ensure Spack runs in an
environment where all its prerequisites are installed. You can
also configure Spack to skip certain bootstrapping methods by *untrusting*
them. For instance:
.. code-block:: console
% spack bootstrap untrust github-actions
==> "github-actions" is now untrusted and will not be used for bootstrapping
tells Spack to skip trying to bootstrap from binaries. To add the "github-actions" method back you can:
.. code-block:: console
% spack bootstrap trust github-actions
There is also an option to reset the bootstrapping configuration to Spack's defaults:
.. code-block:: console
% spack bootstrap reset
==> Bootstrapping configuration is being reset to Spack's defaults. Current configuration will be lost.
Do you want to continue? [Y/n]
%
----------------------------------------
Creating a mirror for air-gapped systems
----------------------------------------
Spack's default configuration for bootstrapping relies on the user having
access to the internet, either to fetch pre-compiled binaries or source tarballs.
Sometimes, though, Spack is deployed on air-gapped systems where such access is denied.
To help in these situations, Spack has a command that recreates, in a local folder
of your choice, a mirror containing the source tarballs and/or binary packages needed for
bootstrapping.
.. code-block:: console
% spack bootstrap mirror --binary-packages /opt/bootstrap
==> Adding "clingo-bootstrap@spack+python %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "gnupg@2.3: %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "patchelf@0.13.1:0.13.99 %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding binary packages from "https://github.com/alalazo/spack-bootstrap-mirrors/releases/download/v0.1-rc.2/bootstrap-buildcache.tar.gz" to the mirror at /opt/bootstrap/local-mirror
To register the mirror on the platform where it's supposed to be used run the following command(s):
% spack bootstrap add --trust local-sources /opt/bootstrap/metadata/sources
% spack bootstrap add --trust local-binaries /opt/bootstrap/metadata/binaries
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.

View File

@@ -5,9 +5,9 @@
.. _build-settings:
================================
Package Settings (packages.yaml)
================================
===================
Build Customization
===================
Spack allows you to customize how your software is built through the
``packages.yaml`` file. Using it, you can make Spack prefer particular
@@ -219,65 +219,33 @@ Concretizer options
but you can also use ``concretizer.yaml`` to customize aspects of the
algorithm it uses to select the dependencies you install:
.. literalinclude:: _spack_root/etc/spack/defaults/concretizer.yaml
:language: yaml
.. _code-block: yaml
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Reuse already installed packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
concretizer:
# Whether to consider installed packages or packages from buildcaches when
# concretizing specs. If `true`, we'll try to use as many installs/binaries
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization.
reuse: false
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
^^^^^^^^^^^^^^^^
``reuse``
^^^^^^^^^^^^^^^^
This controls whether Spack will prefer to use installed packages (``true``), or
whether it will do a "fresh" installation and prefer the latest settings from
``package.py`` files and ``packages.yaml`` (``false``).
You can use:
``package.py`` files and ``packages.yaml`` (``false``).
.. code-block:: console
You can use ``spack install --reuse`` to enable reuse for a single installation,
and you can use ``spack install --fresh`` to do a fresh install if ``reuse`` is
enabled by default.
% spack install --reuse <spec>
.. note::
to enable reuse for a single installation, and you can use:
``reuse: false`` is the current default, but ``reuse: true`` will be the default
in the next Spack release. You will still be able to use ``spack install --fresh``
to get the old behavior.
.. code-block:: console
spack install --fresh <spec>
to do a fresh install if ``reuse`` is enabled by default.
``reuse: true`` is the default.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Selection of the target microarchitectures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The options under the ``targets`` attribute control which targets are considered during a solve.
Currently the options in this section are only configurable from the ``concretizer.yaml`` file
and there are no corresponding command line arguments to enable them for a single solve.
The ``granularity`` option can take two possible values: ``microarchitectures`` and ``generic``.
If set to:
.. code-block:: yaml
concretizer:
targets:
granularity: microarchitectures
Spack will consider all the microarchitectures known to ``archspec`` to label nodes for
compatibility. If instead the option is set to:
.. code-block:: yaml
concretizer:
targets:
granularity: generic
Spack will consider only generic microarchitectures. For instance, when running on a
Haswell node, Spack will consider ``haswell`` as the best target in the former case and
``x86_64_v3`` as the best target in the latter case.
The ``host_compatible`` option is a Boolean that determines whether the
microarchitectures considered during the solve are constrained to be compatible with the
host Spack is currently running on. For instance, if this option is set to ``true``, a
user cannot concretize for ``target=icelake`` while running on a Haswell node.
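For completeness, the corresponding snippet of configuration is (a sketch
mirroring the defaults shown earlier):

.. code-block:: yaml

   concretizer:
     targets:
       host_compatible: true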
.. _package-preferences:
@@ -339,72 +307,6 @@ concretization rules. A provider lists a value that packages may
``depend_on`` (e.g., MPI) and a list of rules for fulfilling that
dependency.
.. _package-requirements:
--------------------
Package Requirements
--------------------
You can use the configuration to force the concretizer to choose
specific properties for packages when building them. Like preferences,
these are only applied when the package is required by some other
request (e.g. if the package is needed as a dependency of a
request to ``spack install``).
An example of where this is useful is if you have a package that
is normally built as a dependency but only under certain circumstances
(e.g. only when a variant on a dependent is active): you can make
sure that it always builds the way you want it to; this distinguishes
package configuration requirements from constraints that you add to
``spack install`` or to environments (in those cases, the associated
packages are always built).
The following is an example of how to enforce package properties in
``packages.yaml``:
.. code-block:: yaml
packages:
libfabric:
require: "@1.13.2"
openmpi:
require:
- any_of: ["~cuda", "gcc"]
mpich:
require:
- one_of: ["+cuda", "+rocm"]
Requirements are expressed using Spec syntax (the same as what is provided
to ``spack install``). In the simplest case, you can specify attributes
that you always want the package to have by providing a single spec to
``require``; in the above example, ``libfabric`` will always build
with version 1.13.2.
You can provide a more-relaxed constraint and allow the concretizer to
choose between a set of options using ``any_of`` or ``one_of``:
* ``any_of`` is a list of specs. One of those specs must be satisfied
and it is also allowed for the concretized spec to match more than one.
In the above example, that means you could build ``openmpi+cuda%gcc``,
``openmpi~cuda%clang`` or ``openmpi~cuda%gcc`` (in the last case,
note that both specs in the ``any_of`` for ``openmpi`` are
satisfied).
* ``one_of`` is also a list of specs, and the final concretized spec
must match exactly one of them. In the above example, that means
you could build ``mpich+cuda`` or ``mpich+rocm`` but not
``mpich+cuda+rocm`` (note the current package definition for
``mpich`` already includes a conflict, so this is redundant but
still demonstrates the concept).
Other notes about ``requires``:
* You can only specify requirements for specific packages: you cannot
add ``requires`` under ``all``.
* You cannot specify requirements for virtual packages (e.g. you can
specify requirements for ``openmpi`` but not ``mpi``).
* For ``any_of`` and ``one_of``, the order of specs indicates a
preference: items that appear earlier in the list are preferred
(note that these preferences can be ignored in favor of others).
.. _package_permissions:

View File

@@ -39,7 +39,6 @@ on these ideas for each distinct build system that Spack supports:
build_systems/autotoolspackage
build_systems/cmakepackage
build_systems/cachedcmakepackage
build_systems/mesonpackage
build_systems/qmakepackage
build_systems/sippackage
@@ -48,12 +47,10 @@ on these ideas for each distinct build system that Spack supports:
:maxdepth: 1
:caption: Language-specific
build_systems/luapackage
build_systems/octavepackage
build_systems/perlpackage
build_systems/pythonpackage
build_systems/rpackage
build_systems/racketpackage
build_systems/rubypackage
.. toctree::
@@ -62,12 +59,11 @@ on these ideas for each distinct build system that Spack supports:
build_systems/bundlepackage
build_systems/cudapackage
build_systems/custompackage
build_systems/inteloneapipackage
build_systems/intelpackage
build_systems/multiplepackage
build_systems/rocmpackage
build_systems/sourceforgepackage
build_systems/custompackage
build_systems/multiplepackage
For reference, the :py:mod:`Build System API docs <spack.build_systems>`
provide a list of build systems and methods/attributes that can be

View File

@@ -433,7 +433,7 @@ For example:
.. code-block:: python
variant('profiler', when='@2.0:')
config_args += self.with_or_without('profiler')
config_args += self.with_or_without('profiler)
will neither add ``--with-profiler`` nor ``--without-profiler`` when the version is
below ``2.0``.

View File

@@ -1,123 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _cachedcmakepackage:
------------------
CachedCMakePackage
------------------
The CachedCMakePackage base class is used for CMake-based workflows
that create a CMake cache file prior to running ``cmake``. This is
useful for packages with arguments longer than the system limit, and
for reproducibility.
The documentation for this class assumes that the user is familiar with
the ``CMakePackage`` class from which it inherits. See the documentation
for :ref:`CMakePackage <cmakepackage>`.
^^^^^^
Phases
^^^^^^
The ``CachedCMakePackage`` base class comes with the following phases:
#. ``initconfig`` - generate the CMake cache file
#. ``cmake`` - generate the Makefile
#. ``build`` - build the package
#. ``install`` - install the package
By default, these phases run:
.. code-block:: console
$ mkdir spack-build
$ cd spack-build
$ cat << EOF > name-arch-compiler@version.cmake
# Write information on compilers and dependencies
# includes information on mpi and cuda if applicable
$ cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/installation/prefix -C name-arch-compiler@version.cmake
$ make
$ make test # optional
$ make install
The ``CachedCMakePackage`` class inherits from the ``CMakePackage``
class, and accepts all of the same options and adds all of the same
flags to the ``cmake`` command. Similar to the ``CMakePackage`` class,
you may need to add a few arguments yourself, and the
``CachedCMakePackage`` provides the same interface to add those
flags.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding entries to the CMake cache
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In addition to adding flags to the ``cmake`` command, you may need to
add entries to the CMake cache in the ``initconfig`` phase. This can
be done by overriding one of four methods:
#. ``CachedCMakePackage.initconfig_compiler_entries``
#. ``CachedCMakePackage.initconfig_mpi_entries``
#. ``CachedCMakePackage.initconfig_hardware_entries``
#. ``CachedCMakePackage.initconfig_package_entries``
Each of these methods returns a list of CMake cache strings. The
distinction between these methods is merely to provide a
well-structured and legible cmake cache file -- otherwise, entries
from each of these methods are handled identically.
Spack also provides convenience methods for generating CMake cache
entries. These methods are available at module scope in every Spack
package. Because CMake parses boolean options, strings, and paths
differently, there are three such methods:
#. ``cmake_cache_option``
#. ``cmake_cache_string``
#. ``cmake_cache_path``
These methods each accept three parameters -- the name of the CMake
variable associated with the entry, the value of the entry, and an
optional comment -- and return strings in the appropriate format to be
returned from any of the ``initconfig*`` methods. Additionally, these
methods may return comments beginning with the ``#`` character.
A typical usage of these methods may look something like this:
.. code-block:: python
def initconfig_mpi_entries(self):
    # Get existing MPI configurations
    entries = super(Foo, self).initconfig_mpi_entries()
    # The existing MPI configurations key on whether ``mpi`` is in the spec
    # This spec has an MPI variant, and we need to enable MPI when it is on.
    # This hypothetical package controls MPI with the ``FOO_MPI`` option to
    # cmake.
    if '+mpi' in self.spec:
        entries.append(cmake_cache_option('FOO_MPI', True, "enable mpi"))
    else:
        entries.append(cmake_cache_option('FOO_MPI', False, "disable mpi"))
    return entries

def initconfig_package_entries(self):
    # Package specific options
    entries = []
    entries.append('#Entries for build options')
    bar_on = '+bar' in self.spec
    entries.append(cmake_cache_option('FOO_BAR', bar_on, 'toggle bar'))
    entries.append('#Entries for dependencies')
    if self.spec['blas'].name == 'baz':  # baz is our blas provider
        entries.append(cmake_cache_string('FOO_BLAS', 'baz', 'Use baz'))
        entries.append(cmake_cache_path('BAZ_PREFIX', self.spec['baz'].prefix))
    return entries
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on CMake cache files, see:
https://cmake.org/cmake/help/latest/manual/cmake.1.html

View File

@@ -159,85 +159,6 @@ and CMake simply ignores the empty command line argument. For example the follow
will generate ``'cmake' '-DEXAMPLE=ON' ...`` when `@2.0: +example` is met, but will
result in ``'cmake' '' ...`` when the spec version is below ``2.0``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
CMake arguments provided by Spack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following default arguments are controlled by Spack:
``CMAKE_INSTALL_PREFIX``
------------------------
Set to the package's install directory.
``CMAKE_PREFIX_PATH``
---------------------
CMake finds dependencies through calls to ``find_package()``, ``find_program()``,
``find_library()``, ``find_file()``, and ``find_path()``, which use a list of search
paths from ``CMAKE_PREFIX_PATH``. Spack sets this variable to a list of prefixes of the
spec's transitive dependencies.
For troubleshooting cases where CMake fails to find a dependency, add the
``--debug-find`` flag to ``cmake_args``.
``CMAKE_BUILD_TYPE``
--------------------
Every CMake-based package accepts a ``-DCMAKE_BUILD_TYPE`` flag to
dictate which level of optimization to use. In order to ensure
uniformity across packages, the ``CMakePackage`` base class adds
a variant to control this:
.. code-block:: python
variant('build_type', default='RelWithDebInfo',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
However, not every CMake package accepts all four of these options.
Grep the ``CMakeLists.txt`` file to see if the default values are
missing or replaced. For example, the
`dealii <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/dealii/package.py>`_
package overrides the default variant with:
.. code-block:: python
variant('build_type', default='DebugRelease',
description='The build type to build',
values=('Debug', 'Release', 'DebugRelease'))
For more information on ``CMAKE_BUILD_TYPE``, see:
https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
``CMAKE_INSTALL_RPATH`` and ``CMAKE_INSTALL_RPATH_USE_LINK_PATH=ON``
--------------------------------------------------------------------
CMake uses different RPATHs during the build and after installation, so that executables
can locate the libraries they're linked to during the build, and installed executables
do not have RPATHs to build directories. In Spack, we have to make sure that RPATHs are
set properly after installation.
Spack sets ``CMAKE_INSTALL_RPATH`` to a list of ``<prefix>/lib`` or ``<prefix>/lib64``
directories of the spec's link-type dependencies. Apart from that, it sets
``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON``, which should add RPATHs for directories of
linked libraries not in the directories covered by ``CMAKE_INSTALL_RPATH``.
Usually it's enough to set only ``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON``, but the
reason to provide both options is that packages may dynamically open shared libraries,
which CMake cannot detect. In those cases, the RPATHs from ``CMAKE_INSTALL_RPATH`` are
used as search paths.
.. note::
Some packages provide stub libraries, which contain an interface for linking without
an implementation. When using such libraries, it's best to override the option
``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=OFF`` in ``cmake_args``, so that stub libraries
are not used at runtime.
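A hypothetical package that needs this override can do it from ``cmake_args``
(a sketch; package arguments are appended after Spack's defaults, and the last
``-D`` flag for a variable wins):

.. code-block:: python

   def cmake_args(self):
       # This package dlopen()s stub libraries, so do not derive the
       # install RPATHs from the link path.
       return ['-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=OFF']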
^^^^^^^^^^
Generators
@@ -275,6 +196,36 @@ generators, but it should be simple to add support for alternative
generators. For more information on CMake generators, see:
https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
^^^^^^^^^^^^^^^^
CMAKE_BUILD_TYPE
^^^^^^^^^^^^^^^^
Every CMake-based package accepts a ``-DCMAKE_BUILD_TYPE`` flag to
dictate which level of optimization to use. In order to ensure
uniformity across packages, the ``CMakePackage`` base class adds
a variant to control this:
.. code-block:: python
variant('build_type', default='RelWithDebInfo',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
However, not every CMake package accepts all four of these options.
Grep the ``CMakeLists.txt`` file to see if the default values are
missing or replaced. For example, the
`dealii <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/dealii/package.py>`_
package overrides the default variant with:
.. code-block:: python
variant('build_type', default='DebugRelease',
description='The build type to build',
values=('Debug', 'Release', 'DebugRelease'))
For more information on ``CMAKE_BUILD_TYPE``, see:
https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
CMakeLists.txt in a sub-directory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

View File

@@ -84,8 +84,8 @@ build ``hdf5`` with Intel oneAPI MPI do::
spack install hdf5 +mpi ^intel-oneapi-mpi
Using Externally Installed oneAPI Tools
=======================================
Using an Externally Installed oneAPI
====================================
Spack can also use oneAPI tools that are manually installed with
`Intel Installers`_. The procedures for configuring Spack to use
@@ -110,7 +110,7 @@ Another option is to manually add the configuration to
Libraries
---------
If you want Spack to use oneMKL that you have installed without Spack in
If you want Spack to use MKL that you have installed without Spack in
the default location, then add the following to
``~/.spack/packages.yaml``, adjusting the version as appropriate::
@@ -139,7 +139,7 @@ You can also use Spack-installed libraries. For example::
spack load intel-oneapi-mkl
Will update your environment CPATH, LIBRARY_PATH, and other
environment variables for building an application with oneMKL.
environment variables for building an application with MKL.
More information
================

View File

@@ -15,9 +15,6 @@ IntelPackage
Intel packages in Spack
^^^^^^^^^^^^^^^^^^^^^^^^
This is an earlier version of Intel software development tools and has
now been replaced by Intel oneAPI Toolkits.
Spack can install and use several software development products offered by Intel.
Some of these are available under no-cost terms, others require a paid license.
All share the same basic steps for configuration, installation, and, where

View File

@@ -1,105 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _luapackage:
------------
LuaPackage
------------
LuaPackage is a helper for the common case of Lua packages that provide
a rockspec file. This is not meant to take a rock archive, but to build
a source archive or repository that provides a rockspec, which should cover
most lua packages. In the case a Lua package builds by Make rather than
luarocks, prefer MakefilePackage.
^^^^^^
Phases
^^^^^^
The ``LuaPackage`` base class comes with the following phases:
#. ``unpack`` - if using a rock, unpacks the rock and moves into the source directory
#. ``preprocess`` - adjust sources or rockspec to fix build
#. ``install`` - install the project
By default, these phases run:
.. code-block:: console
# If the archive is a source rock
$ luarocks unpack <archive>.src.rock
$ # preprocess is a noop by default
$ luarocks make <name>.rockspec
Any of these phases can be overridden in your package as necessary.
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
Packages that use the Lua/LuaRocks build system can be identified by the
presence of a ``*.rockspec`` file in their sourcetree, or can be fetched as
a source rock archive (``.src.rock``). This file declares things like build
instructions and dependencies; the ``.src.rock`` also contains all the code.
It is common for the rockspec file to list the required Lua version as
a dependency. The LuaPackage class adds appropriate dependencies on a Lua
implementation, but it is a good idea to specify the version required with
a ``depends_on`` statement. The block is normally a table definition like
this:
.. code-block:: lua
dependencies = {
"lua >= 5.1",
}
The LuaPackage class supports source repositories and archives containing
a rockspec and directly downloading source rock files. It *does not* support
downloading dependencies listed inside a rockspec, and thus does not support
directly downloading a rockspec as an archive.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
All base dependencies are added by the build system, but LuaRocks is run in a
way that avoids downloading extra Lua dependencies during the build. If the
package needs Lua libraries outside the standard set, they should be added as
dependencies.
To specify a Lua version constraint but allow all lua implementations, prefer
to use ``depends_on("lua-lang@5.1:5.1.99")`` to express any 5.1 compatible
version. If the package requires LuaJit rather than Lua,
a ``depends_on("luajit")`` should be used to ensure a LuaJit distribution is
used instead of the Lua interpreter. Alternatively, if only interpreted Lua will
work, ``depends_on("lua")`` expresses that.
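Putting this together, a hypothetical package might declare (a sketch):

.. code-block:: python

   class LuaFoo(LuaPackage):
       # accept any 5.1-compatible implementation, interpreter or JIT
       depends_on('lua-lang@5.1:5.1.99')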
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to luarocks make
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you need to pass any arguments to the ``luarocks make`` call, you can
override the ``luarocks_args`` method like so:
.. code-block:: python
def luarocks_args(self):
return ['flag1', 'flag2']
One common use of this is to override warnings or flags for newer compilers, as in:
.. code-block:: python
def luarocks_args(self):
return ["CFLAGS='-Wno-error=implicit-function-declaration'"]
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on the LuaRocks build system, see:
https://luarocks.org/

View File

@@ -48,10 +48,8 @@ important to understand.
**build backend**
Libraries used to define how to build a wheel. Examples
include `setuptools <https://setuptools.pypa.io/>`__,
`flit <https://flit.pypa.io/>`_,
`poetry <https://python-poetry.org/>`_,
`hatchling <https://hatch.pypa.io/latest/>`_, and
`meson <https://meson-python.readthedocs.io/>`_.
`flit <https://flit.readthedocs.io/>`_, and
`poetry <https://python-poetry.org/>`_.
^^^^^^^^^^^
Downloading
@@ -175,9 +173,9 @@ package. The "Project description" tab may also contain a longer
description of the package. Either of these can be used to populate
the package docstring.
^^^^^^^^^^^^
Dependencies
^^^^^^^^^^^^
^^^^^^^^^^^^^
Build backend
^^^^^^^^^^^^^
Once you've determined the basic metadata for a package, the next
step is to determine the build backend. ``PythonPackage`` uses
@@ -215,33 +213,12 @@ Note that ``py-wheel`` is already listed as a build dependency in the
need to specify a specific version requirement or change the
dependency type.
See `PEP 517 <https://www.python.org/dev/peps/pep-0517/>`__ and
See `PEP 517 <https://www.python.org/dev/peps/pep-0517/>`_ and
`PEP 518 <https://www.python.org/dev/peps/pep-0518/>`_ for more
information on the design of ``pyproject.toml``.
Depending on which build backend a project uses, there are various
places that run-time dependencies can be listed. Most modern build
backends support listing dependencies directly in ``pyproject.toml``.
Look for dependencies under the following keys:
* ``requires-python`` under ``[project]``
This specifies the version of Python that is required
* ``dependencies`` under ``[project]``
These packages are required for building and installation. You can
add them with ``type=('build', 'run')``.
* ``[project.optional-dependencies]``
This section includes keys with lists of optional dependencies
needed to enable those features. You should add a variant that
optionally adds these dependencies. This variant should be ``False``
by default.
Some build backends may have additional locations where dependencies
can be found.
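As a sketch, an optional feature found under ``[project.optional-dependencies]``
usually maps to a variant plus a conditional dependency (the variant and package
names below are hypothetical):

.. code-block:: python

   variant('yaml', default=False, description='Enable YAML support')
   depends_on('py-pyyaml', when='+yaml', type=('build', 'run'))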
places that run-time dependencies can be listed.
"""""""""
distutils
@@ -267,9 +244,9 @@ If the ``pyproject.toml`` lists ``setuptools.build_meta`` as a
``build-backend``, or if the package has a ``setup.py`` that imports
``setuptools``, or if the package has a ``setup.cfg`` file, then it
uses setuptools to build. Setuptools is a replacement for the
distutils library, and has almost the exact same API. In addition to
``pyproject.toml``, dependencies can be listed in the ``setup.py`` or
``setup.cfg`` file. Look for the following arguments:
distutils library, and has almost the exact same API. Dependencies
can be listed in the ``setup.py`` or ``setup.cfg`` file. Look for the
following arguments:
* ``python_requires``
@@ -314,22 +291,25 @@ listed directly in the ``pyproject.toml`` file. Older versions of
flit used to store this info in a ``flit.ini`` file, so check for
this too.
In addition to the default ``pyproject.toml`` keys listed above,
older versions of flit may use the following keys:
Either of these files may contain keys like:
* ``requires`` under ``[tool.flit.metadata]``
* ``requires-python``
This specifies the version of Python that is required
* ``dependencies`` or ``requires``
These packages are required for building and installation. You can
add them with ``type=('build', 'run')``.
* ``[tool.flit.metadata.requires-extra]``
* ``project.optional-dependencies`` or ``requires-extra``
This section includes keys with lists of optional dependencies
needed to enable those features. You should add a variant that
optionally adds these dependencies. This variant should be False
by default.
See https://flit.pypa.io/en/latest/pyproject_toml.html for
See https://flit.readthedocs.io/en/latest/pyproject_toml.html for
more information.
""""""
@@ -346,28 +326,6 @@ for specifying the version requirements. Note that ``~=`` works
differently in poetry than in setuptools and flit for versions that
start with a zero.
"""""""""
hatchling
"""""""""
If the ``pyproject.toml`` lists ``hatchling.build`` as the
``build-backend``, it uses the hatchling build system. Hatchling
uses the default ``pyproject.toml`` keys to list dependencies.
See https://hatch.pypa.io/latest/config/dependency/ for more
information.
"""""
meson
"""""
If the ``pyproject.toml`` lists ``mesonpy`` as the ``build-backend``,
it uses the meson build system. Meson uses the default
``pyproject.toml`` keys to list dependencies.
See https://meson-python.readthedocs.io/en/latest/usage/start.html
for more information.
""""""
wheels
""""""
@@ -412,34 +370,6 @@ packages. However, the installation instructions for a package may
suggest passing certain flags to the ``setup.py`` call. The
``PythonPackage`` class has two techniques for doing this.
"""""""""""""""
Config settings
"""""""""""""""
These settings are passed to
`PEP 517 <https://peps.python.org/pep-0517/>`__ build backends.
For example, the ``py-scipy`` package allows you to specify the name of
the BLAS/LAPACK library you want pkg-config to search for:
.. code-block:: python
depends_on('py-pip@22.1:', type='build')
def config_settings(self, spec, prefix):
return {
'blas': spec['blas'].libs.names[0],
'lapack': spec['lapack'].libs.names[0],
}
.. note::
This flag only works for packages that define a ``build-backend``
in ``pyproject.toml``. Also, it is only supported by pip 22.1+,
which requires Python 3.7+. For packages that still support Python
3.6 and older, ``install_options`` should be used instead.
""""""""""""""
Global options
""""""""""""""
@@ -459,16 +389,6 @@ has an optional dependency on ``libyaml`` that can be enabled like so:
return options
.. note::
Direct invocation of ``setup.py`` is
`deprecated <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html>`_.
This flag forces pip to use a deprecated installation procedure.
It should only be used in packages that don't define a
``build-backend`` in ``pyproject.toml`` or packages that still
support Python 3.6 and older.
"""""""""""""""
Install options
"""""""""""""""
@@ -489,16 +409,6 @@ allows you to specify the directories to search for ``libyaml``:
return options
.. note::
Direct invocation of ``setup.py`` is
`deprecated <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html>`_.
This flag forces pip to use a deprecated installation procedure.
It should only be used in packages that don't define a
``build-backend`` in ``pyproject.toml`` or packages that still
support Python 3.6 and older.
^^^^^^^
Testing
^^^^^^^
@@ -735,7 +645,8 @@ are not yet in Spack, and Spack contains many Python packages that are
not yet in Anaconda. The main advantage of Spack over Anaconda is its
ability to choose a specific compiler and BLAS/LAPACK or MPI library.
Spack also has better platform support for supercomputers, and can build
optimized binaries for your specific microarchitecture.
optimized binaries for your specific microarchitecture. On the other hand,
Anaconda offers Windows support.
^^^^^^^^^^^^^^^^^^^^^^
External documentation
@@ -754,7 +665,5 @@ For more information on build and installation frontend tools, see:
For more information on build backend tools, see:
* setuptools: https://setuptools.pypa.io/
* flit: https://flit.pypa.io/
* flit: https://flit.readthedocs.io/
* poetry: https://python-poetry.org/
* hatchling: https://hatch.pypa.io/latest/
* meson: https://meson-python.readthedocs.io/

View File

@@ -1,46 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _racketpackage:
-------------
RacketPackage
-------------
Much like Python, Racket packages and modules have their own special build system.
To learn more about the specifics of Racket package system, please refer to the
`Racket Docs <https://docs.racket-lang.org/pkg/cmdline.html>`_.
^^^^^^
Phases
^^^^^^
The ``RacketPackage`` base class provides an ``install`` phase that
can be overridden, corresponding to the use of:
.. code-block:: console
$ raco pkg install
^^^^^^^
Caveats
^^^^^^^
In principle, ``raco`` supports a second, ``setup`` phase; however, we have not
implemented this separately, as in normal circumstances, ``install`` also handles
running ``setup`` automatically.
Unlike Python, Racket currently only supports two installation scopes for packages, user
or system, and keeps a registry of installed packages at each scope in its configuration files.
This means we can't simply compose a "``RACKET_PATH``" environment variable listing all of the
places packages are installed, and update this at will.
Unfortunately this means that all currently installed packages which extend Racket via ``raco pkg install``
are accessible whenever Racket is accessible.
Additionally, because Spack does not implement uninstall hooks, uninstalling a Spack ``rkt-`` package
will have no effect on the ``raco`` installed packages visible to your Racket installation.
Instead, you must manually run ``raco pkg remove`` to keep the two package managers in a mutually
consistent state.
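For instance, after uninstalling a hypothetical ``rkt-foo`` package with Spack,
you would also run something like (a sketch):

.. code-block:: console

   % raco pkg remove foo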

View File

@@ -95,7 +95,7 @@ class of your package. For example, you can add it to your
# Set up the hip macros needed by the build
args.extend([
'-DENABLE_HIP=ON',
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix)])
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix])
rocm_archs = spec.variants['amdgpu_target'].value
if 'none' not in rocm_archs:
args.append('-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'

View File

@@ -1,55 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _sourceforgepackage:
------------------
SourceforgePackage
------------------
``SourceforgePackage`` is a
`mixin-class <https://en.wikipedia.org/wiki/Mixin>`_. It automatically
sets the URL based on a built-in list of about half a dozen known Sourceforge
mirrors, combined with the package's `sourceforge_mirror_path`.
Refer to the package source
(`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/sourceforge.py>`__) for the current list of mirrors used by Spack.
^^^^^^^
Methods
^^^^^^^
This package provides a method for populating mirror URLs.
**urls**
This method returns a list of possible URLs for package source.
It is decorated with `property` so its results are treated as
a package attribute.
Refer to
`<https://spack.readthedocs.io/en/latest/packaging_guide.html#mirrors-of-the-main-url>`__
for information on how Spack uses the `urls` attribute during
fetching.
^^^^^
Usage
^^^^^
This helper package can be added to your package by adding it as a base
class of your package and defining the relative location of an archive
file for one version of your software.
.. code-block:: python
:emphasize-lines: 1,3
class MyPackage(AutotoolsPackage, SourceforgePackage):
...
sourceforge_mirror_path = "my-package/mypackage.1.0.0.tar.gz"
...
Over 40 packages use this ``SourceforgePackage`` mix-in as of
July 2022, so there are multiple packages to choose from if you want
to see a real example.

View File

@@ -23,48 +23,43 @@
import sys
from glob import glob
from docutils.statemachine import StringList
from sphinx.domains.python import PythonDomain
from sphinx.ext.apidoc import main as sphinx_apidoc
from sphinx.parsers import RSTParser
# -- Spack customizations -----------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external"))
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/pytest-fallback"))
sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external'))
sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external/pytest-fallback'))
if sys.version_info[0] < 3:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib"))
sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib'))
else:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib3"))
sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib3'))
sys.path.append(os.path.abspath("_spack_root/lib/spack/"))
sys.path.append(os.path.abspath('_spack_root/lib/spack/'))
# Add the Spack bin directory to the path so that we can use its output in docs.
os.environ["SPACK_ROOT"] = os.path.abspath("_spack_root")
os.environ["PATH"] += "%s%s" % (os.pathsep, os.path.abspath("_spack_root/bin"))
os.environ['SPACK_ROOT'] = os.path.abspath('_spack_root')
os.environ['PATH'] += "%s%s" % (os.pathsep, os.path.abspath('_spack_root/bin'))
# Set an environment variable so that colify will print output like it would to
# a terminal.
os.environ["COLIFY_SIZE"] = "25x120"
os.environ["COLUMNS"] = "120"
os.environ['COLIFY_SIZE'] = '25x120'
os.environ['COLUMNS'] = '120'
# Generate full package list if needed
subprocess.call(["spack", "list", "--format=html", "--update=package_list.html"])
subprocess.call([
'spack', 'list', '--format=html', '--update=package_list.html'])
# Generate a command index if an update is needed
subprocess.call(
[
"spack",
"commands",
"--format=rst",
"--header=command_index.in",
"--update=command_index.rst",
]
+ glob("*rst")
)
subprocess.call([
'spack', 'commands',
'--format=rst',
'--header=command_index.in',
'--update=command_index.rst'] + glob('*rst'))
#
# Run sphinx-apidoc
@@ -74,12 +69,12 @@
# Without this, the API Docs will never actually update
#
apidoc_args = [
"--force", # Overwrite existing files
"--no-toc", # Don't create a table of contents file
"--output-dir=.", # Directory to place all output
'--force', # Overwrite existing files
'--no-toc', # Don't create a table of contents file
'--output-dir=.', # Directory to place all output
]
sphinx_apidoc(apidoc_args + ["_spack_root/lib/spack/spack"])
sphinx_apidoc(apidoc_args + ["_spack_root/lib/spack/llnl"])
sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/spack'])
sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/llnl'])
# Enable todo items
todo_include_todos = True
@@ -87,77 +82,60 @@
#
# Disable duplicate cross-reference warnings.
#
from sphinx.domains.python import PythonDomain
class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if "refspecific" in node:
del node["refspecific"]
if 'refspecific' in node:
del node['refspecific']
return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode
)
#
# Disable tabs to space expansion in code blocks
# since Makefiles require tabs.
#
class NoTabExpansionRSTParser(RSTParser):
def parse(self, inputstring, document):
if isinstance(inputstring, str):
lines = inputstring.splitlines()
inputstring = StringList(lines, document.current_source)
super().parse(inputstring, document)
env, fromdocname, builder, typ, target, node, contnode)
def setup(sphinx):
sphinx.add_domain(PatchedPythonDomain, override=True)
sphinx.add_source_parser(NoTabExpansionRSTParser, override=True)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.4"
needs_sphinx = '3.4'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinxcontrib.programoutput",
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.programoutput',
]
# Set default graphviz options
graphviz_dot_args = [
"-Grankdir=LR",
"-Gbgcolor=transparent",
"-Nshape=box",
"-Nfontname=monaco",
"-Nfontsize=10",
]
'-Grankdir=LR', '-Gbgcolor=transparent',
'-Nshape=box', '-Nfontname=monaco', '-Nfontsize=10']
# Get nice vector graphics
graphviz_output_format = "svg"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ".rst"
source_suffix = '.rst'
# The encoding of source files.
source_encoding = "utf-8-sig"
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
master_doc = 'index'
# General information about the project.
project = u"Spack"
copyright = u"2013-2021, Lawrence Livermore National Laboratory."
project = u'Spack'
copyright = u'2013-2021, Lawrence Livermore National Laboratory.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -166,16 +144,16 @@ def setup(sphinx):
# The short X.Y version.
import spack
version = ".".join(str(s) for s in spack.spack_version_info[:2])
version = '.'.join(str(s) for s in spack.spack_version_info[:2])
# The full version, including alpha/beta/rc tags.
release = spack.spack_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
#language = None
# Places to look for .po/.mo files for doc translations
# locale_dirs = []
#locale_dirs = []
# Sphinx gettext settings
gettext_compact = True
@@ -183,42 +161,40 @@ def setup(sphinx):
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
#today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_spack_root", ".spack-env"]
exclude_patterns = ['_build', '_spack_root', '.spack-env']
nitpicky = True
nitpick_ignore = [
# Python classes that intersphinx is unable to resolve
("py:class", "argparse.HelpFormatter"),
("py:class", "contextlib.contextmanager"),
("py:class", "module"),
("py:class", "_io.BufferedReader"),
("py:class", "unittest.case.TestCase"),
("py:class", "_frozen_importlib_external.SourceFileLoader"),
("py:class", "clingo.Control"),
('py:class', 'argparse.HelpFormatter'),
('py:class', 'contextlib.contextmanager'),
('py:class', 'module'),
('py:class', '_io.BufferedReader'),
('py:class', 'unittest.case.TestCase'),
('py:class', '_frozen_importlib_external.SourceFileLoader'),
# Spack classes that are private and we don't want to expose
("py:class", "spack.provider_index._IndexBase"),
("py:class", "spack.repo._PrependFileLoader"),
('py:class', 'spack.provider_index._IndexBase'),
]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# We use our own extension of the default style with a few modifications
@@ -229,151 +205,156 @@ def setup(sphinx):
class SpackStyle(DefaultStyle):
styles = DefaultStyle.styles.copy()
background_color = "#f4f4f8"
styles[Generic.Output] = "#355"
styles[Generic.Prompt] = "bold #346ec9"
import pkg_resources
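# The lines below register SpackStyle as a Pygments plugin at runtime, via an
# in-memory distribution and entry point, so that the pygments_style = "spack"
# setting further down resolves without installing a separate package.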
dist = pkg_resources.Distribution(__file__)
sys.path.append(".") # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse("spack = conf:SpackStyle", dist=dist)
dist._ep_map = {"pygments.styles": {"plugin1": ep}}
pkg_resources.working_set.add(dist)
pygments_style = "spack"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"logo_only": True}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_spack_root/share/spack/logo/spack-logo-white-text.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_spack_root/share/spack/logo/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Spackdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "Spack.tex", u"Spack Documentation", u"Todd Gamblin", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "spack", u"Spack Documentation", [u"Todd Gamblin"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@@ -382,25 +363,19 @@ class SpackStyle(DefaultStyle):
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Spack",
u"Spack Documentation",
u"Todd Gamblin",
"Spack",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Extension configuration -------------------------------------------------

View File

@@ -5,9 +5,9 @@
.. _config-yaml:
============================
Spack Settings (config.yaml)
============================
Spack's basic configuration options are set in ``config.yaml``. You can
see the default settings by looking at
@@ -72,6 +72,21 @@ used to configure module names.
packages have been installed will prevent Spack from being
able to find the old installation directories.
--------------------
``module_roots``
--------------------
Controls where Spack installs generated module files. You can customize
the location for each type of module. e.g.:
.. code-block:: yaml
module_roots:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
See :ref:`modules` for details.
--------------------
``build_stage``
--------------------

View File

@@ -37,6 +37,8 @@ Here is an example ``config.yaml`` file:
config:
install_tree: $spack/opt/spack
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
@@ -251,6 +253,8 @@ your configurations look like this:
config:
install_tree: $spack/opt/spack
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
@@ -274,6 +278,8 @@ command:
$ spack config get config
config:
install_tree: /some/other/directory
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
@@ -339,11 +345,13 @@ higher-precedence scope is *prepended* to the defaults. ``spack config
get config`` shows the result:
.. code-block:: console
:emphasize-lines: 7-10
$ spack config get config
config:
install_tree: /some/other/directory
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- /lustre-scratch/$user/spack
- ~/mystage
@@ -367,11 +375,13 @@ user config looked like this:
The merged configuration would look like this:
.. code-block:: console
:emphasize-lines: 7-8
$ spack config get config
config:
install_tree: /some/other/directory
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- /lustre-scratch/$user/spack
- ~/mystage
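The list-merge rule at work here is simple: entries from a higher-precedence
scope are *prepended* to entries from lower-precedence scopes. The toy sketch
below illustrates that behavior; it is only an illustration, not Spack's
actual implementation:

.. code-block:: python

   def merge_list_setting(*scopes):
       """Merge a list-valued setting, highest-precedence scope first."""
       merged = []
       for scope in scopes:
           merged.extend(scope)  # higher-precedence entries end up first
       return merged

   # The custom scope wins over the defaults scope:
   print(merge_list_setting(
       ["/lustre-scratch/$user/spack", "~/mystage"],      # custom scope
       ["$tempdir/$user/spack-stage", "~/.spack/stage"],  # defaults scope
   ))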
@@ -492,6 +502,9 @@ account all scopes. For example, to see the fully merged
template_dirs:
- $spack/templates
directory_layout: {architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}
module_roots:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
@@ -539,6 +552,9 @@ down the problem:
/home/myuser/spack/etc/spack/defaults/config.yaml:23 template_dirs:
/home/myuser/spack/etc/spack/defaults/config.yaml:24 - $spack/templates
/home/myuser/spack/etc/spack/defaults/config.yaml:28 directory_layout: {architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}
/home/myuser/spack/etc/spack/defaults/config.yaml:32 module_roots:
/home/myuser/spack/etc/spack/defaults/config.yaml:33 tcl: $spack/share/spack/modules
/home/myuser/spack/etc/spack/defaults/config.yaml:34 lmod: $spack/share/spack/lmod
/home/myuser/spack/etc/spack/defaults/config.yaml:49 build_stage:
/home/myuser/spack/etc/spack/defaults/config.yaml:50 - $tempdir/$user/spack-stage
/home/myuser/spack/etc/spack/defaults/config.yaml:51 - ~/.spack/stage

View File

@@ -59,8 +59,7 @@ other techniques to minimize the size of the final image:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -109,10 +108,9 @@ Spack Images on Docker Hub
--------------------------
Docker images with Spack preinstalled and ready to be used are
built when a release is tagged, or nightly on ``develop``. The images
are then pushed both to `Docker Hub <https://hub.docker.com/u/spack>`_
and to `GitHub Container Registry <https://github.com/orgs/spack/packages?repo_name=spack>`_.
The operating systems that are currently supported are summarized in the table below:
.. _containers-supported-os:
@@ -122,31 +120,22 @@ The OS that are currently supported are summarized in the table below:
* - Operating System
- Base Image
- Spack Image
* - Ubuntu 16.04
- ``ubuntu:16.04``
- ``spack/ubuntu-xenial``
* - Ubuntu 18.04
- ``ubuntu:18.04``
- ``spack/ubuntu-bionic``
* - Ubuntu 20.04
- ``ubuntu:20.04``
- ``spack/ubuntu-focal``
* - Ubuntu 22.04
- ``ubuntu:22.04``
- ``spack/ubuntu-jammy``
* - CentOS 7
- ``centos:7``
- ``spack/centos7``
* - CentOS Stream
- ``quay.io/centos/centos:stream``
- ``spack/centos-stream``
* - openSUSE Leap
- ``opensuse/leap``
- ``spack/leap15``
* - Amazon Linux 2
- ``amazonlinux:2``
- ``spack/amazon-linux``
All the images are tagged with the corresponding release of Spack:
.. image:: images/ghcr_spack.png
with the exception of the ``latest`` tag that points to the HEAD
of the ``develop`` branch. These images are available for anyone
@@ -256,8 +245,7 @@ software is respectively built and installed:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -378,8 +366,7 @@ produces, for instance, the following ``Dockerfile``:
&& echo " externals:" \
&& echo " - spec: cuda%gcc" \
&& echo " prefix: /usr/local/cuda" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml

View File

@@ -71,7 +71,7 @@ locally to speed up the review process.
new release that is causing problems. If this is the case, please file an issue.
We currently test against Python 2.7 and 3.6-3.10 on both macOS and Linux and
perform 3 types of tests:
.. _cmd-spack-unit-test:

View File

@@ -107,6 +107,7 @@ with a high level view of Spack's directory structure:
llnl/ <- some general-use libraries
spack/ <- spack module; contains Python code
analyzers/ <- modules to run analysis on installed packages
build_systems/ <- modules for different build systems
cmd/ <- each file in here is a spack subcommand
compilers/ <- compiler description files
@@ -150,7 +151,7 @@ Package-related modules
^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.package`
Contains the :class:`~spack.package_base.Package` class, which
is the superclass for all packages in Spack. Methods on ``Package``
implement all phases of the :ref:`package lifecycle
<package-lifecycle>` and manage the build process.
@@ -241,6 +242,22 @@ Unit tests
Implements Spack's test suite. Add a module and put its name in
the test suite in ``__init__.py`` to add more unit tests.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Research and Monitoring Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.monitor`
Contains :class:`~spack.monitor.SpackMonitorClient`. This is accessed from
the ``spack install`` and ``spack analyze`` commands to send build and
package metadata up to a `Spack Monitor
<https://github.com/spack/spack-monitor>`_ server.
:mod:`spack.analyzers`
A module folder with a :class:`~spack.analyzers.analyzer_base.AnalyzerBase`
that provides base functions to run, save, and (optionally) upload analysis
results to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.
^^^^^^^^^^^^^
Other Modules
@@ -284,6 +301,240 @@ Most spack commands look something like this:
The information in Package files is used at all stages in this
process.
Conceptually, packages are overloaded. They contain:
-------------
Stage objects
-------------
.. _writing-analyzers:
-----------------
Writing analyzers
-----------------
To write an analyzer, you should add a new Python file to the
analyzers module directory at ``lib/spack/spack/analyzers``.
Your analyzer should be a subclass of the :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`. For example, if you want
to add an analyzer class ``Myanalyzer`` you would write to
``spack/analyzers/myanalyzer.py`` and import and
use the base as follows:
.. code-block:: python
from .analyzer_base import AnalyzerBase
class Myanalyzer(AnalyzerBase):
Note that the class name is your (all lowercase) module file name with the
first letter capitalized. You can look at other analyzers in
the analyzers directory for examples. The guide here covers the basic functions needed.
^^^^^^^^^^^^^^^^^^^^^^^^^
Analyzer Output Directory
^^^^^^^^^^^^^^^^^^^^^^^^^
By default, when you run ``spack analyze run``, an analyzer output directory will
be created in your Spack user directory under ``$HOME``. We write output here
because the install directory might not always be writable.
.. code-block:: console
~/.spack/
analyzers
Result files will be written here, organized in subfolders in the same structure
as the package, with each analyzer owning its own subfolder. For example:
.. code-block:: console
$ tree ~/.spack/analyzers/
/home/spackuser/.spack/analyzers/
└── linux-ubuntu20.04-skylake
└── gcc-9.3.0
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
├── environment_variables
│   └── spack-analyzer-environment-variables.json
├── install_files
│   └── spack-analyzer-install-files.json
└── libabigail
└── lib
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
Notice that for the libabigail analyzer, since results are generated per object,
we honor the object's folder in case there are equivalently named files in
different folders. The result files are typically written as JSON so they can be easily read and uploaded to a monitor in a later interaction.
^^^^^^^^^^^^^^^^^
Analyzer Metadata
^^^^^^^^^^^^^^^^^
Your analyzer is required to have the class attributes ``name``, ``outfile``,
and ``description``. These are printed to the user when they use the subcommand
``spack analyze list-analyzers``. Here is an example.
As we mentioned above, note that this analyzer would live in a module named
``libabigail.py`` in the analyzers folder so that the class can be discovered.
.. code-block:: python
class Libabigail(AnalyzerBase):
name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"
This means that the name and output file should be unique for your analyzer.
Note that "all" cannot be the name of an analyzer, as this key is used to indicate
that the user wants to run all analyzers.
.. _analyzer_run_function:
^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer run Function
^^^^^^^^^^^^^^^^^^^^^^^^
The core of an analyzer is its ``run()`` function, which should accept no
arguments. You can assume your analyzer has the package spec of interest at ``self.spec``
and it's up to the run function to generate whatever analysis data you need,
and then return an object keyed by the analyzer name. The result data
should be a list of objects, each with a ``name``, ``analyzer_name``, ``install_file``,
and one of ``value`` or ``binary_value``. The install file should be a relative
path, not an absolute path. For example, let's say we extract a metric called
``metric`` for ``bin/wget`` using our analyzer ``thebest-analyzer``.
We might have data that looks like this:
.. code-block:: python
result = {"name": "metric", "analyzer_name": "thebest-analyzer", "value": "1", "install_file": "bin/wget"}
We'd then return it as follows. Note that the key is the analyzer name at ``self.name``.
.. code-block:: python
return {self.name: result}
This will save the complete result to the analyzer metadata folder, as described
previously. If you want support for adding a different kind of metadata (e.g.,
not associated with an install file) then the monitor server would need to be updated
to support this first.
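Putting the metadata attributes and the ``run()`` contract together, a minimal
end-to-end analyzer might look like the sketch below. The ``file-count`` metric
and the class body are illustrative only, not an analyzer that ships with Spack:

.. code-block:: python

   import os

   from .analyzer_base import AnalyzerBase


   class Myanalyzer(AnalyzerBase):

       name = "myanalyzer"
       outfile = "spack-analyzer-myanalyzer.json"
       description = "example analyzer that counts installed files"

       def run(self):
           # Walk the install prefix and count files as a stand-in metric;
           # self.spec is made available by the base class at init.
           count = sum(len(files) for _, _, files in os.walk(self.spec.prefix))
           result = {"name": "file-count",
                     "analyzer_name": self.name,
                     "install_file": ".",
                     "value": str(count)}
           return {self.name: result}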
^^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer init Function
^^^^^^^^^^^^^^^^^^^^^^^^^
If you don't need any extra dependencies or checks, you can skip defining an analyzer
init function, as the base class will handle it. Typically, it will accept
a spec, and an optional output directory (if the user does not want the default
metadata folder for analyzer results). The analyzer init function should call
its parent init, and then do any extra checks or validation that are required for it to
work. For example:
.. code-block:: python
def __init__(self, spec, dirname=None):
super(Myanalyzer, self).__init__(spec, dirname)
# install extra dependencies, do extra preparation and checks here
At the end of the init, you will have available to you:
- **self.spec**: the spec object
- **self.dirname**: an optional directory name the user has provided at init to save
- **self.output_dir**: the analyzer metadata directory, where we save by default
- **self.meta_dir**: the path to the package metadata directory (.spack) if you need it
And can proceed to write your analyzer.
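For example, an init function that adds a hypothetical extra check might look
like the following sketch (the ``abidw`` requirement is purely illustrative):

.. code-block:: python

   import shutil

   from .analyzer_base import AnalyzerBase


   class Myanalyzer(AnalyzerBase):

       def __init__(self, spec, dirname=None):
           super(Myanalyzer, self).__init__(spec, dirname)
           # Hypothetical extra validation: require abidw on PATH.
           if not shutil.which("abidw"):
               raise RuntimeError("Myanalyzer requires abidw on PATH")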
^^^^^^^^^^^^^^^^^^^^^^^
Saving Analyzer Results
^^^^^^^^^^^^^^^^^^^^^^^
The analyzer will have ``save_result`` called with the generated result object,
to save it to the filesystem and, if the user has added the ``--monitor`` flag,
to upload it to a monitor server. If your result follows an accepted result
format and you don't need to parse it further, you don't need to add this
function to your class. However, if your result data is large or otherwise
needs additional parsing, you can define it. If you define the function, it
is useful to know about the ``output_dir`` property, which you can join
with your output file relative path of choice:
.. code-block:: python
outfile = os.path.join(self.output_dir, "my-output-file.txt")
The directory will be provided by the ``output_dir`` property but it won't exist,
so you should create it:
.. code-block:: python

   # Create the output directory
   if not os.path.exists(self._output_dir):
       os.makedirs(self._output_dir)
If you are generating results that match to specific files in the package
install directory, you should try to maintain those paths in the case that
there are equivalently named files in different directories that would
overwrite one another. As an example of an analyzer with a custom save,
the Libabigail analyzer saves ``*.xml`` files to the analyzer metadata
folder in ``run()``, as they are either binaries, or as xml (text) would
usually be too big to pass in one request. For this reason, the files
are saved during ``run()`` and the filenames added to the result object,
and then when the result object is passed back into ``save_result()``,
we skip saving to the filesystem, and instead read the file and send
each one (separately) to the monitor:
.. code-block:: python
def save_result(self, result, monitor=None, overwrite=False):
"""ABI results are saved to individual files, so each one needs to be
read and uploaded. Result here should be the lookup generated in run(),
the key is the analyzer name, and each value is the result file.
We currently upload the entire xml as text because libabigail can't
easily read gzipped xml, but this will be updated when it can.
"""
if not monitor:
return
name = self.spec.package.name
for obj, filename in result.get(self.name, {}).items():
# Don't include the prefix
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
# We've already saved the results to file during run
content = spack.monitor.read_file(filename)
# A result needs an analyzer, value or binary_value, and name
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
monitor.send_analyze_metadata(self.spec.package, {"libabigail": [data]})
Notice that this function, if you define it, requires a result object (generated by
``run()``), a monitor (if you want to send results), and a boolean ``overwrite`` that
is used to check whether a result exists first, and to avoid writing to it if the
result exists and overwrite is False. Also notice that since we already saved these
files to the analyzer metadata folder, we return early if a monitor isn't defined,
because this function serves to send results to the monitor. If you haven't saved
anything to the analyzer metadata folder yet, you might want to do that here. You
should also use ``tty.info`` to give the user a message of "Writing result to $DIRNAME."
.. _writing-commands:
@@ -448,6 +699,23 @@ with a hook, and this is the purpose of this particular hook. Akin to
``on_phase_success`` we require the same variables - the package that failed,
the name of the phase, and the log file where we might find errors.
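A no-op implementation of this hook might look like the sketch below; only the
signature is fixed by the hook interface, the body is illustrative:

.. code-block:: python

   def on_phase_error(pkg, phase_name, log_file):
       """React to a failed phase, e.g. by scanning the log for errors."""
       print("Phase %s of %s failed; see %s" % (phase_name, pkg.name, log_file))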
"""""""""""""""""""""""""""""""""
``on_analyzer_save(pkg, result)``
"""""""""""""""""""""""""""""""""
After an analyzer has saved some result for a package, this hook is called,
and it provides the package that we just ran the analysis for, along with
the loaded result. Typically, a result is structured to have the name
of the analyzer as key, and the result object that is defined in detail in
:ref:`analyzer_run_function`.
.. code-block:: python
def on_analyzer_save(pkg, result):
"""given a package and a result...
"""
print('Do something extra with a package analysis result here')
^^^^^^^^^^^^^^^^^^^^^^
Adding a New Hook Type
@@ -789,39 +1057,39 @@ Release branches
^^^^^^^^^^^^^^^^
There are currently two types of Spack releases: :ref:`major releases
<major-releases>` (``0.17.0``, ``0.18.0``, etc.) and :ref:`point releases
<point-releases>` (``0.17.1``, ``0.17.2``, ``0.17.3``, etc.). Here is a
diagram of how Spack release branches work::
o branch: develop (latest version, v0.19.0.dev0)
|
o
| o branch: releases/v0.18, tag: v0.18.1
o |
| o tag: v0.18.0
o |
| o
|/
o
|
o
| o branch: releases/v0.17, tag: v0.17.2
o |
| o tag: v0.17.1
o |
| o tag: v0.17.0
o |
| o
|/
o
The ``develop`` branch has the latest contributions, and nearly all pull
requests target ``develop``. The ``develop`` branch will report that its
version is that of the next **major** release with a ``.dev0`` suffix.
Each Spack release series also has a corresponding branch, e.g.
``releases/v0.18`` has ``0.18.x`` versions of Spack, and
``releases/v0.17`` has ``0.17.x`` versions. A major release is the first
tagged version on a release branch. Minor releases are back-ported from
develop onto release branches. This is typically done by cherry-picking
bugfix commits off of ``develop``.
@@ -832,20 +1100,12 @@ packages. They should generally only contain fixes to the Spack core.
However, sometimes priorities are such that new functionality needs to
be added to a minor release.
Both major and minor releases are tagged. As a convenience, we also tag
the latest release as ``releases/latest``, so that users can easily check
it out to get the latest stable version. See :ref:`updating-latest-release`
for more details.
.. note::
Older spack releases were merged **back** into develop so that we could
do fancy things with tags, but since tarballs and many git checkouts do
not have tags, this proved overly complex and confusing.
We have since converted to using `PEP 440 <https://peps.python.org/pep-0440/>`_
compliant versions. `See here <https://github.com/spack/spack/pull/25267>`_ for
details.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scheduling work for releases
@@ -903,11 +1163,10 @@ completed, the steps to make the major release are:
``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
branch if you are preparing the ``X.Y.0`` release.
#. Remove the ``dev0`` development release segment from the version tuple in
``lib/spack/spack/__init__.py``.
The version number itself should already be correct and should not be
modified.
#. Update ``CHANGELOG.md`` with major highlights in bullet form.
@@ -929,16 +1188,9 @@ completed, the steps to make the major release are:
is outdated submit pull requests to ``develop`` as normal
and keep rebasing the release branch on ``develop``.
#. Bump the major version in the ``develop`` branch.
Create a pull request targeting the ``develop`` branch, bumping the major
version in ``lib/spack/spack/__init__.py`` with a ``dev0`` release segment.
For instance when you have just released ``v0.15.0``, set the version
to ``(0, 16, 0, 'dev0')`` on ``develop``.
#. Follow the steps in :ref:`publishing-releases`.
#. Follow the steps in :ref:`updating-latest-release`.
#. Follow the steps in :ref:`announcing-releases`.
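For reference, the version bump mentioned above edits a tuple in
``lib/spack/spack/__init__.py``; its relationship to the version string Spack
reports is roughly the following simplified sketch:

.. code-block:: python

   #: (major, minor, micro, dev release) tuple, as set right after v0.15.0
   spack_version_info = (0, 16, 0, "dev0")

   #: String form reported by ``spack --version``, e.g. "0.16.0.dev0"
   spack_version = ".".join(str(s) for s in spack_version_info)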
@@ -965,13 +1217,8 @@ completed, the steps to make the point release are:
$ git checkout releases/v0.15
#. If a pull request to the release branch named ``Backports vX.Y.Z`` is not already
in the project, create it. This pull request ought to be created as early as
possible when working on a release project, so that we can build the release
commits incrementally, and identify potential conflicts at an early stage.
#. Cherry-pick each pull request in the ``Done`` column of the release
project board onto the ``Backports vX.Y.Z`` pull request.
This is **usually** fairly simple since we squash the commits from the
vast majority of pull requests. That means there is only one commit
@@ -996,7 +1243,7 @@ completed, the steps to make the point release are:
It is important to cherry-pick commits in the order they happened,
otherwise you can get conflicts while cherry-picking. When
cherry-picking, look at the merge date,
**not** the number of the pull request or the date it was opened.
Sometimes you may **still** get merge conflicts even if you have
@@ -1017,19 +1264,18 @@ completed, the steps to make the point release are:
branch if neither of the above options makes sense, but this can
require a lot of work. It's seldom the right choice.
#. When all the commits from the project board are cherry-picked into
the ``Backports vX.Y.Z`` pull request, you can push a commit to:
1. Bump the version in ``lib/spack/spack/__init__.py``.
2. Update ``CHANGELOG.md`` with a list of the changes.
#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
is needed to keep track in the release branch of all the commits that were
cherry-picked.
#. Make sure CI passes on the release branch, including:
@@ -1044,12 +1290,10 @@ completed, the steps to make the point release are:
#. Follow the steps in :ref:`publishing-releases`.
#. Follow the steps in :ref:`updating-latest-release`.
#. Follow the steps in :ref:`announcing-releases`.
#. Submit a PR to update the CHANGELOG in the `develop` branch
with the addition of this point release.
.. _publishing-releases:
@@ -1107,11 +1351,11 @@ Publishing a release on GitHub
selectable in the versions menu.
.. _updating-latest-release:
^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating `releases/latest`
^^^^^^^^^^^^^^^^^^^^^^^^^^
If the new release is the **highest** Spack release yet, you should
also tag it as ``releases/latest``. For example, suppose the highest
@@ -1135,6 +1379,40 @@ To tag ``releases/latest``, do this:
The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing
``releases/latest`` tag with the new one.
.. _announcing-releases:


View File

@@ -5,9 +5,9 @@
.. _environments:
=========================
Environments (spack.yaml)
=========================
An environment is used to group together a set of specs for the
purpose of building, rebuilding and deploying in a coherent fashion.
@@ -273,9 +273,19 @@ or
Concretizing
^^^^^^^^^^^^
Once some user specs have been added to an environment, they can be concretized.
There are currently three different modes of operation for concretizing an environment,
which are explained in detail in :ref:`environments_concretization_config`.
Regardless of which mode of operation has been chosen, the following
command will ensure all the root specs are concretized according to the
constraints that are prescribed in the configuration:
@@ -339,24 +349,6 @@ If the Environment has been concretized, Spack will install the
concretized specs. Otherwise, ``spack install`` will first concretize
the Environment and then install the concretized specs.
.. note::
Every ``spack install`` process builds one package at a time with multiple build
jobs, controlled by the ``-j`` flag and the ``config:build_jobs`` option
(see :ref:`build-jobs`). To speed up environment builds further, independent
packages can be installed in parallel by launching more Spack instances. For
example, the following will build at most four packages in parallel using
three background jobs:
.. code-block:: console
[myenv]$ spack install & spack install & spack install & spack install
Another option is to generate a ``Makefile`` and run ``make -j<N>`` to control
the number of parallel install processes. See :ref:`env-generate-depfile`
for details.
As it installs, ``spack install`` creates symbolic links in the
``logs/`` directory in the Environment, allowing for easy inspection
of build logs related to that environment. The ``spack install``
@@ -376,30 +368,6 @@ from being added again. At the same time, a spec that already exists in the
environment, but only as a dependency, will be added to the environment as a
root spec without the ``--no-add`` option.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Developing Packages in a Spack Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``spack develop`` command allows one to develop Spack packages in
an environment. It requires a spec containing a concrete version, and
will configure Spack to install the package from local source. By
default, it will also clone the package to a subdirectory in the
environment. This package will have a special variant ``dev_path``
set, and Spack will ensure the package and its dependents are rebuilt
any time the environment is installed if the package's local source
code has been modified. Spack ensures that all instances of a
developed package in the environment are concretized to match the
version (and other constraints) passed as the spec argument to the
``spack develop`` command.
For packages with ``git`` attributes, git branches, tags, and commits can
also be used as valid concrete versions (see :ref:`version-specifier`).
This means that for a package ``foo``, ``spack develop foo@git.main`` will clone
the ``main`` branch of the package, and ``spack install`` will install from
that git clone if ``foo`` is in the environment.
Further development on ``foo`` can be tested by reinstalling the environment,
and eventually committed and pushed to the upstream git repo.
^^^^^^^
Loading
^^^^^^^
@@ -416,11 +384,18 @@ Sourcing that file in Bash will make the environment available to the
user; and can be included in ``.bashrc`` files, etc. The ``loads``
file may also be copied out of the environment, renamed, etc.
----------
spack.yaml
----------
Spack environments can be customized at finer granularity by editing
the ``spack.yaml`` manifest file directly.
.. _environment-configuration:
------------------------
Configuring Environments
------------------------
A variety of Spack behaviors are changed through Spack configuration
files, covered in more detail in the :ref:`configuration`
@@ -442,9 +417,9 @@ environment can be specified by ``env:NAME`` (to affect environment
``foo``, set ``--scope env:foo``). These commands will automatically
manipulate configuration inline in the ``spack.yaml`` file.
^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""
Inline configurations
^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""
Inline Environment-scope configuration is done using the same yaml
format as standard Spack configuration scopes, covered in the
@@ -465,9 +440,9 @@ a ``packages.yaml`` file) could contain:
This configuration sets the default compiler for all packages to
``intel``.
^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""
Included configurations
^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""
Spack environments allow an ``include`` heading in their yaml
schema. This heading pulls in external configuration files and applies
@@ -487,9 +462,9 @@ to make small changes to an individual Environment. Included configs
listed earlier will have higher precedence, as the included configs are
applied in reverse order.
-------------------------------
Manually Editing the Specs List
-------------------------------
The list of abstract/root specs in the Environment is maintained in
the ``spack.yaml`` manifest under the heading ``specs``.
@@ -507,81 +482,37 @@ Appending to this list in the yaml is identical to using the ``spack
add`` command from the command line. However, there is more power
available from the yaml file.
.. _environments_concretization_config:
^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""
Spec concretization
^^^^^^^^^^^^^^^^^^^
An environment can be concretized in three different modes and the behavior active under any environment
is determined by the ``concretizer:unify`` property. By default specs are concretized *separately*, one after the other:
"""""""""""""""""""
Specs can be concretized separately or together, as already
explained in :ref:`environments_concretization`. The behavior active
under any environment is determined by the ``concretization`` property:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: false
This mode of operation makes it possible to deploy a full software stack where multiple configurations of the same package
need to be installed alongside each other using the best possible selection of transitive dependencies. The downside
is that redundancy of installations is disregarded completely, and thus environments might be more bloated than
strictly needed. In the example above, for instance, if a version of ``zlib`` newer than ``1.2.8`` is known to Spack,
then it will be used for both ``hdf5`` installations.
If redundancy of the environment is a concern, Spack provides a way to install it *together where possible*,
i.e. trying to maximize reuse of dependencies across different specs:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
In this case too, Spack allows multiple configurations of the same package, but favors the reuse of
specs over other factors. Going back to our example, this means that both ``hdf5`` installations will use
``zlib@1.2.8`` as a dependency even if newer versions of that library are available.
Central installations done at HPC centers by system administrators or user support groups are a common case
that fits either of these two modes.
Environments can also be configured to concretize all the root specs *together*, in a self-consistent way, to
ensure that each package in the environment comes with a single configuration:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This mode of operation is usually what is required by software developers that want to deploy their development
environment and have a single view of it in the filesystem.
.. note::
The ``concretizer:unify`` config option was introduced in Spack 0.18 to
replace the ``concretization`` property. For reference,
``concretization: together`` is replaced by ``concretizer:unify:true``,
and ``concretization: separately`` is replaced by ``concretizer:unify:false``.
.. admonition:: Re-concretization of user specs
When concretizing specs *together* or *together where possible* the entire set of specs will be
re-concretized after any addition of new user specs, to ensure that
the environment remains consistent / minimal. When instead the specs are concretized
separately only the new specs will be re-concretized after any addition.
^^^^^^^^^^^^^
"""""""""""""
Spec Matrices
^^^^^^^^^^^^^
"""""""""""""
Entries in the ``specs`` list can be individual abstract specs or a
spec matrix.
@@ -641,9 +572,9 @@ This allows one to create toolchains out of combinations of
constraints and apply them somewhat indiscriminately to packages,
without regard for the applicability of the constraint.
^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""
Spec List References
^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""
The last type of possible entry in the specs list is a reference.
@@ -743,9 +674,9 @@ The valid variables for a ``when`` clause are:
#. ``hostname``. The hostname of the system (if ``hostname`` is an
executable in the user's PATH).
^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""
SpecLists as Constraints
^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""
Dependencies and compilers in Spack can be both packages in an
environment and constraints on other packages. References to SpecLists
@@ -777,41 +708,41 @@ For example, the following environment has three root packages:
This allows for a much-needed reduction in redundancy between packages
and constraints.
----------------
Filesystem Views
----------------
Spack Environments can define filesystem views, which provide a direct access point
for software similar to the directory hierarchy that might exist under ``/usr/local``.
Filesystem views are updated every time the environment is written out to the lock
file ``spack.lock``, so the concrete environment and the view are always compatible.
The files of the view's installed packages are brought into the view by symbolic or
hard links, referencing the original Spack installation, or by copy.
.. _configuring_environment_views:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuration in ``spack.yaml``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""
Configuring environment views
"""""""""""""""""""""""""""""
The Spack Environment manifest file has a top-level keyword
``view``. Each entry under that heading is a **view descriptor**, headed
by a name. Any number of views may be defined under the ``view`` heading.
The view descriptor contains the root of the view, and
optionally the projections for the view, ``select`` and
``exclude`` lists for the view and link information via ``link`` and
``link_type``.
For example, in the following manifest
file snippet we define a view named ``mpis``, rooted at
``/path/to/view`` in which all projections use the package name,
version, and compiler name to determine the path for a given
package. This view selects all packages that depend on MPI, and
excludes those built with the PGI compiler at version 18.5.
The root specs with their (transitive) link and run type dependencies
will be put in the view due to the ``link: all`` option,
and the files in the view will be symlinks to the spack install
directories.
.. code-block:: yaml
@@ -823,30 +754,20 @@ directories.
select: [^mpi]
exclude: ['%pgi@18.5']
projections:
all: '{name}/{version}-{compiler.name}'
link: all
link_type: symlink
The default for the ``select`` and
``exclude`` values is to select everything and exclude nothing. The
default projection is the default view projection (``{}``). The ``link``
attribute allows the following values:
#. ``link: all`` include root specs with their transitive run and link type
dependencies (default);
#. ``link: run`` include root specs with their transitive run type dependencies;
#. ``link: roots`` include root specs without their dependencies.
The ``link_type`` defaults to ``symlink`` but can also take the value
of ``hardlink`` or ``copy``.
.. tip::
The option ``link: run`` can be used to create small environment views for
Python packages. Python will be able to import packages *inside* of the view even
when the environment is not activated, and linked libraries will be located
*outside* of the view thanks to rpaths.
There are two shorthands for environments with a single view. If the
environment at ``/path/to/env`` has a single view, with a root at
@@ -912,47 +833,9 @@ regenerate`` will regenerate the views for the environment. This will
apply any updates in the environment configuration that have not yet
been applied.
.. _view_projections:
""""""""""""""""
View Projections
""""""""""""""""
The default projection into a view is to link every package into the
root of the view. The projections attribute is a mapping of partial specs to
spec format strings, defined by the :meth:`~spack.spec.Spec.format`
function, as shown in the example below:
.. code-block:: yaml
projections:
zlib: {name}-{version}
^mpi: {name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}
all: {name}-{version}/{compiler.name}-{compiler.version}
The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will
be the first non-``all`` entry that the spec satisfies, or ``all`` if
there is an entry for ``all`` and no other entry is satisfied by the
spec. Where the keyword ``all`` appears in the file does not
matter.
Given the example above, the spec ``zlib@1.2.8``
will be linked into ``/my/view/zlib-1.2.8/``, the spec
``hdf5@1.8.10+mpi %gcc@4.9.3 ^mvapich2@2.2`` will be linked into
``/my/view/hdf5-1.8.10/mvapich2-2.2-gcc-4.9.3``, and the spec
``hdf5@1.8.10~mpi %gcc@4.9.3`` will be linked into
``/my/view/hdf5-1.8.10/gcc-4.9.3``.
If the keyword ``all`` does not appear in the projections
configuration file, any spec that does not satisfy any entry in the
file will be linked into the root of the view as in a single-prefix
view. Any entries that appear below the keyword ``all`` in the
projections configuration file will not be used, as all specs will use
the projection under ``all`` before reaching those entries.
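In code terms, applying a projection is just a call to
:meth:`~spack.spec.Spec.format` with the chosen format string. The selection
rule described above can be sketched as follows; this is a hypothetical helper
assuming a concrete spec, not Spack's actual implementation:

.. code-block:: python

   def projection_for(spec, projections):
       """Use the first non-"all" entry the spec satisfies, else "all"."""
       for when, fmt in projections.items():
           if when != "all" and spec.satisfies(when):
               return spec.format(fmt)
       if "all" in projections:
           return spec.format(projections["all"])
       return ""  # no entry satisfied: link into the root of the view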
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""
Activating environment views
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""
The ``spack env activate`` command will put the default view for the
environment into the user's path, in addition to activating the
@@ -972,6 +855,9 @@ Variable Paths
PATH bin
MANPATH man, share/man
ACLOCAL_PATH share/aclocal
LD_LIBRARY_PATH lib, lib64
LIBRARY_PATH lib, lib64
CPATH include
PKG_CONFIG_PATH lib/pkgconfig, lib64/pkgconfig, share/pkgconfig
CMAKE_PREFIX_PATH .
=================== =========
@@ -983,89 +869,3 @@ environment.
The ``spack env deactivate`` command will remove the default view of
the environment from the user's path.
.. _env-generate-depfile:
------------------------------------------
Generating Depfiles from Environments
------------------------------------------
Spack can generate ``Makefile``\s to make it easier to build multiple
packages in an environment in parallel. Generated ``Makefile``\s expose
targets that can be included in existing ``Makefile``\s, to allow
other targets to depend on the environment installation.
A typical workflow is as follows:
.. code:: console
spack env create -d .
spack -e . add perl
spack -e . concretize
spack -e . env depfile > Makefile
make -j64
This generates a ``Makefile`` from a concretized environment in the
current working directory, and ``make -j64`` installs the environment,
exploiting parallelism across packages as much as possible. Spack
respects the Make jobserver and forwards it to the build environment
of packages, meaning that a single ``-j`` flag is enough to control the
load, even when packages are built in parallel.
By default the following phony convenience targets are available:
- ``make all``: installs the environment (default target);
- ``make fetch-all``: only fetch sources of all packages;
- ``make clean``: cleans files used by make, but does not uninstall packages.
.. tip::
GNU Make version 4.3 and above have great support for output synchronization
through the ``-O`` and ``--output-sync`` flags, which ensure that output is
printed orderly per package install. To get synchronized output with colors,
use ``make -j<N> SPACK_COLOR=always --output-sync=recurse``.
The following advanced example shows how generated targets can be used in a
``Makefile``:
.. code:: Makefile
SPACK ?= spack
.PHONY: all clean env
all: env
spack.lock: spack.yaml
$(SPACK) -e . concretize -f
env.mk: spack.lock
$(SPACK) -e . env depfile -o $@ --make-target-prefix spack
env: spack/env
$(info Environment installed!)
clean:
rm -rf spack.lock env.mk spack/
ifeq (,$(filter clean,$(MAKECMDGOALS)))
include env.mk
endif
When ``make`` is invoked, it first "remakes" the missing include ``env.mk``
from its rule, which triggers concretization. When done, the generated target
``spack/env`` is available. In the above example, the ``env`` target uses this generated
target as a prerequisite, meaning that it can make use of the installed packages in
its commands.
As it is typically undesirable to remake ``env.mk`` as part of ``make clean``,
the include is conditional.
.. note::
When including generated ``Makefile``\s, it is important to use
the ``--make-target-prefix`` flag and use the non-phony target
``<target-prefix>/env`` as prerequisite, instead of the phony target
``<target-prefix>/all``.

View File

@@ -149,28 +149,27 @@ Spack fall back to bootstrapping from sources:
.. code-block:: console
$ spack bootstrap untrust github-actions-v0.2
==> "github-actions-v0.2" is now untrusted and will not be used for bootstrapping
You can verify that the new settings are effective with:
.. code-block:: console
$ spack bootstrap list
Name: github-actions-v0.2 UNTRUSTED
Type: buildcache
Info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.2
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
Description:
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
[ ... ]
Name: spack-install TRUSTED
@@ -1517,238 +1516,3 @@ To ensure that Spack does not autodetect the Cray programming
environment, unset the environment variable ``MODULEPATH``. This
will cause Spack to treat a linux container on a Cray system as a base
linux distro.
.. _windows_support:
----------------
Spack On Windows
----------------
Windows support for Spack is currently under development. While this work is still in an early stage,
it is currently possible to set up Spack and perform a few operations on Windows. This section will guide
you through the steps needed to install Spack and start running it on a fresh Windows machine.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Step 1: Install prerequisites
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use Spack on Windows, you will need the following packages:
Required:
* Microsoft Visual Studio
* Python
* Git
Optional:
* Intel Fortran (needed for some packages)
.. note::
Currently MSVC is the only compiler tested for C/C++ projects. Intel OneAPI provides Fortran support.
"""""""""""""""""""""""
Microsoft Visual Studio
"""""""""""""""""""""""
Microsoft Visual Studio provides the only Windows C/C++ compiler that is currently supported by Spack.
We require several specific components to be included in the Visual Studio installation.
One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools,"
depending on installation type (Professional, Build Tools, etc.) The other required component is
"C++ CMake tools for Windows," which can be selected from among the optional packages.
This provides CMake and Ninja for use during Spack configuration.
If you already have Visual Studio installed, you can make sure these components are installed by
rerunning the installer. Next to your installation, select "Modify" and look at the
"Installation details" pane on the right.
"""""""""""""
Intel Fortran
"""""""""""""
For Fortran-based packages on Windows, we strongly recommend Intel's oneAPI Fortran compilers.
The suite is free to download from Intel's website, located at
https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/fortran-compiler.html#gs.70t5tw.
The executable Spack uses is Intel's beta compiler, ``ifx``, which uses LLVM and supports
the frontend and runtime libraries of the classic compiler (``ifort``).
""""""
Python
""""""
As Spack is a Python-based package, an installation of Python will be needed to run it.
Python 3 can be downloaded and installed from the Windows Store, and will be automatically added
to your ``PATH`` in this case.
.. note::
Spack currently supports Python 3.2 and later.
"""
Git
"""
A bash console and GUI can be downloaded from https://git-scm.com/downloads.
If you are unfamiliar with Git, there are a myriad of resources online to help
guide you through checking out repositories and switching development branches.
When given the option of adjusting your ``PATH``, choose the ``Git from the
command line and also from 3rd-party software`` option. This will automatically
update your ``PATH`` variable to include the ``git`` command.
Spack support on Windows currently depends on installing the Git for Windows project,
which provides the utilities Spack requires. This is also the recommended method for
installing Git on Windows; a link can be found above.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Step 2: Install and setup Spack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We are now ready to get the Spack environment set up on our machine. We
begin by using Git to clone the Spack repo, hosted at https://github.com/spack/spack.git
into a directory of your choice, here called ``spack_install``.
To install Spack with Windows support, run the following one-liner
in a Windows CMD prompt.
.. code-block:: console
git clone https://github.com/spack/spack.git
.. note::
If you choose to install Spack into a directory on Windows that requires administrative
privileges, Spack will need elevated privileges to run.
Such restrictions apply by default to directories like ``C:\Program Files``, and may
also be applied by an administrator to other directories that Spack writes files to,
such as ``C:\Users``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Step 3: Run and configure Spack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use Spack, run ``bin\spack_cmd.bat`` (you may need to Run as Administrator) from the top-level spack
directory. This will provide a Windows command prompt with an environment properly set up with Spack
and its prerequisites. If you receive a warning message that Python is not in your ``PATH``
(which may happen if you installed Python from the website and not the Windows Store) add the location
of the Python executable to your ``PATH`` now. You can permanently add Python to your ``PATH`` variable
by using the ``Edit the system environment variables`` utility in Windows Control Panel.
.. note::
Alternatively, PowerShell can be used in place of CMD.
To configure Spack, first run the following command inside the Spack console:
.. code-block:: console
spack compiler find
This creates a ``.staging`` directory in our Spack prefix, along with a ``windows`` subdirectory
containing a ``compilers.yaml`` file. On a fresh Windows install with the above packages
installed, this command should detect only Microsoft Visual Studio; the Intel Fortran
compiler will be integrated within the first version of MSVC present in the ``compilers.yaml``
output.
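For illustration, an MSVC entry in the generated ``compilers.yaml`` might look
roughly like the following; the version and paths are hypothetical and will
differ on your machine:

.. code-block:: yaml

   compilers:
   - compiler:
       spec: msvc@19.29.30133
       operating_system: windows10
       target: x86_64
       modules: []
       paths:
         # cl.exe serves as both the C and C++ compiler
         cc: 'C:\path\to\MSVC\bin\Hostx64\x64\cl.exe'
         cxx: 'C:\path\to\MSVC\bin\Hostx64\x64\cl.exe'
         f77: null
         fc: null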
Spack provides a default ``config.yaml`` file for Windows that it will use unless overridden.
This file is located at ``etc\spack\defaults\windows\config.yaml``. You can read more about
overriding it and writing your own configuration files in the :ref:`Configuration Files<configuration>` section of our
documentation. If you do this, pay particular attention to the ``build_stage`` block of the file
as this specifies the directory that will temporarily hold the source code for the packages to
be installed. This path name must be sufficiently short for compliance with cmd, otherwise you
will see build errors during installation (particularly with CMake) tied to long path names.
To allow Spack to use external tools and dependencies already on your system, the
external pieces of software must be described in the ``packages.yaml`` file.
There are two methods to populate this file:
The first and easiest choice is to use Spack to find installations on your system. In
the Spack terminal, run the following commands:
.. code-block:: console
spack external find cmake
spack external find ninja
The ``spack external find <name>`` command will find executables on your system
that match the given name, and will store the items found in a
``packages.yaml`` file in the ``.staging\`` directory.
Assuming that the command found CMake and Ninja executables in the previous
step, continue to Step 4. If no executables were found, we may need to manually direct Spack towards the CMake
and Ninja installations we set up with Visual Studio. In that case, your ``packages.yaml`` file will look something
like this, with possibly slight variations in the paths to CMake and Ninja:
.. code-block:: yaml
   packages:
     cmake:
       externals:
       - spec: cmake@3.19
         prefix: 'c:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake'
       buildable: False
     ninja:
       externals:
       - spec: ninja@1.8.2
         prefix: 'c:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja'
       buildable: False
You can also use a separate installation of CMake if you have one and prefer
to use it. If you don't have a path to Ninja analogous to the above, then you can
obtain it by running the Visual Studio Installer and following the instructions
at the start of this section. Also note that ``.yaml`` files use spaces for indentation,
not tabs, so ensure that this is the case when editing one directly.
.. note:: Cygwin
The use of Cygwin is not officially supported by Spack and is not tested.
However, Spack will not throw an error, so if you choose to use Spack
with Cygwin, know that no functionality is guaranteed.
^^^^^^^^^^^^^^^^^
Step 4: Use Spack
^^^^^^^^^^^^^^^^^
Once the configuration is complete, it is time to give the installation a test. Install a basic package through the
Spack console via:
.. code-block:: console
spack install cpuinfo
If you did not have CMake or Ninja installed in the previous step, running the command above should bootstrap both packages.
"""""""""""""""""""""""""""
Windows Compatible Packages
"""""""""""""""""""""""""""
Many Spack packages are not currently compatible with Windows, due to Unix
dependencies or incompatible build tools like autoconf. Here are several
packages known to work on Windows:
* abseil-cpp
* clingo
* cpuinfo
* cmake
* glm
* nasm
* netlib-lapack (requires Intel Fortran)
* ninja
* openssl
* perl
* python
* ruby
* wrf
* zlib
.. note::
This is by no means a comprehensive list.
^^^^^^^^^^^^^^
For developers
^^^^^^^^^^^^^^
The intent is to provide a Windows installer that will automatically set up
Python, Git, and Spack, instead of requiring the user to do so manually.
Instructions for creating the installer are at
https://github.com/spack/spack/blob/develop/lib/spack/spack/cmd/installer/README.md
Alternatively, a pre-built copy of the Windows installer is available as an artifact of Spack's Windows CI.

Binary file not shown.


View File

@@ -54,8 +54,8 @@ or refer to the full manual below.
features
getting_started
basic_usage
workflows
Tutorial: Spack 101 <https://spack-tutorial.readthedocs.io>
replace_conda_homebrew
known_issues
.. toctree::
@@ -64,7 +64,6 @@ or refer to the full manual below.
configuration
config_yaml
bootstrapping
build_settings
environments
containers

View File

@@ -7,28 +7,71 @@
Known Issues
============
This is a list of known issues in Spack. It provides ways of getting around these
This is a list of known bugs in Spack. It provides ways of getting around these
problems if you encounter them.
------------------------------------------------
Spack does not seem to respect ``packages.yaml``
------------------------------------------------
---------------------------------------------------
Variants are not properly forwarded to dependencies
---------------------------------------------------
A common problem in Spack v0.18 and above is that package, compiler and target
preferences specified in ``packages.yaml`` do not seem to be respected. Spack picks the
"wrong" compilers and their versions, package versions and variants, and
micro-architectures.
**Status:** Expected to be fixed by Spack's new concretizer
This is, however, not a bug. In order to reduce the number of builds of the same
packages, the concretizer values reuse of installed packages more highly than preferences
set in ``packages.yaml``. Note that ``packages.yaml`` specifies only preferences, not
hard constraints.
Sometimes, a variant of a package can also affect how its dependencies are
built. For example, in order to build MPI support for a package, it may
require that its dependencies are also built with MPI support. In the
``package.py``, this looks like:
There are multiple workarounds:
.. code-block:: python
1. Disable reuse during concretization: ``spack install --fresh <spec>`` when installing
from the command line, or ``spack concretize --fresh --force`` when using
environments.
2. Turn preferences into constraints, by moving them to the input spec. For example,
use ``spack spec zlib%gcc@12`` when you want to force GCC 12 even if ``zlib`` was
already installed with GCC 10.
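For example (a sketch; ``hdf5`` and ``zlib`` are only placeholder specs):

.. code-block:: console

   $ spack install --fresh hdf5+mpi
   $ spack spec zlib%gcc@12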
depends_on('hdf5~mpi', when='~mpi')
depends_on('hdf5+mpi', when='+mpi')
Spack handles this situation properly for *immediate* dependencies, and
builds ``hdf5`` with the same variant you used for the package that
depends on it. However, for *indirect* dependencies (dependencies of
dependencies), Spack does not backtrack up the DAG far enough to handle
this. Users commonly run into this situation when trying to build R with
X11 support:
.. code-block:: console
$ spack install r+X
...
==> Error: Invalid spec: 'cairo@1.14.8%gcc@6.2.1+X arch=linux-fedora25-x86_64 ^bzip2@1.0.6%gcc@6.2.1+shared arch=linux-fedora25-x86_64 ^font-util@1.3.1%gcc@6.2.1 arch=linux-fedora25-x86_64 ^fontconfig@2.12.1%gcc@6.2.1 arch=linux-fedora25-x86_64 ^freetype@2.7.1%gcc@6.2.1 arch=linux-fedora25-x86_64 ^gettext@0.19.8.1%gcc@6.2.1+bzip2+curses+git~libunistring+libxml2+tar+xz arch=linux-fedora25-x86_64 ^glib@2.53.1%gcc@6.2.1~libmount arch=linux-fedora25-x86_64 ^inputproto@2.3.2%gcc@6.2.1 arch=linux-fedora25-x86_64 ^kbproto@1.0.7%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libffi@3.2.1%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libpng@1.6.29%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libpthread-stubs@0.4%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libx11@1.6.5%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libxau@1.0.8%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libxcb@1.12%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libxdmcp@1.1.2%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libxext@1.3.3%gcc@6.2.1 arch=linux-fedora25-x86_64 ^libxml2@2.9.4%gcc@6.2.1~python arch=linux-fedora25-x86_64 ^libxrender@0.9.10%gcc@6.2.1 arch=linux-fedora25-x86_64 ^ncurses@6.0%gcc@6.2.1~symlinks arch=linux-fedora25-x86_64 ^openssl@1.0.2k%gcc@6.2.1 arch=linux-fedora25-x86_64 ^pcre@8.40%gcc@6.2.1+utf arch=linux-fedora25-x86_64 ^pixman@0.34.0%gcc@6.2.1 arch=linux-fedora25-x86_64 ^pkg-config@0.29.2%gcc@6.2.1+internal_glib arch=linux-fedora25-x86_64 ^python@2.7.13%gcc@6.2.1+shared~tk~ucs4 arch=linux-fedora25-x86_64 ^readline@7.0%gcc@6.2.1 arch=linux-fedora25-x86_64 ^renderproto@0.11.1%gcc@6.2.1 arch=linux-fedora25-x86_64 ^sqlite@3.18.0%gcc@6.2.1 arch=linux-fedora25-x86_64 ^tar^util-macros@1.19.1%gcc@6.2.1 arch=linux-fedora25-x86_64 ^xcb-proto@1.12%gcc@6.2.1 arch=linux-fedora25-x86_64 ^xextproto@7.3.0%gcc@6.2.1 arch=linux-fedora25-x86_64 ^xproto@7.0.31%gcc@6.2.1 arch=linux-fedora25-x86_64 ^xtrans@1.3.5%gcc@6.2.1 arch=linux-fedora25-x86_64 ^xz@5.2.3%gcc@6.2.1 arch=linux-fedora25-x86_64 ^zlib@1.2.11%gcc@6.2.1+pic+shared arch=linux-fedora25-x86_64'.
Package cairo requires variant ~X, but spec asked for +X
A workaround is to explicitly activate the variants of dependencies as well:
.. code-block:: console
$ spack install r+X ^cairo+X ^pango+X
See https://github.com/spack/spack/issues/267 and
https://github.com/spack/spack/issues/2546 for further details.
-----------------------------------------------
depends_on cannot handle recursive dependencies
-----------------------------------------------
**Status:** Not yet a work in progress
Although ``depends_on`` can handle any aspect of Spack's spec syntax,
it currently cannot handle recursive dependencies. If the ``^`` sigil
appears in a ``depends_on`` statement, the concretizer will hang.
For example, something like:
.. code-block:: python
depends_on('mfem+cuda ^hypre+cuda', when='+cuda')
should be rewritten as:
.. code-block:: python
depends_on('mfem+cuda', when='+cuda')
depends_on('hypre+cuda', when='+cuda')
See https://github.com/spack/spack/issues/17660 and
https://github.com/spack/spack/issues/11160 for more details.

View File

@@ -5,9 +5,9 @@
.. _mirrors:
======================
Mirrors (mirrors.yaml)
======================
=======
Mirrors
=======
Some sites may not have access to the internet for fetching packages.
These sites will need a local repository of tarballs from which they

View File

@@ -5,9 +5,9 @@
.. _modules:
======================
Modules (modules.yaml)
======================
=======
Modules
=======
The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is often embraced also by
@@ -113,8 +113,6 @@ from language interpreters into their extensions. The latter two instead permit
fine tune the filesystem layout, content and creation of module files to meet
site specific conventions.
.. _overide-api-calls-in-package-py:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Override API calls in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -136,7 +134,7 @@ The second method:
pass
can instead inject run-time environment modifications in the module files of packages
that depend on it. In both cases you need to fill ``env`` with the desired
that depend on it. In both cases you need to fill ``run_env`` with the desired
list of environment modifications.
.. admonition:: The ``r`` package and callback APIs
@@ -183,7 +181,10 @@ to the environment variables listed below the folder name.
Spack modules can be configured for multiple module sets. The default
module set is named ``default``. All Spack commands which operate on
modules default to apply the ``default`` module set, but can be
applied to any module set in the configuration.
applied to any module set in the configuration. Settings applied at
the root of the configuration (e.g. ``modules:enable`` rather than
``modules:default:enable``) are applied to the default module set for
backwards compatibility.
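As a sketch, a configuration with a second, hypothetical module set named
``site`` alongside the default one might look like:

.. code-block:: yaml

   modules:
     default:       # used by module commands unless another set is named
       tcl:
         hash_length: 7
     site:          # an additional module set, name chosen for illustration
       tcl:
         hash_length: 0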
"""""""""""""""""""""""""
Changing the modules root
@@ -310,7 +311,7 @@ the variable ``FOOBAR`` will be unset.
spec constraints are instead evaluated top to bottom.
""""""""""""""""""""""""""""""""""""""""""""
Exclude or include specific module files
Blacklist or whitelist specific module files
""""""""""""""""""""""""""""""""""""""""""""
You can use anonymous specs also to prevent module files from being written or
@@ -324,8 +325,8 @@ your system. If you write a configuration file like:
   modules:
     default:
       tcl:
         include: ['gcc', 'llvm'] # include will have precedence over exclude
         exclude: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler
         whitelist: ['gcc', 'llvm'] # Whitelist will have precedence over blacklist
         blacklist: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler
you will prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
@@ -377,7 +378,7 @@ most likely via the ``+blas`` variant specification.
The most heavyweight solution to module naming is to change the entire
naming convention for module files. This uses the projections format
covered in :ref:`view_projections`.
covered in :ref:`adding_projections_to_views`.
.. code-block:: yaml
@@ -492,7 +493,7 @@ satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.
.. warning::
.. warning::
If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
@@ -520,33 +521,18 @@ inspections and customize them per-module-set.
   prefix_inspections:
     bin:
     - PATH
     man:
     - MANPATH
     lib:
     - LIBRARY_PATH
     '':
     - CMAKE_PREFIX_PATH
Prefix inspections are only applied if the relative path inside the
installation prefix exists. In this case, for a Spack package ``foo``
installed to ``/spack/prefix/foo``, if ``foo`` installs executables to
``bin`` but no manpages in ``man``, the generated module file for
``bin`` but no libraries in ``lib``, the generated module file for
``foo`` would update ``PATH`` to contain ``/spack/prefix/foo/bin`` and
``CMAKE_PREFIX_PATH`` to contain ``/spack/prefix/foo``, but would not
update ``MANPATH``.
The default list of environment variables in this config section
includes ``PATH``, ``MANPATH``, ``ACLOCAL_PATH``, ``PKG_CONFIG_PATH``
and ``CMAKE_PREFIX_PATH``, as well as ``DYLD_FALLBACK_LIBRARY_PATH``
on macOS. On Linux, however, the corresponding ``LD_LIBRARY_PATH``
variable is *not* set, because it affects the behavior of
system executables too.
.. note::
In general, the ``LD_LIBRARY_PATH`` variable is not required
when using packages built with Spack, thanks to the use of RPATH.
Some packages may still need the variable, which is best handled
on a per-package basis instead of globally, as explained in
:ref:`overide-api-calls-in-package-py`.
update ``LIBRARY_PATH``.
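For a package that genuinely needs it, a minimal per-package sketch (the class
name and the reason given are hypothetical) is:

.. code-block:: python

   class Foo(Package):
       def setup_run_environment(self, env):
           # Only needed when RPATH is not sufficient, e.g. if Foo's
           # executables dlopen libraries from its prefix at run time.
           env.prepend_path('LD_LIBRARY_PATH', self.prefix.lib)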
There is a special case for prefix inspections relative to environment
views. If all of the following conditions hold for a module set
@@ -554,7 +540,8 @@ configuration:
#. The configuration is for an :ref:`environment <environments>` and
will never be applied outside the environment,
#. The environment in question is configured to use a view,
#. The environment in question is configured to use a :ref:`view
<filesystem-views>`,
#. The :ref:`environment view is configured
<configuring_environment_views>` with a projection that ensures
every package is linked to a unique directory,
@@ -606,7 +593,7 @@ Filter out environment modifications
Modifications to certain environment variables in module files are there by
default, for instance because they are generated by prefix inspections.
If you want to prevent modifications to some environment variables, you can
do so by using the ``exclude_env_vars``:
do so by using the environment blacklist:
.. code-block:: yaml
@@ -616,7 +603,7 @@ do so by using the ``exclude_env_vars``:
all:
filter:
# Exclude changes to any of these variables
exclude_env_vars: ['CPATH', 'LIBRARY_PATH']
environment_blacklist: ['CPATH', 'LIBRARY_PATH']
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.

View File

@@ -126,7 +126,7 @@ generates a boilerplate template for your package, and opens up the new
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
from spack.package import *
from spack import *
class Gmp(AutotoolsPackage):
@@ -699,7 +699,7 @@ Spack versions may also be arbitrary non-numeric strings, for example
``@develop``, ``@master``, ``@local``.
The order on versions is defined as follows. A version string is split
into a list of components based on delimiters such as ``.``, ``-`` etc.
into a list of components based on delimiters such as ``.``, ``-`` etc.
Lists are then ordered lexicographically, where components are ordered
as follows:
@@ -1070,32 +1070,13 @@ Commits
Submodules
You can supply ``submodules=True`` to cause Spack to fetch submodules
recursively along with the repository at fetch time.
recursively along with the repository at fetch time. For more information
about git submodules see the manpage of git: ``man git-submodule``.
.. code-block:: python
version('1.0.1', tag='v1.0.1', submodules=True)
If a package needs more fine-grained control over submodules, define
``submodules`` to be a callable function that takes the package instance as
its only argument. The function should return a list of submodules to be fetched.
.. code-block:: python
   def submodules(package):
       submodules = []
       if "+variant-1" in package.spec:
           submodules.append("submodule_for_variant_1")
       if "+variant-2" in package.spec:
           submodules.append("submodule_for_variant_2")
       return submodules


   class MyPackage(Package):
       version("0.1.0", submodules=submodules)
For more information about git submodules see the manpage of git: ``man
git-submodule``.
.. _github-fetch:
@@ -1442,37 +1423,6 @@ other similar operations:
).with_default('auto').with_non_feature_values('auto'),
)
"""""""""""""""""""""""""""
Conditional Possible Values
"""""""""""""""""""""""""""
There are cases where a variant may take multiple values, and the list of allowed values
expands over time. Consider, for instance, the C++ standard with which we might compile
Boost: it can take one of multiple possible values, with the latest standards
only available from a certain version on.
To model a similar situation we can use *conditional possible values* in the variant declaration:
.. code-block:: python
   variant(
       'cxxstd', default='98',
       values=(
           '98', '11', '14',
           # C++17 is not supported by Boost < 1.63.0.
           conditional('17', when='@1.63.0:'),
           # C++20/2a is not supported by Boost < 1.73.0.
           conditional('2a', '2b', when='@1.73.0:')
       ),
       multi=False,
       description='Use the specified C++ standard when building.',
   )
The snippet above allows ``98``, ``11`` and ``14`` as unconditional possible values for the
``cxxstd`` variant, while ``17`` requires a version greater than or equal to ``1.63.0``
and both ``2a`` and ``2b`` require a version greater than or equal to ``1.73.0``.
^^^^^^^^^^^^^^^^^^^^
Conditional Variants
^^^^^^^^^^^^^^^^^^^^
@@ -2283,17 +2233,9 @@ The following dependency types are available:
One of the advantages of the ``build`` dependency type is that although the
dependency needs to be installed in order for the package to be built, it
can be uninstalled without concern afterwards. ``link`` and ``run`` disallow
this because uninstalling the dependency would break the package.
``build``, ``link``, and ``run`` dependencies all affect the hash of Spack
packages (along with ``sha256`` sums of patches and archives used to build the
package, and a `canonical hash <https://github.com/spack/spack/pull/28156>`_ of
the ``package.py`` recipes). ``test`` dependencies do not affect the package
hash, as they are only used to construct a test environment *after* building and
installing a given package. Older versions of Spack did not include
build dependencies in the hash, but this has been
`fixed <https://github.com/spack/spack/pull/28504>`_ as of
`Spack v0.18 <https://github.com/spack/spack/releases/tag/v0.18.0>`_.
this because uninstalling the dependency would break the package. Another
consequence of this is that ``build``-only dependencies do not affect the
hash of the package. The same is true for ``test`` dependencies.
If the dependency type is not specified, Spack uses a default of
``('build', 'link')``. This is the common case for compiler languages.
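As a quick sketch (the package and dependency names are only illustrative),
explicit dependency types are declared like this:

.. code-block:: python

   class MyApp(Package):
       # Needed only while building; can be uninstalled afterwards.
       depends_on('cmake', type='build')
       # The default ('build', 'link'): built against and linked in.
       depends_on('zlib', type=('build', 'link'))
       # Needed at build time and whenever the package runs.
       depends_on('python', type=('build', 'run'))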
@@ -2420,9 +2362,9 @@ Influence how dependents are built or run
Spack provides a mechanism for dependencies to influence the
environment of their dependents by overriding the
:meth:`setup_dependent_run_environment <spack.package_base.PackageBase.setup_dependent_run_environment>`
:meth:`setup_dependent_run_environment <spack.package.PackageBase.setup_dependent_run_environment>`
or the
:meth:`setup_dependent_build_environment <spack.package_base.PackageBase.setup_dependent_build_environment>`
:meth:`setup_dependent_build_environment <spack.package.PackageBase.setup_dependent_build_environment>`
methods.
The Qt package, for instance, uses this call:
@@ -2444,7 +2386,7 @@ will have the ``PYTHONPATH``, ``PYTHONHOME`` and ``PATH`` environment
variables set appropriately before starting the installation. To make things
even simpler the ``python setup.py`` command is also inserted into the module
scope of dependents by overriding a third method called
:meth:`setup_dependent_package <spack.package_base.PackageBase.setup_dependent_package>`
:meth:`setup_dependent_package <spack.package.PackageBase.setup_dependent_package>`
:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/python/package.py
@@ -2601,7 +2543,7 @@ from being linked in at activation time.
Views
-----
The ``spack view`` command can be
As covered in :ref:`filesystem-views`, the ``spack view`` command can be
used to symlink a number of packages into a merged prefix. The methods of
``PackageViewMixin`` can be overridden to customize how packages are added
to views. Generally this can be used to create copies of specific files rather
@@ -2802,256 +2744,6 @@ Suppose a user invokes ``spack install`` like this:
Spack will fail with a constraint violation, because the version of
MPICH requested is too low for the ``mpi`` requirement in ``foo``.
.. _custom-attributes:
------------------
Custom attributes
------------------
Often a package will need to provide attributes for dependents to query
various details about what it provides. While any number of custom
attributes can be implemented by a package, the four specific attributes
described below are always available on every package, with default
implementations and the ability to provide alternate implementations
for any virtual packages provided:
=========== =========================================== ==========================
Attribute   Purpose                                     Default
=========== =========================================== ==========================
``home``    The installation path for the package       ``spec.prefix``
``command`` An executable command for the package       | ``spec.name`` found in
                                                        | ``.home.bin``
``headers`` A list of headers provided by the package   | All headers searched
                                                        | recursively in
                                                        | ``.home.include``
``libs``    A list of libraries provided by the package | ``lib{spec.name}``
                                                        | searched recursively in
                                                        | ``.home`` starting with
                                                        | ``lib``, ``lib64``, then
                                                        | the rest of ``.home``
=========== =========================================== ==========================
Each of these can be customized by implementing the relevant attribute
as a ``@property`` in the package's class:
.. code-block:: python
   :linenos:

   class Foo(Package):
       ...
       @property
       def libs(self):
           # The library provided by Foo is libMyFoo.so
           return find_libraries('libMyFoo', root=self.home, recursive=True)
A package may also provide a custom implementation of each attribute
for the virtual packages it provides by implementing the
``virtualpackagename_attributename`` property in the package's class.
The implementation used is the first one found from:
#. Specialized virtual: ``Package.virtualpackagename_attributename``
#. Generic package: ``Package.attributename``
#. Default
The use of customized attributes is demonstrated in the next example.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Example: Customized attributes for virtual packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider a package ``foo`` that can optionally provide two virtual
packages ``bar`` and ``baz``. When both are enabled the installation tree
appears as follows:
.. code-block:: console
include/foo.h
include/bar/bar.h
lib64/libFoo.so
lib64/libFooBar.so
baz/include/baz/baz.h
baz/lib/libFooBaz.so
The install tree shows that ``foo`` is providing the header ``include/foo.h``
and library ``lib64/libFoo.so`` in its install prefix. The virtual
package ``bar`` is providing ``include/bar/bar.h`` and library
``lib64/libFooBar.so``, also in ``foo``'s install prefix. The ``baz``
package, however, is provided in the ``baz`` subdirectory of ``foo``'s
prefix with the ``include/baz/baz.h`` header and ``lib/libFooBaz.so``
library. Such a package could implement the optional attributes as
follows:
.. code-block:: python
   :linenos:

   class Foo(Package):
       ...
       variant('bar', default=False, description='Enable the Foo implementation of bar')
       variant('baz', default=False, description='Enable the Foo implementation of baz')
       ...
       provides('bar', when='+bar')
       provides('baz', when='+baz')
       ...

       # Just the foo headers
       @property
       def headers(self):
           return find_headers('foo', root=self.home.include, recursive=False)

       # Just the foo libraries
       @property
       def libs(self):
           return find_libraries('libFoo', root=self.home, recursive=True)

       # The header provided by the bar virtual package
       @property
       def bar_headers(self):
           return find_headers('bar/bar.h', root=self.home.include, recursive=False)

       # The library provided by the bar virtual package
       @property
       def bar_libs(self):
           return find_libraries('libFooBar', root=self.home, recursive=True)

       # The baz virtual package home
       @property
       def baz_home(self):
           return self.prefix.baz

       # The header provided by the baz virtual package
       @property
       def baz_headers(self):
           return find_headers('baz/baz', root=self.baz_home.include, recursive=False)

       # The library provided by the baz virtual package
       @property
       def baz_libs(self):
           return find_libraries('libFooBaz', root=self.baz_home, recursive=True)
Now consider another package, ``foo-app``, depending on all three:
.. code-block:: python
   :linenos:

   class FooApp(CMakePackage):
       ...
       depends_on('foo')
       depends_on('bar')
       depends_on('baz')
The resulting spec objects for its dependencies show the result of
the above attribute implementations:
.. code-block:: python
# The core headers and libraries of the foo package
>>> spec['foo']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['foo'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# home defaults to the package install prefix without an explicit implementation
>>> spec['foo'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# foo headers from the foo prefix
>>> spec['foo'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include/foo.h',
])
# foo include directories from the foo prefix
>>> spec['foo'].headers.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include']
# foo libraries from the foo prefix
>>> spec['foo'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64/libFoo.so',
])
# foo library directories from the foo prefix
>>> spec['foo'].libs.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64']
.. code-block:: python
# The virtual bar package in the same prefix as foo
# bar resolves to the foo package
>>> spec['bar']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['bar'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# home defaults to the foo prefix without either a Foo.bar_home
# or Foo.home implementation
>>> spec['bar'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# bar header in the foo prefix
>>> spec['bar'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include/bar/bar.h'
])
# bar include dirs from the foo prefix
>>> spec['bar'].headers.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include']
# bar library from the foo prefix
>>> spec['bar'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64/libFooBar.so'
])
# bar library directories from the foo prefix
>>> spec['bar'].libs.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64']
.. code-block:: python
# The virtual baz package in a subdirectory of foo's prefix
# baz resolves to the foo package
>>> spec['baz']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['baz'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# baz_home implementation provides the subdirectory inside the foo prefix
>>> spec['baz'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz'
# baz headers in the baz subdirectory of the foo prefix
>>> spec['baz'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/include/baz/baz.h'
])
# baz include directories in the baz subdirectory of the foo prefix
>>> spec['baz'].headers.directories
[
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/include'
]
# baz libraries in the baz subdirectory of the foo prefix
>>> spec['baz'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib/libFooBaz.so'
])
# baz library directories in the baz subdirectory of the foo prefix
>>> spec['baz'].libs.directories
[
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib'
]
.. _abstract-and-concrete:
-------------------------
@@ -3299,7 +2991,7 @@ The classes that are currently provided by Spack are:
+----------------------------------------------------------+----------------------------------+
| **Base Class** | **Purpose** |
+==========================================================+==================================+
| :class:`~spack.package_base.Package` | General base class not |
| :class:`~spack.package.Package` | General base class not |
| | specialized for any build system |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.makefile.MakefilePackage` | Specialized class for packages |
@@ -3430,7 +3122,7 @@ for the install phase is:
For those not used to Python instance methods, this is the
package itself. In this case it's an instance of ``Foo``, which
extends ``Package``. For API docs on Package objects, see
:py:class:`Package <spack.package_base.Package>`.
:py:class:`Package <spack.package.Package>`.
``spec``
This is the concrete spec object created by Spack from an
@@ -5753,24 +5445,6 @@ Version Lists
Spack packages should list supported versions with the newest first.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using ``home`` vs ``prefix``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``home`` and ``prefix`` are both attributes that can be queried on a
package's dependencies, often when passing configure arguments pointing to the
location of a dependency. The difference is that while ``prefix`` is the
location on disk where a concrete package resides, ``home`` is the `logical`
location where a package resides, which may differ from ``prefix`` in
the case of virtual packages or other special circumstances. For most use
cases inside a package, its dependency locations can be accessed via either
``self.spec['foo'].home`` or ``self.spec['foo'].prefix``. Specific packages
that should be consumed by dependents via ``.home`` instead of ``.prefix``
should be noted in their respective documentation.
See :ref:`custom-attributes` for more details and an example implementing
a custom ``home`` attribute.
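For instance, a dependent package might point its build system at the logical
location of a dependency like this (a sketch; the package and configure flag
are hypothetical):

.. code-block:: python

   class Bar(AutotoolsPackage):
       depends_on('foo')

       def configure_args(self):
           # Prefer home over prefix so virtual providers rooted in a
           # subdirectory of their prefix are handled correctly.
           return ['--with-foo={0}'.format(self.spec['foo'].home)]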
---------------------------
Packaging workflow commands
---------------------------

View File

@@ -115,8 +115,7 @@ And here's the spack environment built by the pipeline represented as a
   spack:
     view: false
     concretizer:
       unify: false
     concretization: separately
definitions:
- pkgs:

View File

@@ -1,207 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
=====================================
Using Spack to Replace Homebrew/Conda
=====================================
Spack is an incredibly powerful package manager, designed for supercomputers
where users have diverse installation needs. But Spack can also be used to
handle simple single-user installations on your laptop. Most macOS users are
already familiar with package managers like Homebrew and Conda, where all
installed packages are symlinked to a single central location like ``/usr/local``.
In this section, we will show you how to emulate the behavior of Homebrew/Conda
using :ref:`environments`!
-----
Setup
-----
First, let's create a new environment. We'll assume that Spack is already set up
correctly, and that you've already sourced the setup script for your shell.
To create a new environment, simply run:
.. code-block:: console
$ spack env create myenv
Here, *myenv* can be anything you want to name your environment. Next, we can add
a list of packages we would like to install into our environment. Let's say we
want a newer version of Bash than the one that comes with macOS, and we want a
few Python libraries. We can run:
.. code-block:: console
$ spack -e myenv add bash@5 python py-numpy py-scipy py-matplotlib
Each package can be listed on a separate line, or combined into a single line like we did above.
Notice that we're explicitly asking for Bash 5 here. You can use any spec
you would normally use on the command line with other Spack commands.
Next, we want to manually configure a couple of things:
.. code-block:: console
$ spack -e myenv config edit
.. code-block:: yaml
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
# configuration settings.
   spack:
     # add package specs to the `specs` list
     specs: [bash@5, python, py-numpy, py-scipy, py-matplotlib]
     view: true
You can see the packages we added earlier in the ``specs:`` section. If you
ever want to add more packages, you can either use ``spack add`` or manually
edit this file.
We also need to change the ``concretizer:unify`` option. By default, Spack
concretizes each spec *separately*, allowing multiple versions of the same
package to coexist. Since we want a single consistent environment, we want to
concretize all of the specs *together*.
Here is what your ``spack.yaml`` looks like with this new setting:
.. code-block:: yaml
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
# configuration settings.
   spack:
     # add package specs to the `specs` list
     specs: [bash@5, python, py-numpy, py-scipy, py-matplotlib]
     view: true
     concretizer:
       unify: true
^^^^^^^^^^^^^^^^
Symlink location
^^^^^^^^^^^^^^^^
Spack symlinks all installations to ``/Users/me/spack/var/spack/environments/myenv/.spack-env/view``,
which is the default when ``view: true``.
You can actually change this to any directory you want. For example, Homebrew
uses ``/usr/local``, while Conda uses ``/Users/me/anaconda``. In order to access
files in these locations, you need to update ``PATH`` and other environment variables
to point to them. Activating the Spack environment does this automatically, but
you can also manually set them in your ``.bashrc``.
.. warning::
There are several reasons why you shouldn't use ``/usr/local``:
1. If you are on macOS 10.11+ (El Capitan and newer), Apple makes it hard
for you. You may notice permissions issues on ``/usr/local`` due to their
`System Integrity Protection <https://support.apple.com/en-us/HT204899>`_.
By default, users don't have permissions to install anything in ``/usr/local``,
and you can't even change this using ``sudo chown`` or ``sudo chmod``.
2. Other package managers like Homebrew will try to install things to the
same directory. If you plan on using Homebrew in conjunction with Spack,
don't symlink things to ``/usr/local``.
3. If you are on a shared workstation, or don't have sudo privileges, you
can't do this.
If you still want to do this anyway, there are several ways around SIP.
You could disable SIP by booting into recovery mode and running
``csrutil disable``, but this is not recommended, as it can open up your OS
to security vulnerabilities. Another technique is to run ``spack concretize``
and ``spack install`` using ``sudo``. This is also not recommended.
The safest way I've found is to create your installation directories using
sudo, then change ownership back to the user like so:
.. code-block:: bash
   for directory in .spack bin contrib include lib man share
   do
     sudo mkdir -p /usr/local/$directory
     sudo chown $(id -un):$(id -gn) /usr/local/$directory
   done
Depending on the packages you install in your environment, the exact list of
directories you need to create may vary. You may also find some packages
like Java libraries that install a single file to the installation prefix
instead of in a subdirectory. In this case, the action is the same, just replace
``mkdir -p`` with ``touch`` in the for-loop above.
But again, it's safer just to use the default symlink location.
------------
Installation
------------
To actually concretize the environment, run:
.. code-block:: console
$ spack -e myenv concretize
This will tell you which, if any, packages are already installed, and alert you
to any conflicting specs.
To actually install these packages and symlink them to your ``view:``
directory, simply run:
.. code-block:: console
$ spack -e myenv install
$ spack env activate myenv
Now, when you type ``which python3``, it should find the one you just installed.
In order to change the default shell to our newer Bash installation, we first
need to add it to this list of acceptable shells. Run:
.. code-block:: console
$ sudo vim /etc/shells
and add the absolute path to your bash executable. Then run:
.. code-block:: console
$ chsh -s /path/to/bash
Now, when you log out and log back in, ``echo $SHELL`` should point to the
newer version of Bash.
---------------------------
Updating Installed Packages
---------------------------
Let's say you upgraded to a new version of macOS, or a new version of Python
was released, and you want to rebuild your entire software stack. To do this,
simply run the following commands:
.. code-block:: console
$ spack env activate myenv
$ spack concretize --force
$ spack install
The ``--force`` flag tells Spack to overwrite its previous concretization
decisions, allowing you to choose a new version of Python. If any of the new
packages like Bash are already installed, ``spack install`` won't re-install
them, it will keep the symlinks in place.
--------------
Uninstallation
--------------
If you decide that Spack isn't right for you, uninstallation is simple.
Just run:
.. code-block:: console
$ spack env activate myenv
$ spack uninstall --all
This will uninstall all packages in your environment and remove the symlinks.

View File

@@ -5,9 +5,9 @@
.. _repositories:
=================================
Package Repositories (repos.yaml)
=================================
=============================
Package Repositories
=============================
Spack comes with thousands of built-in package recipes in
``var/spack/repos/builtin/``. This is a **package repository** -- a

View File

@@ -1,11 +1,10 @@
# These dependencies should be installed using pip in order
# to build the documentation.
sphinx>=3.4,!=4.1.2,!=5.1.0
sphinx>=3.4,!=4.1.2
sphinxcontrib-programoutput
sphinx-rtd-theme
python-levenshtein
# Restrict to docutils <0.17 to workaround a list rendering issue in sphinx.
# https://stackoverflow.com/questions/67542699
docutils <0.17
pygments <2.13

View File

@@ -25,5 +25,4 @@ spack:
- subversion
# Plotting
- graphviz
concretizer:
unify: true
concretization: together

View File

@@ -1,5 +1,5 @@
Name, Supported Versions, Notes, Requirement Reason
Python, 2.7/3.6-3.10, , Interpreter for Spack
Python, 2.7/3.5-3.9, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
@@ -7,7 +7,7 @@ bash, , , Compiler wrappers
tar, , , Extract/create archives
gzip, , , Compress/Decompress archives
unzip, , , Compress/Decompress archives
bzip2, , , Compress/Decompress archives
bzip, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
@@ -15,4 +15,4 @@ gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories
hg, , Optional, Manage Software Repositories
Python header files, , Optional (e.g. ``python3-dev`` on Debian), Bootstrapping from sources
Python header files, , Optional (e.g. ``python3-dev`` on Debian), Bootstrapping from sources

1193
lib/spack/docs/workflows.rst Normal file

File diff suppressed because it is too large

14
lib/spack/env/cc vendored
View File

@@ -1,4 +1,4 @@
#!/bin/sh -f
#!/bin/sh
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
@@ -241,28 +241,28 @@ case "$command" in
mode=cpp
debug_flags="-g"
;;
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe)
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc)
command="$SPACK_CC"
language="C"
comp="CC"
lang_flags=C
debug_flags="-g"
;;
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++)
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC)
command="$SPACK_CXX"
language="C++"
comp="CXX"
lang_flags=CXX
debug_flags="-g"
;;
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang)
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt)
command="$SPACK_FC"
language="Fortran 90"
comp="FC"
lang_flags=F
debug_flags="-g"
;;
f77|xlf|xlf_r|pgf77|amdflang)
f77|xlf|xlf_r|pgf77)
command="$SPACK_F77"
language="Fortran 77"
comp="F77"
@@ -768,9 +768,7 @@ if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
echo "[$mode] ${full_command_list}" >> "$output_log"
fi
# Execute the full command, preserving spaces with IFS set

View File

@@ -1 +0,0 @@
../cc

View File

@@ -1 +0,0 @@
../cpp

View File

@@ -1 +0,0 @@
../fc

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.4 (commit b8eea9df2b4204ff27d204452cd46f5199a0b423)
* Version: 0.1.2 (commit 85757b6666422fca86aa882a769bf78b0f992f54)
argparse
--------

View File

@@ -61,7 +61,7 @@ def proc_cpuinfo():
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
with open("/proc/cpuinfo") as file:
for line in file:
key, separator, value = line.partition(":")
@@ -80,46 +80,26 @@ def proc_cpuinfo():
def _check_output(args, env):
output = subprocess.Popen( # pylint: disable=consider-using-with
args, stdout=subprocess.PIPE, env=env
).communicate()[0]
output = subprocess.Popen(args, stdout=subprocess.PIPE, env=env).communicate()[0]
return six.text_type(output.decode("utf-8"))
def _machine():
""" "Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin, trust what Python tells us
if operating_system != "Darwin":
return platform.machine()
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
#
# See: https://bugs.python.org/issue42704
output = _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return "aarch64"
return "x86_64"
@info_dict(operating_system="Darwin")
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == "x86_64":
if platform.machine() == "x86_64":
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
@@ -145,18 +125,6 @@ def sysctl(*args):
return info
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
return child_environment
def adjust_raw_flags(info):
"""Adjust the flags detected on the system to homogenize
slightly different representations.
@@ -216,7 +184,12 @@ def compatible_microarchitectures(info):
Args:
info (dict): dictionary containing information on the host cpu
"""
architecture_family = _machine()
architecture_family = platform.machine()
# On Apple M1 platform.machine() returns "arm64" instead of "aarch64"
# so we should normalize the name here
if architecture_family == "arm64":
architecture_family = "aarch64"
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
@@ -271,7 +244,12 @@ def compatibility_check(architecture_family):
architecture_family = (architecture_family,)
def decorator(func):
COMPATIBILITY_CHECKS.update({family: func for family in architecture_family})
# pylint: disable=fixme
# TODO: on removal of Python 2.6 support this can be re-written as
# TODO: an update + a dict comprehension
for arch_family in architecture_family:
COMPATIBILITY_CHECKS[arch_family] = func
return func
return decorator
@@ -310,7 +288,7 @@ def compatibility_check_for_x86_64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)
@@ -325,9 +303,8 @@ def compatibility_check_for_aarch64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
# On macOS it seems impossible to get all the CPU features with syctl info
and (target.features.issubset(features) or platform.system() == "Darwin")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)

View File

@@ -11,7 +11,7 @@
try:
from collections.abc import MutableMapping # novm
except ImportError:
from collections import MutableMapping # pylint: disable=deprecated-class
from collections import MutableMapping
class LazyDictionary(MutableMapping):
@@ -56,7 +56,7 @@ def _load_json_file(json_file):
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r") as file: # pylint: disable=unspecified-encoding
with open(filename, "r") as file:
return json.load(file)
return _factory

View File

@@ -85,21 +85,7 @@
"intel": [
{
"versions": ":",
"name": "x86-64",
"flags": "-march={name} -mtune=generic"
}
],
"oneapi": [
{
"versions": ":",
"name": "x86-64",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": ":",
"name": "x86-64",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
@@ -143,20 +129,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -214,20 +186,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -290,20 +248,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -344,19 +288,8 @@
"intel": [
{
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
}
@@ -400,18 +333,6 @@
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -463,20 +384,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -525,20 +432,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -597,18 +490,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -669,18 +550,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -746,18 +615,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -815,18 +672,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -887,18 +732,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -965,20 +798,6 @@
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1049,20 +868,6 @@
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1132,18 +937,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1211,18 +1004,6 @@
"versions": "19.0.1:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1317,20 +1098,6 @@
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1375,20 +1142,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
]
}
},
@@ -1439,20 +1192,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1507,20 +1246,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1576,20 +1301,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
]
}
},
@@ -1649,22 +1360,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1727,22 +1422,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1806,22 +1485,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1880,30 +1543,6 @@
"name": "znver3",
"flags": "-march={name} -mtune={name}"
}
],
"intel": [
{
"versions": "16.0:",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -2149,6 +1788,7 @@
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
@@ -2181,26 +1821,18 @@
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "8:10.2",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve -msve-vector-bits=512"
},
{
"versions": "10.3:",
"flags": "-mcpu=a64fx -msve-vector-bits=512"
"versions": "8:",
"flags": "-march=armv8.2-a+crc+aes+sha2+fp16+sve -msve-vector-bits=512"
}
],
"clang": [
{
"versions": "3.9:4.9",
"flags": "-march=armv8.2-a+crc+sha2+fp16"
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "5:10",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve"
},
{
"versions": "11:",
"flags": "-mcpu=a64fx"
"versions": "5:",
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
}
],
"arm": [
@@ -2322,40 +1954,7 @@
"m1": {
"from": ["aarch64"],
"vendor": "Apple",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"flagm2",
"frint"
],
"features": [],
"compilers": {
"gcc": [
{
@@ -2365,22 +1964,14 @@
],
"clang" : [
{
"versions": "9.0:12.0",
"versions": "9.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
],
"apple-clang": [
{
"versions": "11.0:12.5",
"versions": "11.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
]
}
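The JSON above maps each microarchitecture to per-compiler lists of records carrying a version range, an optional target name, and a flag template in which {name} is substituted. As a rough illustration of how such records could be resolved for a given compiler version (this helper is a sketch under that reading of the data, not Spack's actual API; all names here are made up):

    def optimization_flags(entries, compiler_version):
        # Pick the first record whose 'versions' range admits compiler_version.
        # A range 'lo:hi' is read as inclusive at both ends, with an empty
        # bound open, so ':' matches every version.
        def as_tuple(v):
            return tuple(int(p) for p in v.split("."))

        def in_range(spec, version):
            lo, _, hi = spec.partition(":")
            if lo and as_tuple(version) < as_tuple(lo):
                return False
            if hi and as_tuple(version) > as_tuple(hi):
                return False
            return True

        for entry in entries:
            if in_range(entry["versions"], compiler_version):
                # entries without a 'name' use flags with no {name} placeholder
                return entry["flags"].format(name=entry.get("name", ""))
        return None

    # e.g. optimization_flags(targets["a64fx"]["compilers"]["gcc"], "10.3")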


@@ -29,8 +29,8 @@ class Command(object):
- optionals: list of optional arguments (list)
- subcommands: list of subcommand parsers (list)
"""
def __init__(self, prog, description, usage, positionals, optionals, subcommands):
def __init__(self, prog, description, usage,
positionals, optionals, subcommands):
self.prog = prog
self.description = description
self.usage = usage
@@ -71,15 +71,15 @@ def parse(self, parser, prog):
"""
self.parser = parser
split_prog = parser.prog.split(" ")
split_prog = parser.prog.split(' ')
split_prog[-1] = prog
prog = " ".join(split_prog)
prog = ' '.join(split_prog)
description = parser.description
fmt = parser._get_formatter()
actions = parser._actions
groups = parser._mutually_exclusive_groups
usage = fmt._format_usage(None, actions, groups, "").strip()
usage = fmt._format_usage(None, actions, groups, '').strip()
# Go through actions and split them into optionals, positionals,
# and subcommands
@@ -90,8 +90,8 @@ def parse(self, parser, prog):
if action.option_strings:
flags = action.option_strings
dest_flags = fmt._format_action_invocation(action)
help = self._expand_help(action) if action.help else ""
help = help.replace("\n", " ")
help = self._expand_help(action) if action.help else ''
help = help.replace('\n', ' ')
optionals.append((flags, dest_flags, help))
elif isinstance(action, argparse._SubParsersAction):
for subaction in action._choices_actions:
@@ -100,19 +100,20 @@ def parse(self, parser, prog):
# Look for aliases of the form 'name (alias, ...)'
if self.aliases:
match = re.match(r"(.*) \((.*)\)", subaction.metavar)
match = re.match(r'(.*) \((.*)\)', subaction.metavar)
if match:
aliases = match.group(2).split(", ")
aliases = match.group(2).split(', ')
for alias in aliases:
subparser = action._name_parser_map[alias]
subcommands.append((subparser, alias))
else:
args = fmt._format_action_invocation(action)
help = self._expand_help(action) if action.help else ""
help = help.replace("\n", " ")
help = self._expand_help(action) if action.help else ''
help = help.replace('\n', ' ')
positionals.append((args, help))
return Command(prog, description, usage, positionals, optionals, subcommands)
return Command(
prog, description, usage, positionals, optionals, subcommands)
def format(self, cmd):
"""Returns the string representation of a single node in the
@@ -160,13 +161,14 @@ def write(self, parser):
raise
_rst_levels = ["=", "-", "^", "~", ":", "`"]
_rst_levels = ['=', '-', '^', '~', ':', '`']
class ArgparseRstWriter(ArgparseWriter):
"""Write argparse output as rst sections."""
def __init__(self, prog, out=None, aliases=False, rst_levels=_rst_levels):
def __init__(self, prog, out=None, aliases=False,
rst_levels=_rst_levels):
"""Create a new ArgparseRstWriter.
Parameters:
@@ -215,12 +217,11 @@ def begin_command(self, prog):
{1}
{2}
""".format(
prog.replace(" ", "-"), prog, self.rst_levels[self.level] * len(prog)
)
""".format(prog.replace(' ', '-'), prog,
self.rst_levels[self.level] * len(prog))
def description(self, description):
return description + "\n\n"
return description + '\n\n'
def usage(self, usage):
return """\
@@ -228,39 +229,33 @@ def usage(self, usage):
{0}
""".format(
usage
)
""".format(usage)
def begin_positionals(self):
return "\n**Positional arguments**\n\n"
return '\n**Positional arguments**\n\n'
def positional(self, name, help):
return """\
{0}
{1}
""".format(
name, help
)
""".format(name, help)
def end_positionals(self):
return ""
return ''
def begin_optionals(self):
return "\n**Optional arguments**\n\n"
return '\n**Optional arguments**\n\n'
def optional(self, opts, help):
return """\
``{0}``
{1}
""".format(
opts, help
)
""".format(opts, help)
def end_optionals(self):
return ""
return ''
def begin_subcommands(self, subcommands):
string = """
@@ -272,10 +267,11 @@ def begin_subcommands(self, subcommands):
"""
for cmd, _ in subcommands:
prog = re.sub(r"^[^ ]* ", "", cmd.prog)
string += " * :ref:`{0} <{1}>`\n".format(prog, cmd.prog.replace(" ", "-"))
prog = re.sub(r'^[^ ]* ', '', cmd.prog)
string += ' * :ref:`{0} <{1}>`\n'.format(
prog, cmd.prog.replace(' ', '-'))
return string + "\n"
return string + '\n'
class ArgparseCompletionWriter(ArgparseWriter):
@@ -310,11 +306,9 @@ def format(self, cmd):
# Flatten lists of lists
optionals = [x for xx in optionals for x in xx]
return (
self.start_function(cmd.prog)
+ self.body(positionals, optionals, subcommands)
+ self.end_function(cmd.prog)
)
return (self.start_function(cmd.prog) +
self.body(positionals, optionals, subcommands) +
self.end_function(cmd.prog))
def start_function(self, prog):
"""Returns the syntax needed to begin a function definition.
@@ -325,8 +319,8 @@ def start_function(self, prog):
Returns:
str: the function definition beginning
"""
name = prog.replace("-", "_").replace(" ", "_")
return "\n_{0}() {{".format(name)
name = prog.replace('-', '_').replace(' ', '_')
return '\n_{0}() {{'.format(name)
def end_function(self, prog=None):
"""Returns the syntax needed to end a function definition.
@@ -337,7 +331,7 @@ def end_function(self, prog=None):
Returns:
str: the function definition ending
"""
return "}\n"
return '}\n'
def body(self, positionals, optionals, subcommands):
"""Returns the body of the function.
@@ -350,7 +344,7 @@ def body(self, positionals, optionals, subcommands):
Returns:
str: the function body
"""
return ""
return ''
def positionals(self, positionals):
"""Returns the syntax for reporting positional arguments.
@@ -361,7 +355,7 @@ def positionals(self, positionals):
Returns:
str: the syntax for positional arguments
"""
return ""
return ''
def optionals(self, optionals):
"""Returns the syntax for reporting optional flags.
@@ -372,7 +366,7 @@ def optionals(self, optionals):
Returns:
str: the syntax for optional flags
"""
return ""
return ''
def subcommands(self, subcommands):
"""Returns the syntax for reporting subcommands.
@@ -383,4 +377,4 @@ def subcommands(self, subcommands):
Returns:
str: the syntax for subcommand parsers
"""
return ""
return ''
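For orientation, the writer classes in this file are driven by handing them a fully built argparse parser; a minimal sketch of that flow, using the ArgparseRstWriter defined above on a made-up parser:

    import argparse
    import sys

    parser = argparse.ArgumentParser(prog="demo")
    parser.add_argument("--verbose", action="store_true", help="be chatty")
    sub = parser.add_subparsers(metavar="command")
    sub.add_parser("run", help="run the thing")

    # walk the parser tree and emit one rst section per (sub)command,
    # using rst_levels for the heading underline characters
    writer = ArgparseRstWriter("demo", out=sys.stdout)
    writer.write(parser)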


@@ -18,22 +18,22 @@
map = map
zip = zip
from itertools import zip_longest as zip_longest # novm # noqa: F401
from urllib.parse import urlencode as urlencode # novm # noqa: F401
from urllib.request import urlopen as urlopen # novm # noqa: F401
from urllib.parse import urlencode as urlencode # novm # noqa: F401
from urllib.request import urlopen as urlopen # novm # noqa: F401
if sys.version_info >= (3, 3):
from collections.abc import Hashable as Hashable # novm
from collections.abc import Iterable as Iterable # novm
from collections.abc import Mapping as Mapping # novm
from collections.abc import MutableMapping as MutableMapping # novm
from collections.abc import Hashable as Hashable # novm
from collections.abc import Iterable as Iterable # novm
from collections.abc import Mapping as Mapping # novm
from collections.abc import MutableMapping as MutableMapping # novm
from collections.abc import MutableSequence as MutableSequence # novm
from collections.abc import MutableSet as MutableSet # novm
from collections.abc import Sequence as Sequence # novm
from collections.abc import MutableSet as MutableSet # novm
from collections.abc import Sequence as Sequence # novm
else:
from collections import Hashable as Hashable # noqa: F401
from collections import Iterable as Iterable # noqa: F401
from collections import Mapping as Mapping # noqa: F401
from collections import MutableMapping as MutableMapping # noqa: F401
from collections import Hashable as Hashable # noqa: F401
from collections import Iterable as Iterable # noqa: F401
from collections import Mapping as Mapping # noqa: F401
from collections import MutableMapping as MutableMapping # noqa: F401
from collections import MutableSequence as MutableSequence # noqa: F401
from collections import MutableSet as MutableSet # noqa: F401
from collections import Sequence as Sequence # noqa: F401
from collections import MutableSet as MutableSet # noqa: F401
from collections import Sequence as Sequence # noqa: F401
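The value of this shim is that the Python-version test happens once, here, and every caller imports the ABCs from this one module; the same pattern in miniature:

    import sys

    if sys.version_info >= (3, 3):
        from collections.abc import Mapping  # noqa: F401
    else:  # Python 2 and very old Python 3
        from collections import Mapping  # noqa: F401

    # downstream code then writes `from llnl.util.compat import Mapping`
    # and never repeats the version check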

File diff suppressed because it is too large


@@ -11,17 +11,15 @@
import os
import re
import sys
import traceback
from datetime import datetime, timedelta
from typing import Any, Callable, Iterable, List, Tuple
import six
from six import string_types
from llnl.util.compat import MutableMapping, MutableSequence, zip_longest
from llnl.util.compat import MutableMapping, zip_longest
# Ignore emacs backups when listing modules
ignore_modules = [r"^\.#", "~$"]
ignore_modules = [r'^\.#', '~$']
def index_by(objects, *funcs):
@@ -91,9 +89,9 @@ def index_by(objects, *funcs):
def caller_locals():
"""This will return the locals of the *parent* of the caller.
This allows a function to insert variables into its caller's
scope. Yes, this is some black magic, and yes it's useful
for implementing things like depends_on and provides.
This allows a function to insert variables into its caller's
scope. Yes, this is some black magic, and yes it's useful
for implementing things like depends_on and provides.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
@@ -105,7 +103,7 @@ def caller_locals():
def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
enclosing module's name.
enclosing module's name.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
@@ -115,13 +113,12 @@ def get_calling_module_name():
finally:
del stack
if "__module__" not in caller_locals:
raise RuntimeError(
"Must invoke get_calling_module_name() " "from inside a class definition!"
)
if '__module__' not in caller_locals:
raise RuntimeError("Must invoke get_calling_module_name() "
"from inside a class definition!")
module_name = caller_locals["__module__"]
base_name = module_name.split(".")[-1]
module_name = caller_locals['__module__']
base_name = module_name.split('.')[-1]
return base_name
@@ -129,8 +126,8 @@ def attr_required(obj, attr_name):
"""Ensure that a class has a required attribute."""
if not hasattr(obj, attr_name):
raise RequiredAttributeError(
"No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
)
"No required attribute '%s' in class '%s'"
% (attr_name, obj.__class__.__name__))
def attr_setdefault(obj, name, value):
@@ -202,35 +199,33 @@ def _memoized_function(*args, **kwargs):
# TypeError is raised when indexing into a dict if the key is unhashable.
raise six.raise_from(
UnhashableArguments(
"args + kwargs '{}' was not hashable for function '{}'".format(
key, func.__name__
),
"args + kwargs '{}' was not hashable for function '{}'"
.format(key, func.__name__),
),
e,
)
e)
return _memoized_function
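The hunk above is only the failure path of the memoizing decorator: when the cache key is unhashable, the TypeError is re-raised as UnhashableArguments (defined later in this file). A minimal sketch of the whole pattern, assuming a key built from the positional args plus sorted kwargs:

    import functools

    def memoized(func):
        cache = {}

        @functools.wraps(func)
        def _memoized_function(*args, **kwargs):
            # keyed on args plus sorted kwargs, so the key must be hashable
            key = (args, tuple(sorted(kwargs.items())))
            try:
                if key not in cache:
                    cache[key] = func(*args, **kwargs)
                return cache[key]
            except TypeError as e:
                # unhashable key lands here; the real code uses
                # six.raise_from for Python 2 compatibility
                raise UnhashableArguments(
                    "args + kwargs '{}' was not hashable for function '{}'".format(
                        key, func.__name__)) from e

        return _memoized_function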
def list_modules(directory, **kwargs):
"""Lists all of the modules, excluding ``__init__.py``, in a
particular directory. Listed packages have no particular
order."""
list_directories = kwargs.setdefault("directories", True)
particular directory. Listed packages have no particular
order."""
list_directories = kwargs.setdefault('directories', True)
for name in os.listdir(directory):
if name == "__init__.py":
if name == '__init__.py':
continue
path = os.path.join(directory, name)
if list_directories and os.path.isdir(path):
init_py = os.path.join(path, "__init__.py")
init_py = os.path.join(path, '__init__.py')
if os.path.isfile(init_py):
yield name
elif name.endswith(".py"):
elif name.endswith('.py'):
if not any(re.search(pattern, name) for pattern in ignore_modules):
yield re.sub(".py$", "", name)
yield re.sub('.py$', '', name)
def decorator_with_or_without_args(decorator):
@@ -260,34 +255,41 @@ def new_dec(*args, **kwargs):
def key_ordering(cls):
"""Decorates a class with extra methods that implement rich comparison
operations and ``__hash__``. The decorator assumes that the class
implements a function called ``_cmp_key()``. The rich comparison
operations will compare objects using this key, and the ``__hash__``
function will return the hash of this key.
operations and ``__hash__``. The decorator assumes that the class
implements a function called ``_cmp_key()``. The rich comparison
operations will compare objects using this key, and the ``__hash__``
function will return the hash of this key.
If a class already has ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, or ``__ge__`` defined, this decorator will overwrite them.
If a class already has ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, or ``__ge__`` defined, this decorator will overwrite them.
Raises:
TypeError: If the class does not have a ``_cmp_key`` method
Raises:
TypeError: If the class does not have a ``_cmp_key`` method
"""
def setter(name, value):
value.__name__ = name
setattr(cls, name, value)
if not has_method(cls, "_cmp_key"):
if not has_method(cls, '_cmp_key'):
raise TypeError("'%s' doesn't define _cmp_key()." % cls.__name__)
setter("__eq__", lambda s, o: (s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
setter("__lt__", lambda s, o: o is not None and s._cmp_key() < o._cmp_key())
setter("__le__", lambda s, o: o is not None and s._cmp_key() <= o._cmp_key())
setter('__eq__',
lambda s, o:
(s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
setter('__lt__',
lambda s, o: o is not None and s._cmp_key() < o._cmp_key())
setter('__le__',
lambda s, o: o is not None and s._cmp_key() <= o._cmp_key())
setter("__ne__", lambda s, o: (s is not o) and (o is None or s._cmp_key() != o._cmp_key()))
setter("__gt__", lambda s, o: o is None or s._cmp_key() > o._cmp_key())
setter("__ge__", lambda s, o: o is None or s._cmp_key() >= o._cmp_key())
setter('__ne__',
lambda s, o:
(s is not o) and (o is None or s._cmp_key() != o._cmp_key()))
setter('__gt__',
lambda s, o: o is None or s._cmp_key() > o._cmp_key())
setter('__ge__',
lambda s, o: o is None or s._cmp_key() >= o._cmp_key())
setter("__hash__", lambda self: hash(self._cmp_key()))
setter('__hash__', lambda self: hash(self._cmp_key()))
return cls
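A short example of the decorator in use: the class defines only _cmp_key, and the comparison operators and __hash__ follow from it:

    @key_ordering
    class Version(object):
        def __init__(self, major, minor):
            self.major, self.minor = major, minor

        def _cmp_key(self):
            # all rich comparisons and __hash__ compare this tuple
            return (self.major, self.minor)

    assert Version(1, 2) < Version(1, 10)
    assert hash(Version(1, 2)) == hash(Version(1, 2))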
@@ -454,7 +456,8 @@ def gt(self, other):
def le(self, other):
if self is other:
return True
return (other is not None) and not lazy_lt(other._cmp_iter, self._cmp_iter)
return (other is not None) and not lazy_lt(other._cmp_iter,
self._cmp_iter)
def ge(self, other):
if self is other:
@@ -484,9 +487,7 @@ def add_func_to_class(name, func):
@lazy_lexicographic_ordering
class HashableMap(MutableMapping):
"""This is a hashable, comparable dictionary. Hash is performed on
a tuple of the values in the dictionary."""
__slots__ = ("dict",)
a tuple of the values in the dictionary."""
def __init__(self):
self.dict = {}
@@ -524,7 +525,7 @@ def copy(self):
def in_function(function_name):
"""True if the caller was called from some function with
the supplied name, False otherwise."""
the supplied name, False otherwise."""
stack = inspect.stack()
try:
for elt in stack[2:]:
@@ -537,25 +538,24 @@ def in_function(function_name):
def check_kwargs(kwargs, fun):
"""Helper for making functions with kwargs. Checks whether the kwargs
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
Example::
Example::
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
# This raises a TypeError:
foo(w='bad kwarg')
# This raises a TypeError:
foo(w='bad kwarg')
"""
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for function %s()."
% (next(iter(kwargs)), fun.__name__)
)
% (next(iter(kwargs)), fun.__name__))
def match_predicate(*args):
@@ -571,7 +571,6 @@ def match_predicate(*args):
* any regex in a list or tuple of regexes matches.
* any predicate in args matches.
"""
def match(string):
for arg in args:
if isinstance(arg, string_types):
@@ -584,39 +583,26 @@ def match(string):
if arg(string):
return True
else:
raise ValueError(
"args to match_predicate must be regex, " "list of regexes, or callable."
)
raise ValueError("args to match_predicate must be regex, "
"list of regexes, or callable.")
return False
return match
def dedupe(sequence, key=None):
"""Yields a stable de-duplication of an hashable sequence by key
def dedupe(sequence):
"""Yields a stable de-duplication of an hashable sequence
Args:
sequence: hashable sequence to be de-duplicated
key: callable applied on values before uniqueness test; identity
by default.
Returns:
stable de-duplication of the sequence
Examples:
Dedupe a list of integers:
[x for x in dedupe([1, 2, 1, 3, 2])] == [1, 2, 3]
[x for x in llnl.util.lang.dedupe([1,-2,1,3,2], key=abs)] == [1, -2, 3]
"""
seen = set()
for x in sequence:
x_key = x if key is None else key(x)
if x_key not in seen:
if x not in seen:
yield x
seen.add(x_key)
seen.add(x)
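In either form the generator is lazy and keeps first occurrences; for example (the key= call is only valid on the side of the diff that still has the key parameter):

    assert list(dedupe([1, 2, 1, 3, 2])) == [1, 2, 3]
    # with the keyed variant, uniqueness is decided on key(x)
    assert list(dedupe([1, -2, 1, 3, 2], key=abs)) == [1, -2, 3]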
def pretty_date(time, now=None):
@@ -648,7 +634,7 @@ def pretty_date(time, now=None):
day_diff = diff.days
if day_diff < 0:
return ""
return ''
if day_diff == 0:
if second_diff < 10:
@@ -706,40 +692,43 @@ def pretty_string_to_date(date_str, now=None):
now = now or datetime.now()
# datetime formats
pattern[re.compile(r"^\d{4}$")] = lambda x: datetime.strptime(x, "%Y")
pattern[re.compile(r"^\d{4}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m")
pattern[re.compile(r"^\d{4}-\d{2}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m-%d")
pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$")] = lambda x: datetime.strptime(
x, "%Y-%m-%d %H:%M"
pattern[re.compile(r'^\d{4}$')] = lambda x: datetime.strptime(x, '%Y')
pattern[re.compile(r'^\d{4}-\d{2}$')] = lambda x: datetime.strptime(
x, '%Y-%m'
)
pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$")] = lambda x: datetime.strptime(
x, "%Y-%m-%d %H:%M:%S"
pattern[re.compile(r'^\d{4}-\d{2}-\d{2}$')] = lambda x: datetime.strptime(
x, '%Y-%m-%d'
)
pattern[re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$')] = \
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M')
pattern[re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$')] = \
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
pretty_regex = re.compile(r"(a|\d+)\s*(year|month|week|day|hour|minute|second)s?\s*ago")
pretty_regex = re.compile(
r'(a|\d+)\s*(year|month|week|day|hour|minute|second)s?\s*ago')
def _n_xxx_ago(x):
how_many, time_period = pretty_regex.search(x).groups()
how_many = 1 if how_many == "a" else int(how_many)
how_many = 1 if how_many == 'a' else int(how_many)
# timedelta natively supports time periods up to 'weeks'.
# To apply month or year we convert to 30 and 365 days
if time_period == "month":
if time_period == 'month':
how_many *= 30
time_period = "day"
elif time_period == "year":
time_period = 'day'
elif time_period == 'year':
how_many *= 365
time_period = "day"
time_period = 'day'
kwargs = {(time_period + "s"): how_many}
kwargs = {(time_period + 's'): how_many}
return now - timedelta(**kwargs)
pattern[pretty_regex] = _n_xxx_ago
# yesterday
callback = lambda x: now - timedelta(days=1)
pattern[re.compile("^yesterday$")] = callback
pattern[re.compile('^yesterday$')] = callback
for regexp, parser in pattern.items():
if bool(regexp.match(date_str)):
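Each pretty string is matched against the compiled patterns and dispatched to its parser; illustratively, with a pinned now:

    from datetime import datetime, timedelta

    now = datetime(2022, 3, 3)
    # '2 months ago' is converted to 2 * 30 days and subtracted from `now`
    assert pretty_string_to_date("2 months ago", now) == now - timedelta(days=60)
    # plain datetime formats go through strptime
    assert pretty_string_to_date("2018-04", now) == datetime(2018, 4, 1)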
@@ -750,6 +739,7 @@ def _n_xxx_ago(x):
class RequiredAttributeError(ValueError):
def __init__(self, message):
super(RequiredAttributeError, self).__init__(message)
@@ -761,7 +751,6 @@ class ObjectWrapper(object):
This class is modeled after the stackoverflow answer:
* http://stackoverflow.com/a/1445289/771663
"""
def __init__(self, wrapped_object):
wrapped_cls = type(wrapped_object)
wrapped_name = wrapped_cls.__name__
@@ -805,7 +794,7 @@ def __getattr__(self, name):
# requested but not yet set. The final 'getattr' line here requires
# 'instance'/'_instance' to be defined or it will enter an infinite
# loop, so protect against that here.
if name in ["_instance", "instance"]:
if name in ['_instance', 'instance']:
raise AttributeError()
return getattr(self.instance, name)
@@ -835,7 +824,7 @@ def __init__(self, ref_function):
self.ref_function = ref_function
def __getattr__(self, name):
if name == "ref_function":
if name == 'ref_function':
raise AttributeError()
return getattr(self.ref_function(), name)
@@ -873,8 +862,8 @@ def load_module_from_file(module_name, module_path):
# This recipe is adapted from https://stackoverflow.com/a/67692/771663
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path) # novm
spec = importlib.util.spec_from_file_location( # novm
module_name, module_path)
module = importlib.util.module_from_spec(spec) # novm
# The module object needs to exist in sys.modules before the
# loader executes the module code.
@@ -889,9 +878,13 @@ def load_module_from_file(module_name, module_path):
except KeyError:
pass
raise
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader( # novm
module_name, module_path)
module = loader.load_module()
elif sys.version_info[0] == 2:
import imp
module = imp.load_source(module_name, module_path)
return module
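Typical use is importing a Python file that is not on sys.path; a sketch with a hypothetical path:

    # hypothetical path; any readable .py file works the same way
    mod = load_module_from_file("my_pkg", "/tmp/my_pkg.py")
    assert mod.__name__ == "my_pkg"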
@@ -923,10 +916,8 @@ def uniq(sequence):
def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args):
return func(*args)
return _wrapper
@@ -935,23 +926,22 @@ class Devnull(object):
See https://stackoverflow.com/a/2929954.
"""
def write(self, *_):
pass
def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example::
replacing intervening elements with '...'. For example::
elide_list([1,2,3,4,5,6], 4)
elide_list([1,2,3,4,5,6], 4)
gives::
gives::
[1, 2, 3, '...', 6]
[1, 2, 3, '...', 6]
"""
if len(line_list) > max_num:
return line_list[: max_num - 1] + ["..."] + line_list[-1:]
return line_list[:max_num - 1] + ['...'] + line_list[-1:]
else:
return line_list
@@ -966,148 +956,3 @@ def nullcontext(*args, **kwargs):
class UnhashableArguments(TypeError):
"""Raise when an @memoized function receives unhashable arg or kwarg values."""
def enum(**kwargs):
"""Return an enum-like class.
Args:
**kwargs: explicit dictionary of enums
"""
return type("Enum", (object,), kwargs)
def stable_partition(
input_iterable, # type: Iterable
predicate_fn, # type: Callable[[Any], bool]
):
# type: (...) -> Tuple[List[Any], List[Any]]
"""Partition the input iterable according to a custom predicate.
Args:
input_iterable: input iterable to be partitioned.
predicate_fn: predicate function accepting an iterable item
as argument.
Return:
Tuple of the list of elements evaluating to True, and
list of elements evaluating to False.
"""
true_items, false_items = [], []
for item in input_iterable:
if predicate_fn(item):
true_items.append(item)
continue
false_items.append(item)
return true_items, false_items
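For example, partitioning integers by parity while preserving input order:

    evens, odds = stable_partition([3, 1, 4, 1, 5, 9, 2, 6],
                                   lambda x: x % 2 == 0)
    assert evens == [4, 2, 6] and odds == [3, 1, 1, 5, 9]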
class TypedMutableSequence(MutableSequence):
"""Base class that behaves like a list, just with a different type.
Client code can inherit from this base class:
class Foo(TypedMutableSequence):
pass
and later perform checks based on types:
if isinstance(l, Foo):
# do something
"""
def __init__(self, iterable):
self.data = list(iterable)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __len__(self):
return len(self.data)
def insert(self, index, item):
self.data.insert(index, item)
def __repr__(self):
return repr(self.data)
def __str__(self):
return str(self.data)
class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self):
self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]]
def __bool__(self):
"""Whether any exceptions were handled."""
return bool(self.exceptions)
def forward(self, context):
# type: (str) -> GroupedExceptionForwarder
"""Return a contextmanager which extracts tracebacks and prefixes a message."""
return GroupedExceptionForwarder(context, self)
def _receive_forwarded(self, context, exc, tb):
# type: (str, Exception, List[str]) -> None
self.exceptions.append((context, exc, tb))
def grouped_message(self, with_tracebacks=True):
# type: (bool) -> str
"""Print out an error message coalescing all the forwarded errors."""
each_exception_message = [
"{0} raised {1}: {2}{3}".format(
context,
exc.__class__.__name__,
exc,
"\n{0}".format("".join(tb)) if with_tracebacks else "",
)
for context, exc, tb in self.exceptions
]
return "due to the following failures:\n{0}".format("\n".join(each_exception_message))
class GroupedExceptionForwarder(object):
"""A contextmanager to capture exceptions and forward them to a
GroupedExceptionHandler."""
def __init__(self, context, handler):
# type: (str, GroupedExceptionHandler) -> None
self._context = context
self._handler = handler
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, tb):
if exc_value is not None:
self._handler._receive_forwarded(
self._context,
exc_value,
traceback.format_tb(tb),
)
# Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
return True
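Together, the handler and forwarder support a collect-then-report loop; a hedged usage sketch:

    handler = GroupedExceptionHandler()

    for name in ("step1", "step2"):
        # each context swallows its exception and records the traceback
        with handler.forward(name):
            raise RuntimeError("%s failed" % name)

    if handler:  # True when anything was forwarded
        print(handler.grouped_message(with_tracebacks=False))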
class classproperty(object):
"""Non-data descriptor to evaluate a class-level property. The function that performs
the evaluation is injected at creation time and take an instance (could be None) and
an owner (i.e. the class that originated the instance)
"""
def __init__(self, callback):
self.callback = callback
def __get__(self, instance, owner):
return self.callback(owner)
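Because the descriptor receives the owning class, it reads like a property at class scope; for instance:

    class Package(object):
        name = "zlib"

        @classproperty
        def fullname(cls):  # evaluated on attribute access; instance may be None
            return "builtin." + cls.name

    assert Package.fullname == "builtin.zlib"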


@@ -10,19 +10,17 @@
import filecmp
import os
import shutil
from collections import OrderedDict
import llnl.util.tty as tty
from llnl.util.filesystem import BaseDirectoryVisitor, mkdirp, touch, traverse_tree
from llnl.util.symlink import islink, symlink
from llnl.util.filesystem import mkdirp, touch, traverse_tree
__all__ = ["LinkTree"]
__all__ = ['LinkTree']
empty_file_name = ".spack-empty"
empty_file_name = '.spack-empty'
def remove_link(src, dest):
if not islink(dest):
if not os.path.islink(dest):
raise ValueError("%s is not a link tree!" % dest)
# remove if dest is a hardlink/symlink to src; this will only
# be false if two packages are merged into a prefix and have a
@@ -31,262 +29,6 @@ def remove_link(src, dest):
os.remove(dest)
class MergeConflict:
"""
The invariant here is that src_a and src_b are both mapped
to dst:
project(src_a) == project(src_b) == dst
"""
def __init__(self, dst, src_a=None, src_b=None):
self.dst = dst
self.src_a = src_a
self.src_b = src_b
class SourceMergeVisitor(BaseDirectoryVisitor):
"""
Visitor that produces actions:
- An ordered list of directories to create in dst
- A list of files to link in dst
- A list of merge conflicts in dst/
"""
def __init__(self, ignore=None):
self.ignore = ignore if ignore is not None else lambda f: False
# When mapping <src root> to <dst root>/<projection>, we need
# to prepend the <projection> bit to the relative path in the
# destination dir.
self.projection = ""
# When a file blocks another file, the conflict can sometimes
# be resolved / ignored (e.g. <prefix>/LICENSE or
# or <site-packages>/<namespace>/__init__.py conflicts can be
# ignored).
self.file_conflicts = []
# When we have to create a dir where a file is, or a file
# where a dir is, we have fatal errors, listed here.
self.fatal_conflicts = []
# What directories we have to make; this is an ordered set,
# so that we have a fast lookup and can run mkdir in order.
self.directories = OrderedDict()
# Files to link. Maps dst_rel to (src_rel, src_root)
self.files = OrderedDict()
def before_visit_dir(self, root, rel_path, depth):
"""
Register a directory if dst / rel_path is not blocked by a file or ignored.
"""
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
# Don't recurse when dir is ignored.
return False
elif proj_rel_path in self.files:
# Can't create a dir where a file is.
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.fatal_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
return False
elif proj_rel_path in self.directories:
# No new directory, carry on.
return True
else:
# Register new directory.
self.directories[proj_rel_path] = (root, rel_path)
return True
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Replace symlinked dirs with actual directories when possible in low depths,
otherwise handle it as a file (i.e. we link to the symlink).
Transforming symlinks into dirs makes it more likely we can merge directories,
e.g. when <prefix>/lib -> <prefix>/subdir/lib.
We only do this when the symlink is pointing into a subdirectory from the
symlink's directory, to avoid potential infinite recursion; and only at a
constant level of nesting, to avoid potential exponential blowups in file
duplication.
"""
if self.ignore(rel_path):
return False
# Only follow symlinked dirs in <prefix>/**/**/*
if depth > 1:
handle_as_dir = False
else:
# Only follow symlinked dirs when pointing deeper
src = os.path.join(root, rel_path)
real_parent = os.path.realpath(os.path.dirname(src))
real_child = os.path.realpath(src)
handle_as_dir = real_child.startswith(real_parent)
if handle_as_dir:
return self.before_visit_dir(root, rel_path, depth)
self.visit_file(root, rel_path, depth)
return False
def visit_file(self, root, rel_path, depth):
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
pass
elif proj_rel_path in self.directories:
# Can't create a file where a dir is; fatal error
src_a_root, src_a_relpath = self.directories[proj_rel_path]
self.fatal_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
elif proj_rel_path in self.files:
# In some cases we can resolve file-file conflicts
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.file_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
else:
# Otherwise register this file to be linked.
self.files[proj_rel_path] = (root, rel_path)
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)
def set_projection(self, projection):
self.projection = os.path.normpath(projection)
# TODO: is this how to check in general for empty projection?
if self.projection == ".":
self.projection = ""
return
# If there is a projection, we'll also create the directories
# it consists of, and check whether that's causing conflicts.
path = ""
for part in self.projection.split(os.sep):
path = os.path.join(path, part)
if path not in self.files:
self.directories[path] = ("<projection>", path)
else:
# Can't create a dir where a file is.
src_a_root, src_a_relpath = self.files[path]
self.fatal_conflicts.append(
MergeConflict(
dst=path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join("<projection>", path),
)
)
class DestinationMergeVisitor(BaseDirectoryVisitor):
"""DestinatinoMergeVisitor takes a SourceMergeVisitor
and:
a. registers additional conflicts when merging
to the destination prefix
b. removes redundant mkdir operations when
directories already exist in the destination
prefix.
This also makes sure that symlinked directories
in the target prefix will never be merged with
directories in the sources directories.
"""
def __init__(self, source_merge_visitor):
self.src = source_merge_visitor
def before_visit_dir(self, root, rel_path, depth):
# If destination dir is a file in a src dir, add a conflict,
# and don't traverse deeper
if rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
return False
# If destination dir was also a src dir, remove the mkdir
# action, and traverse deeper.
if rel_path in self.src.directories:
del self.src.directories[rel_path]
return True
# If the destination dir does not appear in the src dir,
# don't descend into it.
return False
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Symlinked directories in the destination prefix should
be seen as files; we should not accidentally merge
source dir with a symlinked dest dir.
"""
# Always conflict
if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
if rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
# Never descend into symlinked target dirs.
return False
def visit_file(self, root, rel_path, depth):
# Can't merge a file if target already exists
if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
elif rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)
class LinkTree(object):
"""Class to create trees of symbolic links from a source directory.
@@ -297,31 +39,30 @@ class LinkTree(object):
symlinked to, to prevent the source directory from ever being
modified.
"""
def __init__(self, source_root):
if not os.path.exists(source_root):
raise IOError("No such file or directory: '%s'" % source_root)
self._root = source_root
def find_conflict(self, dest_root, ignore=None, ignore_file_conflicts=False):
def find_conflict(self, dest_root, ignore=None,
ignore_file_conflicts=False):
"""Returns the first file in dest that conflicts with src"""
ignore = ignore or (lambda x: False)
conflicts = self.find_dir_conflicts(dest_root, ignore)
if not ignore_file_conflicts:
conflicts.extend(
dst
for src, dst in self.get_file_map(dest_root, ignore).items()
if os.path.exists(dst)
)
dst for src, dst
in self.get_file_map(dest_root, ignore).items()
if os.path.exists(dst))
if conflicts:
return conflicts[0]
def find_dir_conflicts(self, dest_root, ignore):
conflicts = []
kwargs = {"follow_nonexisting": False, "ignore": ignore}
kwargs = {'follow_nonexisting': False, 'ignore': ignore}
for src, dest in traverse_tree(self._root, dest_root, **kwargs):
if os.path.isdir(src):
if os.path.exists(dest) and not os.path.isdir(dest):
@@ -332,7 +73,7 @@ def find_dir_conflicts(self, dest_root, ignore):
def get_file_map(self, dest_root, ignore):
merge_map = {}
kwargs = {"follow_nonexisting": True, "ignore": ignore}
kwargs = {'follow_nonexisting': True, 'ignore': ignore}
for src, dest in traverse_tree(self._root, dest_root, **kwargs):
if not os.path.isdir(src):
merge_map[src] = dest
@@ -354,7 +95,8 @@ def merge_directories(self, dest_root, ignore):
touch(marker)
def unmerge_directories(self, dest_root, ignore):
for src, dest in traverse_tree(self._root, dest_root, ignore=ignore, order="post"):
for src, dest in traverse_tree(
self._root, dest_root, ignore=ignore, order='post'):
if os.path.isdir(src):
if not os.path.exists(dest):
continue
@@ -370,7 +112,8 @@ def unmerge_directories(self, dest_root, ignore):
if os.path.exists(marker):
os.remove(marker)
def merge(self, dest_root, ignore_conflicts=False, ignore=None, link=symlink, relative=False):
def merge(self, dest_root, ignore_conflicts=False, ignore=None,
link=os.symlink, relative=False):
"""Link all files in src into dest, creating directories
if necessary.
@@ -382,7 +125,7 @@ def merge(self, dest_root, ignore_conflicts=False, ignore=None, link=symlink, re
ignore (callable): callable that returns True if a file is to be
ignored in the merge (by default ignore nothing)
link (callable): function to create links with (defaults to llnl.util.symlink)
link (callable): function to create links with (defaults to os.symlink)
relative (bool): create all symlinks relative to the target
(default False)
@@ -392,10 +135,9 @@ def merge(self, dest_root, ignore_conflicts=False, ignore=None, link=symlink, re
ignore = lambda x: False
conflict = self.find_conflict(
dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts
)
dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts)
if conflict:
raise SingleMergeConflictError(conflict)
raise MergeConflictError(conflict)
self.merge_directories(dest_root, ignore)
existing = []
@@ -427,24 +169,7 @@ def unmerge(self, dest_root, ignore=None, remove_file=remove_link):
class MergeConflictError(Exception):
pass
class SingleMergeConflictError(MergeConflictError):
def __init__(self, path):
super(MergeConflictError, self).__init__("Package merge blocked by file: %s" % path)
class MergeConflictSummary(MergeConflictError):
def __init__(self, conflicts):
"""
A human-readable summary of file system view merge conflicts (showing only the
first 3 issues.)
"""
msg = "{0} fatal error(s) when merging prefixes:".format(len(conflicts))
# show the first 3 merge conflicts.
for conflict in conflicts[:3]:
msg += "\n `{0}` and `{1}` both project to `{2}`".format(
conflict.src_a, conflict.src_b, conflict.dst
)
super(MergeConflictSummary, self).__init__(msg)
super(MergeConflictError, self).__init__(
"Package merge blocked by file: %s" % path)


@@ -4,9 +4,9 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import errno
import fcntl
import os
import socket
import sys
import time
from datetime import datetime
from typing import Dict, Tuple # novm
@@ -15,24 +15,22 @@
import spack.util.string
if sys.platform != "win32":
import fcntl
__all__ = [
"Lock",
"LockDowngradeError",
"LockUpgradeError",
"LockTransaction",
"WriteTransaction",
"ReadTransaction",
"LockError",
"LockTimeoutError",
"LockPermissionError",
"LockROFileError",
"CantCreateLockError",
'Lock',
'LockDowngradeError',
'LockUpgradeError',
'LockTransaction',
'WriteTransaction',
'ReadTransaction',
'LockError',
'LockTimeoutError',
'LockPermissionError',
'LockROFileError',
'CantCreateLockError'
]
#: Mapping of supported locks to description
lock_type = {fcntl.LOCK_SH: 'read', fcntl.LOCK_EX: 'write'}
#: A useful replacement for functions that should return True when not provided
#: for example.
@@ -47,7 +45,6 @@ class OpenFile(object):
the file descriptor from the file handle if needed -- or we could make this track
file descriptors as well in the future.
"""
def __init__(self, fh):
self.fh = fh
self.refs = 0
@@ -93,11 +90,11 @@ def get_fh(self, path):
path (str): path to lock file we want a filehandle for
"""
# Open writable files as 'r+' so we can upgrade to write later
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), "r+"
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'
pid = os.getpid()
open_file = None # OpenFile object, if there is one
stat = None # stat result for the lockfile, if it exists
stat = None # stat result for the lockfile, if it exists
try:
# see whether we've seen this inode/pid before
@@ -110,7 +107,7 @@ def get_fh(self, path):
raise
# path does not exist -- fail if we won't be able to create it
parent = os.path.dirname(path) or "."
parent = os.path.dirname(path) or '.'
if not os.access(parent, os.W_OK):
raise CantCreateLockError(path)
@@ -120,7 +117,7 @@ def get_fh(self, path):
# we know path exists but not if it's writable. If it's read-only,
# only open the file for reading (and fail if we're trying to get
# an exclusive (write) lock on it)
os_mode, fh_mode = os.O_RDONLY, "r"
os_mode, fh_mode = os.O_RDONLY, 'r'
fd = os.open(path, os_mode)
fh = os.fdopen(fd, fh_mode)
@@ -163,33 +160,10 @@ def release_fh(self, path):
def _attempts_str(wait_time, nattempts):
# Don't print anything if we succeeded on the first try
if nattempts <= 1:
return ""
return ''
attempts = spack.util.string.plural(nattempts, "attempt")
return " after {0:0.2f}s and {1}".format(wait_time, attempts)
class LockType(object):
READ = 0
WRITE = 1
@staticmethod
def to_str(tid):
ret = "READ"
if tid == LockType.WRITE:
ret = "WRITE"
return ret
@staticmethod
def to_module(tid):
lock = fcntl.LOCK_SH
if tid == LockType.WRITE:
lock = fcntl.LOCK_EX
return lock
@staticmethod
def is_valid(op):
return op == LockType.READ or op == LockType.WRITE
attempts = spack.util.string.plural(nattempts, 'attempt')
return ' after {0:0.2f}s and {1}'.format(wait_time, attempts)
class Lock(object):
@@ -207,7 +181,8 @@ class Lock(object):
overlapping byte ranges in the same file).
"""
def __init__(self, path, start=0, length=0, default_timeout=None, debug=False, desc=""):
def __init__(self, path, start=0, length=0, default_timeout=None,
debug=False, desc=''):
"""Construct a new lock on the file at ``path``.
By default, the lock applies to the whole file. Optionally,
@@ -242,7 +217,7 @@ def __init__(self, path, start=0, length=0, default_timeout=None, debug=False, d
self.debug = debug
# optional debug description
self.desc = " ({0})".format(desc) if desc else ""
self.desc = ' ({0})'.format(desc) if desc else ''
# If the user doesn't set a default timeout, or if they choose
# None, 0, etc. then lock attempts will not time out (unless the
@@ -279,17 +254,17 @@ def _poll_interval_generator(_wait_times=None):
def __repr__(self):
"""Formal representation of the lock."""
rep = "{0}(".format(self.__class__.__name__)
rep = '{0}('.format(self.__class__.__name__)
for attr, value in self.__dict__.items():
rep += "{0}={1}, ".format(attr, value.__repr__())
return "{0})".format(rep.strip(", "))
rep += '{0}={1}, '.format(attr, value.__repr__())
return '{0})'.format(rep.strip(', '))
def __str__(self):
"""Readable string (with key fields) of the lock."""
location = "{0}[{1}:{2}]".format(self.path, self._start, self._length)
timeout = "timeout={0}".format(self.default_timeout)
activity = "#reads={0}, #writes={1}".format(self._reads, self._writes)
return "({0}, {1}, {2})".format(location, timeout, activity)
location = '{0}[{1}:{2}]'.format(self.path, self._start, self._length)
timeout = 'timeout={0}'.format(self.default_timeout)
activity = '#reads={0}, #writes={1}'.format(self._reads, self._writes)
return '({0}, {1}, {2})'.format(location, timeout, activity)
def _lock(self, op, timeout=None):
"""This takes a lock using POSIX locks (``fcntl.lockf``).
@@ -301,10 +276,9 @@ def _lock(self, op, timeout=None):
successfully acquired, the total wait time and the number of attempts
is returned.
"""
assert LockType.is_valid(op)
op_str = LockType.to_str(op)
assert op in lock_type
self._log_acquiring("{0} LOCK".format(op_str))
self._log_acquiring('{0} LOCK'.format(lock_type[op].upper()))
timeout = timeout or self.default_timeout
# Create file and parent directories if they don't exist.
@@ -312,16 +286,14 @@ def _lock(self, op, timeout=None):
self._ensure_parent_directory()
self._file = file_tracker.get_fh(self.path)
if LockType.to_module(op) == fcntl.LOCK_EX and self._file.mode == "r":
if op == fcntl.LOCK_EX and self._file.mode == 'r':
# Attempt to upgrade to write lock w/a read-only file.
# If the file were writable, we'd have opened it 'r+'
raise LockROFileError(self.path)
self._log_debug(
"{0} locking [{1}:{2}]: timeout {3} sec".format(
op_str.lower(), self._start, self._length, timeout
)
)
self._log_debug("{0} locking [{1}:{2}]: timeout {3} sec"
.format(lock_type[op], self._start, self._length,
timeout))
poll_intervals = iter(Lock._poll_interval_generator())
start_time = time.time()
@@ -340,31 +312,30 @@ def _lock(self, op, timeout=None):
total_wait_time = time.time() - start_time
return total_wait_time, num_attempts
raise LockTimeoutError("Timed out waiting for a {0} lock.".format(op_str.lower()))
raise LockTimeoutError("Timed out waiting for a {0} lock."
.format(lock_type[op]))
def _poll_lock(self, op):
"""Attempt to acquire the lock in a non-blocking manner. Return whether
the locking attempt succeeds
"""
module_op = LockType.to_module(op)
assert op in lock_type
try:
# Try to get the lock (will raise if not available.)
fcntl.lockf(
self._file, module_op | fcntl.LOCK_NB, self._length, self._start, os.SEEK_SET
)
fcntl.lockf(self._file, op | fcntl.LOCK_NB,
self._length, self._start, os.SEEK_SET)
# help for debugging distributed locking
if self.debug:
# All locks read the owner PID and host
self._read_log_debug_data()
self._log_debug(
"{0} locked {1} [{2}:{3}] (owner={4})".format(
LockType.to_str(op), self.path, self._start, self._length, self.pid
)
)
self._log_debug('{0} locked {1} [{2}:{3}] (owner={4})'
.format(lock_type[op], self.path,
self._start, self._length, self.pid))
# Exclusive locks write their PID/host
if module_op == fcntl.LOCK_EX:
if op == fcntl.LOCK_EX:
self._write_log_debug_data()
return True
@@ -381,13 +352,14 @@ def _ensure_parent_directory(self):
# relative paths to lockfiles in the current directory have no parent
if not parent:
return "."
return '.'
try:
os.makedirs(parent)
except OSError as e:
# makedirs can fail when directory already exists.
if not (e.errno == errno.EEXIST and os.path.isdir(parent) or e.errno == errno.EISDIR):
if not (e.errno == errno.EEXIST and os.path.isdir(parent) or
e.errno == errno.EISDIR):
raise
return parent
@@ -398,9 +370,9 @@ def _read_log_debug_data(self):
line = self._file.read()
if line:
pid, host = line.strip().split(",")
_, _, self.pid = pid.rpartition("=")
_, _, self.host = host.rpartition("=")
pid, host = line.strip().split(',')
_, _, self.pid = pid.rpartition('=')
_, _, self.host = host.rpartition('=')
self.pid = int(self.pid)
def _write_log_debug_data(self):
@@ -425,7 +397,8 @@ def _unlock(self):
be masquerading as write locks, but this removes either.
"""
fcntl.lockf(self._file, fcntl.LOCK_UN, self._length, self._start, os.SEEK_SET)
fcntl.lockf(self._file, fcntl.LOCK_UN,
self._length, self._start, os.SEEK_SET)
file_tracker.release_fh(self.path)
self._file = None
@@ -447,10 +420,10 @@ def acquire_read(self, timeout=None):
if self._reads == 0 and self._writes == 0:
# can raise LockError.
wait_time, nattempts = self._lock(LockType.READ, timeout=timeout)
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
self._reads += 1
# Log if acquired, which includes counts when verbose
self._log_acquired("READ LOCK", wait_time, nattempts)
self._log_acquired('READ LOCK', wait_time, nattempts)
return True
else:
# Increment the read count for nested lock tracking
@@ -472,10 +445,10 @@ def acquire_write(self, timeout=None):
if self._writes == 0:
# can raise LockError.
wait_time, nattempts = self._lock(LockType.WRITE, timeout=timeout)
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
self._writes += 1
# Log if acquired, which includes counts when verbose
self._log_acquired("WRITE LOCK", wait_time, nattempts)
self._log_acquired('WRITE LOCK', wait_time, nattempts)
# return True only if we weren't nested in a read lock.
# TODO: we may need to return two values: whether we got
@@ -516,7 +489,7 @@ def downgrade_write_to_read(self, timeout=None):
if self._writes == 1 and self._reads == 0:
self._log_downgrading()
# can raise LockError.
wait_time, nattempts = self._lock(LockType.READ, timeout=timeout)
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
self._reads = 1
self._writes = 0
self._log_downgraded(wait_time, nattempts)
@@ -535,7 +508,7 @@ def upgrade_read_to_write(self, timeout=None):
if self._reads == 1 and self._writes == 0:
self._log_upgrading()
# can raise LockError.
wait_time, nattempts = self._lock(LockType.WRITE, timeout=timeout)
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
self._reads = 0
self._writes = 1
self._log_upgraded(wait_time, nattempts)
@@ -562,7 +535,7 @@ def release_read(self, release_fn=None):
"""
assert self._reads > 0
locktype = "READ LOCK"
locktype = 'READ LOCK'
if self._reads == 1 and self._writes == 0:
self._log_releasing(locktype)
@@ -570,7 +543,7 @@ def release_read(self, release_fn=None):
release_fn = release_fn or true_fn
result = release_fn()
self._unlock() # can raise LockError.
self._unlock() # can raise LockError.
self._reads = 0
self._log_released(locktype)
return result
@@ -598,14 +571,14 @@ def release_write(self, release_fn=None):
assert self._writes > 0
release_fn = release_fn or true_fn
locktype = "WRITE LOCK"
locktype = 'WRITE LOCK'
if self._writes == 1 and self._reads == 0:
self._log_releasing(locktype)
# we need to call release_fn before releasing the lock
result = release_fn()
self._unlock() # can raise LockError.
self._unlock() # can raise LockError.
self._writes = 0
self._log_released(locktype)
return result
@@ -619,62 +592,57 @@ def release_write(self, release_fn=None):
else:
return False
def cleanup(self):
if self._reads == 0 and self._writes == 0:
os.unlink(self.path)
else:
raise LockError("Attempting to cleanup active lock.")
def _get_counts_desc(self):
return (
"(reads {0}, writes {1})".format(self._reads, self._writes) if tty.is_verbose() else ""
)
return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
if tty.is_verbose() else ''
def _log_acquired(self, locktype, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Acquired at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, "{0}{1}".format(desc, attempts_part)))
desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, '{0}{1}'
.format(desc, attempts_part)))
def _log_acquiring(self, locktype):
self._log_debug(self._status_msg(locktype, "Acquiring"), level=3)
self._log_debug(self._status_msg(locktype, 'Acquiring'), level=3)
def _log_debug(self, *args, **kwargs):
"""Output lock debug messages."""
kwargs["level"] = kwargs.get("level", 2)
kwargs['level'] = kwargs.get('level', 2)
tty.debug(*args, **kwargs)
def _log_downgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Downgraded at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("READ LOCK", "{0}{1}".format(desc, attempts_part)))
desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg('READ LOCK', '{0}{1}'
.format(desc, attempts_part)))
def _log_downgrading(self):
self._log_debug(self._status_msg("WRITE LOCK", "Downgrading"), level=3)
self._log_debug(self._status_msg('WRITE LOCK', 'Downgrading'), level=3)
def _log_released(self, locktype):
now = datetime.now()
desc = "Released at %s" % now.strftime("%H:%M:%S.%f")
desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, desc))
def _log_releasing(self, locktype):
self._log_debug(self._status_msg(locktype, "Releasing"), level=3)
self._log_debug(self._status_msg(locktype, 'Releasing'), level=3)
def _log_upgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Upgraded at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("WRITE LOCK", "{0}{1}".format(desc, attempts_part)))
desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg('WRITE LOCK', '{0}{1}'.
format(desc, attempts_part)))
def _log_upgrading(self):
self._log_debug(self._status_msg("READ LOCK", "Upgrading"), level=3)
self._log_debug(self._status_msg('READ LOCK', 'Upgrading'), level=3)
def _status_msg(self, locktype, status):
status_desc = "[{0}] {1}".format(status, self._get_counts_desc())
return "{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}".format(
locktype, self, status_desc
)
status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
locktype, self, status_desc)
class LockTransaction(object):
@@ -715,7 +683,7 @@ def __init__(self, lock, acquire=None, release=None, timeout=None):
def __enter__(self):
if self._enter() and self._acquire_fn:
self._as = self._acquire_fn()
if hasattr(self._as, "__enter__"):
if hasattr(self._as, '__enter__'):
return self._as.__enter__()
else:
return self._as
@@ -727,7 +695,7 @@ def release_fn():
if self._release_fn is not None:
return self._release_fn(type, value, traceback)
if self._as and hasattr(self._as, "__exit__"):
if self._as and hasattr(self._as, '__exit__'):
if self._as.__exit__(type, value, traceback):
suppress = True
@@ -739,7 +707,6 @@ def release_fn():
class ReadTransaction(LockTransaction):
"""LockTransaction context manager that does a read and releases it."""
def _enter(self):
return self._lock.acquire_read(self._timeout)
@@ -749,7 +716,6 @@ def _exit(self, release_fn):
class WriteTransaction(LockTransaction):
"""LockTransaction context manager that does a write and releases it."""
def _enter(self):
return self._lock.acquire_write(self._timeout)
@@ -763,7 +729,6 @@ class LockError(Exception):
class LockDowngradeError(LockError):
"""Raised when unable to downgrade from a write to a read lock."""
def __init__(self, path):
msg = "Cannot downgrade lock from write to read on file: %s" % path
super(LockDowngradeError, self).__init__(msg)
@@ -779,7 +744,6 @@ class LockTimeoutError(LockError):
class LockUpgradeError(LockError):
"""Raised when unable to upgrade from a read to a write lock."""
def __init__(self, path):
msg = "Cannot upgrade lock from read to write on file: %s" % path
super(LockUpgradeError, self).__init__(msg)
@@ -791,7 +755,6 @@ class LockPermissionError(LockError):
class LockROFileError(LockPermissionError):
"""Tried to take an exclusive lock on a read-only file."""
def __init__(self, path):
msg = "Can't take write lock on read-only file: %s" % path
super(LockROFileError, self).__init__(msg)
@@ -799,7 +762,6 @@ def __init__(self, path):
class CantCreateLockError(LockPermissionError):
"""Attempt to create a lock in an unwritable location."""
def __init__(self, path):
msg = "cannot create lock '%s': " % path
msg += "file does not exist and location is not writable"
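
For orientation, a minimal usage sketch of the lock API above (editor's example, not part of the diff; the lock path is hypothetical):

from llnl.util.lock import Lock, ReadTransaction

lock = Lock("/tmp/example.lock")   # hypothetical path

lock.acquire_write(timeout=10)     # can raise LockTimeoutError
try:
    pass                           # ... mutate the protected resource ...
finally:
    lock.release_write()           # decrements counts, unlocks at zero

with ReadTransaction(lock):        # pairs acquire_read/release_read
    pass                           # ... read the protected resource ...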

View File

@@ -10,7 +10,7 @@
"""
from multiprocessing import Semaphore, Value
__all__ = ["Barrier"]
__all__ = ['Barrier']
class Barrier:
@@ -24,7 +24,7 @@ class Barrier:
def __init__(self, n, timeout=None):
self.n = n
self.to = timeout
self.count = Value("i", 0)
self.count = Value('i', 0)
self.mutex = Semaphore(1)
self.turnstile1 = Semaphore(0)
self.turnstile2 = Semaphore(1)
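
The two turnstile semaphores implement the classic two-phase (reusable) barrier: the second turnstile keeps a fast process from looping around and re-entering the barrier before slower processes have left it. A usage sketch (editor's example, assuming the class's wait() method, which falls outside this hunk):

import multiprocessing

def worker(barrier):
    # ... phase 1 work ...
    barrier.wait()   # all n processes rendezvous here
    # ... phase 2 work starts together in every process ...

barrier = Barrier(4, timeout=30)
procs = [multiprocessing.Process(target=worker, args=(barrier,))
         for _ in range(4)]
for p in procs:
    p.start()
for p in procs:
    p.join()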

View File

@@ -1,112 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import errno
import os
import shutil
import tempfile
from os.path import exists, join
from sys import platform as _platform
from llnl.util import lang
is_windows = _platform == "win32"
if is_windows:
from win32file import CreateHardLink
def symlink(real_path, link_path):
"""
Create a symbolic link.
On Windows, use junctions if os.symlink fails.
"""
if not is_windows or _win32_can_symlink():
os.symlink(real_path, link_path)
else:
try:
# Try to use junctions
_win32_junction(real_path, link_path)
except OSError:
# If all else fails, fall back to copying files
shutil.copyfile(real_path, link_path)
def islink(path):
return os.path.islink(path) or _win32_is_junction(path)
# '_win32' functions based on
# https://github.com/Erotemic/ubelt/blob/master/ubelt/util_links.py
def _win32_junction(path, link):
# junctions require absolute paths
if not os.path.isabs(link):
link = os.path.abspath(link)
# os.symlink will fail if link exists, emulate the behavior here
if exists(link):
raise OSError(errno.EEXIST, "File exists: %s -> %s" % (link, path))
if not os.path.isabs(path):
parent = os.path.join(link, os.pardir)
path = os.path.join(parent, path)
path = os.path.abspath(path)
CreateHardLink(link, path)
@lang.memoized
def _win32_can_symlink():
tempdir = tempfile.mkdtemp()
dpath = join(tempdir, "dpath")
fpath = join(tempdir, "fpath.txt")
dlink = join(tempdir, "dlink")
flink = join(tempdir, "flink.txt")
import llnl.util.filesystem as fs
fs.touchp(fpath)
try:
os.symlink(dpath, dlink)
can_symlink_directories = os.path.islink(dlink)
except OSError:
can_symlink_directories = False
try:
os.symlink(fpath, flink)
can_symlink_files = os.path.islink(flink)
except OSError:
can_symlink_files = False
# Cleanup the test directory
shutil.rmtree(tempdir)
return can_symlink_directories and can_symlink_files
def _win32_is_junction(path):
"""
Determines if a path is a win32 junction
"""
if os.path.islink(path):
return False
if is_windows:
import ctypes.wintypes
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
GetFileAttributes.restype = ctypes.wintypes.DWORD
INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
FILE_ATTRIBUTE_REPARSE_POINT = 0x400
res = GetFileAttributes(path)
return res != INVALID_FILE_ATTRIBUTES and bool(res & FILE_ATTRIBUTE_REPARSE_POINT)
return False
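
A short sketch of the cross-platform behavior above (editor's example; paths hypothetical): on POSIX, symlink() is plain os.symlink, while on Windows it falls back to a junction and, failing that, a file copy, which is why islink() must also recognize junctions.

symlink("/opt/app/releases/1.2.3", "/opt/app/current")
assert islink("/opt/app/current")   # True for symlinks and Win32 junctions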

View File

@@ -6,22 +6,19 @@
from __future__ import unicode_literals
import contextlib
import fcntl
import os
import struct
import sys
import termios
import textwrap
import traceback
from datetime import datetime
from sys import platform as _platform
import six
from six import StringIO
from six.moves import input
if _platform != "win32":
import fcntl
import termios
from llnl.util.tty.color import cescape, clen, cprint, cwrite
# Globals
@@ -54,7 +51,7 @@ def is_stacktrace():
def set_debug(level=0):
global _debug
assert level >= 0, "Debug level must be a positive value"
assert level >= 0, 'Debug level must be a positive value'
_debug = level
@@ -110,7 +107,10 @@ def output_filter(filter_fn):
class SuppressOutput:
"""Class for disabling output in a scope using 'with' keyword"""
def __init__(self, msg_enabled=True, warn_enabled=True, error_enabled=True):
def __init__(self,
msg_enabled=True,
warn_enabled=True,
error_enabled=True):
self._msg_enabled_initial = _msg_enabled
self._warn_enabled_initial = _warn_enabled
@@ -143,7 +143,7 @@ def process_stacktrace(countback):
file_list = []
for frame in st:
# Check that the file is a spack file
if frame[0].find(os.path.sep + "spack") >= 0:
if frame[0].find("/spack") >= 0:
file_list.append(frame[0])
# We use commonprefix to find what the spack 'root' directory is.
root_dir = os.path.commonprefix(file_list)
@@ -161,10 +161,11 @@ def get_timestamp(force=False):
"""Get a string timestamp"""
if _debug or _timestamp or force:
# Note inclusion of the PID is useful for parallel builds.
pid = ", {0}".format(os.getpid()) if show_pid() else ""
return "[{0}{1}] ".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
pid = ', {0}'.format(os.getpid()) if show_pid() else ''
return '[{0}{1}] '.format(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
else:
return ""
return ''
def msg(message, *args, **kwargs):
@@ -174,14 +175,26 @@ def msg(message, *args, **kwargs):
if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message))
newline = kwargs.get("newline", True)
newline = kwargs.get('newline', True)
st_text = ""
if _stacktrace:
st_text = process_stacktrace(2)
if newline:
cprint("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
cprint(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
else:
cwrite("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
cwrite(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
for arg in args:
print(indent + _output_filter(six.text_type(arg)))
@@ -190,19 +203,23 @@ def info(message, *args, **kwargs):
if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message))
format = kwargs.get("format", "*b")
stream = kwargs.get("stream", sys.stdout)
wrap = kwargs.get("wrap", False)
break_long_words = kwargs.get("break_long_words", False)
st_countback = kwargs.get("countback", 3)
format = kwargs.get('format', '*b')
stream = kwargs.get('stream', sys.stdout)
wrap = kwargs.get('wrap', False)
break_long_words = kwargs.get('break_long_words', False)
st_countback = kwargs.get('countback', 3)
st_text = ""
if _stacktrace:
st_text = process_stacktrace(st_countback)
cprint(
"@%s{%s==>} %s%s"
% (format, st_text, get_timestamp(), cescape(_output_filter(six.text_type(message)))),
stream=stream,
"@%s{%s==>} %s%s" % (
format,
st_text,
get_timestamp(),
cescape(_output_filter(six.text_type(message)))
),
stream=stream
)
for arg in args:
if wrap:
@@ -210,25 +227,27 @@ def info(message, *args, **kwargs):
_output_filter(six.text_type(arg)),
initial_indent=indent,
subsequent_indent=indent,
break_long_words=break_long_words,
break_long_words=break_long_words
)
for line in lines:
stream.write(line + "\n")
stream.write(line + '\n')
else:
stream.write(indent + _output_filter(six.text_type(arg)) + "\n")
stream.write(
indent + _output_filter(six.text_type(arg)) + '\n'
)
def verbose(message, *args, **kwargs):
if _verbose:
kwargs.setdefault("format", "c")
kwargs.setdefault('format', 'c')
info(message, *args, **kwargs)
def debug(message, *args, **kwargs):
level = kwargs.get("level", 1)
level = kwargs.get('level', 1)
if is_debug(level):
kwargs.setdefault("format", "g")
kwargs.setdefault("stream", sys.stderr)
kwargs.setdefault('format', 'g')
kwargs.setdefault('stream', sys.stderr)
info(message, *args, **kwargs)
@@ -236,8 +255,8 @@ def error(message, *args, **kwargs):
if not error_enabled():
return
kwargs.setdefault("format", "*r")
kwargs.setdefault("stream", sys.stderr)
kwargs.setdefault('format', '*r')
kwargs.setdefault('stream', sys.stderr)
info("Error: " + six.text_type(message), *args, **kwargs)
@@ -245,27 +264,27 @@ def warn(message, *args, **kwargs):
if not warn_enabled():
return
kwargs.setdefault("format", "*Y")
kwargs.setdefault("stream", sys.stderr)
kwargs.setdefault('format', '*Y')
kwargs.setdefault('stream', sys.stderr)
info("Warning: " + six.text_type(message), *args, **kwargs)
def die(message, *args, **kwargs):
kwargs.setdefault("countback", 4)
kwargs.setdefault('countback', 4)
error(message, *args, **kwargs)
sys.exit(1)
def get_number(prompt, **kwargs):
default = kwargs.get("default", None)
abort = kwargs.get("abort", None)
default = kwargs.get('default', None)
abort = kwargs.get('abort', None)
if default is not None and abort is not None:
prompt += " (default is %s, %s to abort) " % (default, abort)
prompt += ' (default is %s, %s to abort) ' % (default, abort)
elif default is not None:
prompt += " (default is %s) " % default
prompt += ' (default is %s) ' % default
elif abort is not None:
prompt += " (%s to abort) " % abort
prompt += ' (%s to abort) ' % abort
number = None
while number is None:
@@ -288,16 +307,17 @@ def get_number(prompt, **kwargs):
def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get("default", None)
default_value = kwargs.get('default', None)
if default_value is None:
prompt += " [y/n] "
prompt += ' [y/n] '
elif default_value is True:
prompt += " [Y/n] "
prompt += ' [Y/n] '
elif default_value is False:
prompt += " [y/N] "
prompt += ' [y/N] '
else:
raise ValueError("default for get_yes_no() must be True, False, or None.")
raise ValueError(
"default for get_yes_no() must be True, False, or None.")
result = None
while result is None:
@@ -308,9 +328,9 @@ def get_yes_or_no(prompt, **kwargs):
if result is None:
print("Please enter yes or no.")
else:
if ans == "y" or ans == "yes":
if ans == 'y' or ans == 'yes':
result = True
elif ans == "n" or ans == "no":
elif ans == 'n' or ans == 'no':
result = False
return result
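
Editor's example of the two prompt helpers (values illustrative):

if get_yes_or_no("Really uninstall?", default=False):
    pass  # ... proceed only on an explicit or default "yes" ...

jobs = get_number("How many build jobs?", default=4, abort="q")
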
@@ -322,12 +342,12 @@ def hline(label=None, **kwargs):
char (str): Char to draw the line with. Default '-'
max_width (int): Maximum width of the line. Default is 64 chars.
"""
char = kwargs.pop("char", "-")
max_width = kwargs.pop("max_width", 64)
char = kwargs.pop('char', '-')
max_width = kwargs.pop('max_width', 64)
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for this function." % next(kwargs.iterkeys())
)
"'%s' is an invalid keyword argument for this function."
% next(kwargs.iterkeys()))
rows, cols = terminal_size()
if not cols:
@@ -350,32 +370,22 @@ def hline(label=None, **kwargs):
def terminal_size():
"""Gets the dimensions of the console: (rows, cols)."""
if _platform != "win32":
def ioctl_gwinsz(fd):
try:
rc = struct.unpack('hh', fcntl.ioctl(
fd, termios.TIOCGWINSZ, '1234'))
except BaseException:
return
return rc
rc = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not rc:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
rc = ioctl_gwinsz(fd)
os.close(fd)
except BaseException:
pass
if not rc:
rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
def ioctl_gwinsz(fd):
try:
rc = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
except BaseException:
return
return rc
rc = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not rc:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
rc = ioctl_gwinsz(fd)
os.close(fd)
except BaseException:
pass
if not rc:
rc = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80))
return int(rc[0]), int(rc[1])
else:
if sys.version_info[0] < 3:
raise RuntimeError(
"Terminal size not obtainable on Windows with a\
Python version older than 3"
)
rc = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80))
return int(rc[0]), int(rc[1])
return int(rc[0]), int(rc[1])

View File

@@ -18,27 +18,29 @@
class ColumnConfig:
def __init__(self, cols):
self.cols = cols
self.line_length = 0
self.valid = True
self.widths = [0] * cols # does not include ansi colors
self.widths = [0] * cols # does not include ansi colors
def __repr__(self):
attrs = [(a, getattr(self, a)) for a in dir(self) if not a.startswith("__")]
attrs = [(a, getattr(self, a))
for a in dir(self) if not a.startswith("__")]
return "<Config: %s>" % ", ".join("%s: %r" % a for a in attrs)
def config_variable_cols(elts, console_width, padding, cols=0):
"""Variable-width column fitting algorithm.
This function determines the most columns that can fit in the
screen width. Unlike uniform fitting, where all columns take
the width of the longest element in the list, each column takes
the width of its own longest element. This packs elements more
efficiently on screen.
This function determines the most columns that can fit in the
screen width. Unlike uniform fitting, where all columns take
the width of the longest element in the list, each column takes
the width of its own longest element. This packs elements more
efficiently on screen.
If cols is nonzero, force
If cols is nonzero, force
"""
if cols < 0:
raise ValueError("cols must be non-negative.")
@@ -62,8 +64,8 @@ def config_variable_cols(elts, console_width, padding, cols=0):
if conf.widths[col] < (length + p):
conf.line_length += length + p - conf.widths[col]
conf.widths[col] = length + p
conf.valid = conf.line_length < console_width
conf.widths[col] = length + p
conf.valid = (conf.line_length < console_width)
try:
config = next(conf for conf in reversed(configs) if conf.valid)
@@ -79,9 +81,9 @@ def config_variable_cols(elts, console_width, padding, cols=0):
def config_uniform_cols(elts, console_width, padding, cols=0):
"""Uniform-width column fitting algorithm.
Determines the longest element in the list, and determines how
many columns of that width will fit on screen. Returns a
corresponding column config.
Determines the longest element in the list, and determines how
many columns of that width will fit on screen. Returns a
corresponding column config.
"""
if cols < 0:
raise ValueError("cols must be non-negative.")
@@ -120,18 +122,18 @@ def colify(elts, **options):
and fit less data on the screen
"""
# Get keyword arguments or set defaults
cols = options.pop("cols", 0)
output = options.pop("output", sys.stdout)
indent = options.pop("indent", 0)
padding = options.pop("padding", 2)
tty = options.pop("tty", None)
method = options.pop("method", "variable")
cols = options.pop("cols", 0)
output = options.pop("output", sys.stdout)
indent = options.pop("indent", 0)
padding = options.pop("padding", 2)
tty = options.pop('tty', None)
method = options.pop("method", "variable")
console_cols = options.pop("width", None)
if options:
raise TypeError(
"'%s' is an invalid keyword argument for this function." % next(options.iterkeys())
)
"'%s' is an invalid keyword argument for this function."
% next(options.iterkeys()))
# elts needs to be an array of strings so we can count the elements
elts = [text_type(elt) for elt in elts]
@@ -139,10 +141,10 @@ def colify(elts, **options):
return (0, ())
# environment size is of the form "<rows>x<cols>"
env_size = os.environ.get("COLIFY_SIZE")
env_size = os.environ.get('COLIFY_SIZE')
if env_size:
try:
r, c = env_size.split("x")
r, c = env_size.split('x')
console_rows, console_cols = int(r), int(c)
tty = True
except BaseException:
@@ -178,7 +180,7 @@ def colify(elts, **options):
elt = col * rows + row
width = config.widths[col] + cextra(elts[elt])
if col < cols - 1:
fmt = "%%-%ds" % width
fmt = '%%-%ds' % width
output.write(fmt % elts[elt])
else:
# Don't pad the rightmost column (spaces can wrap on
@@ -196,15 +198,15 @@ def colify(elts, **options):
def colify_table(table, **options):
"""Version of ``colify()`` for data expressed in rows, (list of lists).
Same as regular colify but:
Same as regular colify but:
1. This takes a list of lists, where each sub-list must be the
same length, and each is interpreted as a row in a table.
Regular colify displays a sequential list of values in columns.
1. This takes a list of lists, where each sub-list must be the
same length, and each is interpreted as a row in a table.
Regular colify displays a sequential list of values in columns.
2. Regular colify will always print with 1 column when the output
is not a tty. This will always print with same dimensions of
the table argument.
2. Regular colify will always print with 1 column when the output
is not a tty. This will always print with same dimensions of
the table argument.
"""
if table is None:
@@ -219,20 +221,20 @@ def transpose():
for row in table:
yield row[i]
if "cols" in options:
if 'cols' in options:
raise ValueError("Cannot override columns in colify_table.")
options["cols"] = columns
options['cols'] = columns
# don't reduce to 1 column for non-tty
options["tty"] = True
options['tty'] = True
colify(transpose(), **options)
def colified(elts, **options):
"""Invokes the ``colify()`` function but returns the result as a string
instead of writing it to an output stream."""
instead of writing it to an output stream."""
sio = StringIO()
options["output"] = sio
options['output'] = sio
colify(elts, **options)
return sio.getvalue()
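
Editor's example of the three entry points in this file; setting COLIFY_SIZE (read above) pins the assumed terminal geometry.

names = ["autoconf", "cmake", "gcc", "hdf5", "mpich", "zlib"]

colify(names, indent=2)             # variable-width columns on stdout
text = colified(names, cols=3)      # same layout, captured as a string
colify_table([["zlib", "1.2.12"],   # one sub-list per table row
              ["hdf5", "1.12.2"]])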

View File

@@ -76,33 +76,29 @@ def __init__(self, message):
# Text styles for ansi codes
styles = {"*": "1", "_": "4", None: "0"} # bold # underline # plain
styles = {'*': '1', # bold
'_': '4', # underline
None: '0'} # plain
# Dim and bright ansi colors
colors = {
"k": 30,
"K": 90, # black
"r": 31,
"R": 91, # red
"g": 32,
"G": 92, # green
"y": 33,
"Y": 93, # yellow
"b": 34,
"B": 94, # blue
"m": 35,
"M": 95, # magenta
"c": 36,
"C": 96, # cyan
"w": 37,
"W": 97,
} # white
colors = {'k': 30, 'K': 90, # black
'r': 31, 'R': 91, # red
'g': 32, 'G': 92, # green
'y': 33, 'Y': 93, # yellow
'b': 34, 'B': 94, # blue
'm': 35, 'M': 95, # magenta
'c': 36, 'C': 96, # cyan
'w': 37, 'W': 97} # white
# Regex to be used for color formatting
color_re = r"@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)"
color_re = r'@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)'
# Mapping from color arguments to values for tty.set_color
color_when_values = {"always": True, "auto": None, "never": False}
color_when_values = {
'always': True,
'auto': None,
'never': False
}
# Force color; None: Only color if stdout is a tty
# True: Always colorize output, False: Never colorize output
@@ -118,7 +114,7 @@ def _color_when_value(when):
if when in color_when_values:
return color_when_values[when]
elif when not in color_when_values.values():
raise ValueError("Invalid color setting: %s" % when)
raise ValueError('Invalid color setting: %s' % when)
return when
@@ -150,6 +146,7 @@ def color_when(value):
class match_to_ansi(object):
def __init__(self, color=True):
self.color = _color_when_value(color)
@@ -158,7 +155,7 @@ def escape(self, s):
if self.color:
return "\033[%sm" % s
else:
return ""
return ''
def __call__(self, match):
"""Convert a match object generated by ``color_re`` into an ansi
@@ -167,22 +164,22 @@ def __call__(self, match):
style, color, text = match.groups()
m = match.group(0)
if m == "@@":
return "@"
elif m == "@.":
if m == '@@':
return '@'
elif m == '@.':
return self.escape(0)
elif m == "@":
raise ColorParseError("Incomplete color format: '%s' in %s" % (m, match.string))
elif m == '@':
raise ColorParseError("Incomplete color format: '%s' in %s"
% (m, match.string))
string = styles[style]
if color:
if color not in colors:
raise ColorParseError(
"Invalid color specifier: '%s' in '%s'" % (color, match.string)
)
string += ";" + str(colors[color])
raise ColorParseError("Invalid color specifier: '%s' in '%s'"
% (color, match.string))
string += ';' + str(colors[color])
colored_text = ""
colored_text = ''
if text:
colored_text = text + self.escape(0)
@@ -202,28 +199,28 @@ def colorize(string, **kwargs):
color (bool): If False, output will be plain text without control
codes, for output to non-console devices.
"""
color = _color_when_value(kwargs.get("color", get_color_when()))
color = _color_when_value(kwargs.get('color', get_color_when()))
string = re.sub(color_re, match_to_ansi(color), string)
string = string.replace("}}", "}")
string = string.replace('}}', '}')
return string
def clen(string):
"""Return the length of a string, excluding ansi color sequences."""
return len(re.sub(r"\033[^m]*m", "", string))
return len(re.sub(r'\033[^m]*m', '', string))
def cextra(string):
"""Length of extra color characters in a string"""
return len("".join(re.findall(r"\033[^m]*m", string)))
return len(''.join(re.findall(r'\033[^m]*m', string)))
def cwrite(string, stream=None, color=None):
"""Replace all color expressions in string with ANSI control
codes and write the result to the stream. If color is
False, this will write plain text with no color. If True,
then it will always write colored output. If not supplied,
then it will be set based on stream.isatty().
codes and write the result to the stream. If color is
False, this will write plain text with no color. If True,
then it will always write colored output. If not supplied,
then it will be set based on stream.isatty().
"""
stream = sys.stdout if stream is None else stream
if color is None:
@@ -254,19 +251,20 @@ def cescape(string):
(str): the string with color codes escaped
"""
string = six.text_type(string)
string = string.replace("@", "@@")
string = string.replace("}", "}}")
string = string.replace('@', '@@')
string = string.replace('}', '}}')
return string
class ColorStream(object):
def __init__(self, stream, color=None):
self._stream = stream
self._color = color
def write(self, string, **kwargs):
raw = kwargs.get("raw", False)
raw_write = getattr(self._stream, "write")
raw = kwargs.get('raw', False)
raw_write = getattr(self._stream, 'write')
color = self._color
if self._color is None:
@@ -277,6 +275,6 @@ def write(self, string, **kwargs):
raw_write(colorize(string, color=color))
def writelines(self, sequence, **kwargs):
raw = kwargs.get("raw", False)
raw = kwargs.get('raw', False)
for string in sequence:
self.write(string, self.color, raw=raw)
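
Editor's example of the @-markup parsed by color_re: an optional style character (* bold, _ underline), a color letter from the table above, and braced text; @@ and }} are literal escapes, and @. resets to plain.

from llnl.util.tty.color import cescape, colorize

user_text = "odd {braces} and @ signs"
print(colorize("@*g{==>} installing @_{zlib}@."))        # bold green, underline
print(colorize("@*r{error:} %s" % cescape(user_text)))   # escape untrusted text
plain = colorize("@*b{==>} fetching", color=False)       # force plain output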

View File

@@ -8,19 +8,15 @@
from __future__ import unicode_literals
import atexit
import ctypes
import errno
import io
import multiprocessing
import os
import re
import select
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from threading import Thread
from types import ModuleType # novm
from typing import Optional # novm
@@ -31,22 +27,21 @@
termios = None # type: Optional[ModuleType]
try:
import termios as term_mod
termios = term_mod
except ImportError:
pass
# Use this to strip escape sequences
_escape = re.compile(r"\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07")
_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07')
# control characters for enabling/disabling echo
#
# We use control characters to ensure that echo enable/disable are inline
# with the other output. We always follow these with a newline to ensure
# one per line; the following newline is ignored in the output.
xon, xoff = "\x11\n", "\x13\n"
control = re.compile("(\x11\n|\x13\n)")
xon, xoff = '\x11\n', '\x13\n'
control = re.compile('(\x11\n|\x13\n)')
@contextmanager
@@ -60,13 +55,17 @@ def ignore_signal(signum):
def _is_background_tty(stream):
"""True if the stream is a tty and calling process is in the background."""
return stream.isatty() and os.getpgrp() != os.tcgetpgrp(stream.fileno())
"""True if the stream is a tty and calling process is in the background.
"""
return (
stream.isatty() and
os.getpgrp() != os.tcgetpgrp(stream.fileno())
)
def _strip(line):
"""Strip color and control characters from a line."""
return _escape.sub("", line)
return _escape.sub('', line)
class keyboard_input(object):
@@ -144,7 +143,6 @@ class keyboard_input(object):
a TTY, ``keyboard_input`` has no effect.
"""
def __init__(self, stream):
"""Create a context manager that will enable keyboard input on stream.
@@ -202,7 +200,7 @@ def check_fg_bg(self):
bg = self._is_background()
# restore sanity if flags are amiss -- see diagram in class docs
if not bg and any(flags): # fg, but input not enabled
if not bg and any(flags): # fg, but input not enabled
self._enable_keyboard_input()
elif bg and not all(flags): # bg, but input enabled
self._restore_default_terminal_settings()
@@ -226,7 +224,8 @@ def __enter__(self):
# Install a signal handler to disable/enable keyboard input
# when the process moves between foreground and background.
self.old_handlers[signal.SIGTSTP] = signal.signal(signal.SIGTSTP, self._tstp_handler)
self.old_handlers[signal.SIGTSTP] = signal.signal(
signal.SIGTSTP, self._tstp_handler)
# add an atexit handler to ensure the terminal is restored
atexit.register(self._restore_default_terminal_settings)
@@ -255,7 +254,6 @@ class Unbuffered(object):
This is implemented by forcing a flush after each write.
"""
def __init__(self, stream):
self.stream = stream
@@ -300,7 +298,6 @@ class FileWrapper(object):
yet), or neither. When unwrapped, it returns an open file (or file-like)
object.
"""
def __init__(self, file_like):
# This records whether the file-like object returned by "unwrap" is
# purely in-memory. In that case a subprocess will need to explicitly
@@ -324,9 +321,9 @@ def unwrap(self):
if self.open:
if self.file_like:
if sys.version_info < (3,):
self.file = open(self.file_like, "w")
self.file = open(self.file_like, 'w')
else:
self.file = open(self.file_like, "w", encoding="utf-8") # novm
self.file = open(self.file_like, 'w', encoding='utf-8') # novm
else:
self.file = StringIO()
return self.file
@@ -342,9 +339,8 @@ def close(self):
class MultiProcessFd(object):
"""Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
def __init__(self, fd):
self._connection = None
self._fd = None
@@ -403,7 +399,7 @@ def replace_environment(env):
os.environ[name] = val
def log_output(*args, **kwargs):
class log_output(object):
"""Context manager that logs its output to a file.
In the simplest case, the usage looks like this::
@@ -418,7 +414,6 @@ def log_output(*args, **kwargs):
with log_output('logfile.txt', echo=True):
# do things ... output will be logged and printed out
The following is available on Unix only. No-op on Windows.
And, if you just want to echo *some* stuff from the parent, use
``force_echo``::
@@ -428,20 +423,6 @@ def log_output(*args, **kwargs):
with logger.force_echo():
# things here will be echoed *and* logged
See individual log classes for more information.
This function is actually a factory that returns the per-platform
(Unix vs. Windows) log_output implementation
"""
if sys.platform == "win32":
return winlog(*args, **kwargs)
else:
return nixlog(*args, **kwargs)
class nixlog(object):
"""
Under the hood, we spawn a daemon and set up a pipe between this
process and the daemon. The daemon writes our output to both the
file and to stdout (if echoing). The parent process can communicate
@@ -454,9 +435,8 @@ class nixlog(object):
work within test frameworks like nose and pytest.
"""
def __init__(
self, file_like=None, echo=False, debug=0, buffer=False, env=None, filter_fn=None
):
def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
env=None, filter_fn=None):
"""Create a new output log context manager.
Args:
@@ -525,7 +505,8 @@ def __enter__(self):
raise RuntimeError("Can't re-enter the same log_output!")
if self.file_like is None:
raise RuntimeError("file argument must be set by either __init__ or __call__")
raise RuntimeError(
"file argument must be set by either __init__ or __call__")
# set up a stream for the daemon to write to
self.log_file = FileWrapper(self.file_like)
@@ -555,7 +536,9 @@ def __enter__(self):
input_multiprocess_fd = None
try:
if sys.stdin.isatty():
input_multiprocess_fd = MultiProcessFd(os.dup(sys.stdin.fileno()))
input_multiprocess_fd = MultiProcessFd(
os.dup(sys.stdin.fileno())
)
except BaseException:
# just don't forward input if this fails
pass
@@ -564,14 +547,9 @@ def __enter__(self):
self.process = multiprocessing.Process(
target=_writer_daemon,
args=(
input_multiprocess_fd,
read_multiprocess_fd,
write_fd,
self.echo,
self.log_file,
child_pipe,
self.filter_fn,
),
input_multiprocess_fd, read_multiprocess_fd, write_fd,
self.echo, self.log_file, child_pipe, self.filter_fn
)
)
self.process.daemon = True # must set before start()
self.process.start()
@@ -586,7 +564,7 @@ def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
# Now do the actual output redirection.
# Now do the actual output rediction.
self.use_fds = _file_descriptors_work(sys.stdout, sys.stderr)
if self.use_fds:
# We try first to use OS-level file descriptors, as this
@@ -612,7 +590,7 @@ def __enter__(self):
self._saved_stderr = sys.stderr
# create a file object for the pipe; redirect to it.
pipe_fd_out = os.fdopen(write_fd, "w")
pipe_fd_out = os.fdopen(write_fd, 'w')
sys.stdout = pipe_fd_out
sys.stderr = pipe_fd_out
@@ -677,7 +655,8 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def force_echo(self):
"""Context manager to force local echo, even if echo is off."""
if not self._active:
raise RuntimeError("Can't call force_echo() outside log_output region!")
raise RuntimeError(
"Can't call force_echo() outside log_output region!")
# This uses the xon/xoff to highlight regions to be echoed in the
# output. We us these control characters rather than, say, a
@@ -692,186 +671,8 @@ def force_echo(self):
sys.stdout.flush()
class StreamWrapper:
"""Wrapper class to handle redirection of io streams"""
def __init__(self, sys_attr):
self.sys_attr = sys_attr
self.saved_stream = None
if sys.platform.startswith("win32"):
if sys.version_info < (3, 5):
libc = ctypes.CDLL(ctypes.util.find_library("c"))
else:
if hasattr(sys, "gettotalrefcount"): # debug build
libc = ctypes.CDLL("ucrtbased")
else:
libc = ctypes.CDLL("api-ms-win-crt-stdio-l1-1-0")
kernel32 = ctypes.WinDLL("kernel32")
# https://docs.microsoft.com/en-us/windows/console/getstdhandle
if self.sys_attr == "stdout":
STD_HANDLE = -11
elif self.sys_attr == "stderr":
STD_HANDLE = -12
else:
raise KeyError(self.sys_attr)
c_stdout = kernel32.GetStdHandle(STD_HANDLE)
self.libc = libc
self.c_stream = c_stdout
else:
self.libc = ctypes.CDLL(None)
self.c_stream = ctypes.c_void_p.in_dll(self.libc, self.sys_attr)
self.sys_stream = getattr(sys, self.sys_attr)
self.orig_stream_fd = self.sys_stream.fileno()
# Save a copy of the original stdout fd in saved_stream
self.saved_stream = os.dup(self.orig_stream_fd)
def redirect_stream(self, to_fd):
"""Redirect stdout to the given file descriptor."""
# Flush the C-level buffer stream
if sys.platform.startswith("win32"):
self.libc.fflush(None)
else:
self.libc.fflush(self.c_stream)
# Flush and close sys_stream - also closes the file descriptor (fd)
sys_stream = getattr(sys, self.sys_attr)
sys_stream.flush()
sys_stream.close()
# Make orig_stream_fd point to the same file as to_fd
os.dup2(to_fd, self.orig_stream_fd)
# Set sys_stream to a new stream that points to the redirected fd
new_buffer = open(self.orig_stream_fd, "wb")
new_stream = io.TextIOWrapper(new_buffer)
setattr(sys, self.sys_attr, new_stream)
self.sys_stream = getattr(sys, self.sys_attr)
def flush(self):
if sys.platform.startswith("win32"):
self.libc.fflush(None)
else:
self.libc.fflush(self.c_stream)
self.sys_stream.flush()
def close(self):
"""Redirect back to the original system stream, and close stream"""
try:
if self.saved_stream is not None:
self.redirect_stream(self.saved_stream)
finally:
if self.saved_stream is not None:
os.close(self.saved_stream)
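
A sketch of what StreamWrapper does (editor's example; the log path is hypothetical): because redirection happens at the file-descriptor level via dup2, output written by C libraries is captured too, not just Python-level writes.

wrapper = StreamWrapper("stdout")
log = open("capture.log", "wb")
wrapper.redirect_stream(log.fileno())
print("captured")                     # lands in capture.log, not the console
wrapper.close()                       # flush and restore the real stdout
log.close()
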
class winlog(object):
"""
Similar to nixlog, with underlying
functionality ported to support Windows.
Does not support the use of 'v' toggling as nixlog does.
"""
def __init__(
self, file_like=None, echo=False, debug=0, buffer=False, env=None, filter_fn=None
):
self.env = env
self.debug = debug
self.echo = echo
self.logfile = file_like
self.stdout = StreamWrapper("stdout")
self.stderr = StreamWrapper("stderr")
self._active = False
self._ioflag = False
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
def __enter__(self):
if self._active:
raise RuntimeError("Can't re-enter the same log_output!")
if self.logfile is None:
raise RuntimeError("file argument must be set by __init__ ")
# Open both write and reading on logfile
if type(self.logfile) == StringIO:
self._ioflag = True
# cannot have two streams on tempfile, so we must make our own
sys.stdout = self.logfile
sys.stderr = self.logfile
else:
self.writer = open(self.logfile, mode="wb+")
self.reader = open(self.logfile, mode="rb+")
# Dup stdout so we can still write to it after redirection
self.echo_writer = open(os.dup(sys.stdout.fileno()), "w")
# Redirect stdout and stderr to write to logfile
self.stderr.redirect_stream(self.writer.fileno())
self.stdout.redirect_stream(self.writer.fileno())
self._kill = threading.Event()
def background_reader(reader, echo_writer, _kill):
# for each line printed to logfile, read it
# if echo: write line to user
try:
while True:
is_killed = _kill.wait(0.1)
# Flush buffered build output to file
# stdout/err fds refer to log file
self.stderr.flush()
self.stdout.flush()
line = reader.readline()
if self.echo and line:
echo_writer.write("{0}".format(line.decode()))
echo_writer.flush()
if is_killed:
break
finally:
reader.close()
self._active = True
with replace_environment(self.env):
self._thread = Thread(
target=background_reader, args=(self.reader, self.echo_writer, self._kill)
)
self._thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._ioflag:
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self._ioflag = False
else:
self.writer.close()
self.echo_writer.flush()
self.stdout.flush()
self.stderr.flush()
self._kill.set()
self._thread.join()
self.stdout.close()
self.stderr.close()
self._active = False
@contextmanager
def force_echo(self):
"""Context manager to force local echo, even if echo is off."""
if not self._active:
raise RuntimeError("Can't call force_echo() outside log_output region!")
yield
def _writer_daemon(
stdin_multiprocess_fd,
read_multiprocess_fd,
write_fd,
echo,
log_file_wrapper,
control_pipe,
filter_fn,
):
def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
log_file_wrapper, control_pipe, filter_fn):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both
@@ -924,16 +725,16 @@ def _writer_daemon(
# write_fd to terminate the reading loop, so we close the file descriptor
# here. Forking is the process spawning method everywhere except Mac OS
# for Python >= 3.8 and on Windows
if sys.version_info < (3, 8) or sys.platform != "darwin":
if sys.version_info < (3, 8) or sys.platform != 'darwin':
os.close(write_fd)
# Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O.
if sys.version_info < (3,):
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1)
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
else:
# Python 3.x before 3.7 does not open with UTF-8 encoding by default
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1, encoding='utf-8')
if stdin_multiprocess_fd:
stdin = os.fdopen(stdin_multiprocess_fd.fd)
@@ -942,7 +743,7 @@ def _writer_daemon(
# list of streams to select from
istreams = [in_pipe, stdin] if stdin else [in_pipe]
force_echo = False # parent can force echo for certain output
force_echo = False # parent can force echo for certain output
log_file = log_file_wrapper.unwrap()
@@ -965,7 +766,7 @@ def _writer_daemon(
# check and the read, so we ignore SIGTTIN here.
with ignore_signal(signal.SIGTTIN):
try:
if stdin.read(1) == "v":
if stdin.read(1) == 'v':
echo = not echo
except IOError as e:
# If SIGTTIN is ignored, the system gives EIO
@@ -983,14 +784,14 @@ def _writer_daemon(
line = _retry(in_pipe.readline)()
except UnicodeDecodeError:
# installs like --test=root gpgme produce non-UTF8 logs
line = "<line lost: output was not encoded as UTF-8>\n"
line = '<line lost: output was not encoded as UTF-8>\n'
if not line:
return
line_count += 1
# find control characters and strip them.
clean_line, num_controls = control.subn("", line)
clean_line, num_controls = control.subn('', line)
# Echo to stdout if requested or forced.
if echo or force_echo:
@@ -1054,7 +855,6 @@ def _retry(function):
relevant for this file.
"""
def wrapped(*args, **kwargs):
while True:
try:
@@ -1067,7 +867,6 @@ def wrapped(*args, **kwargs):
if e.args[0] == errno.EINTR:
continue
raise
return wrapped

View File

@@ -11,7 +11,6 @@
things like timeouts in ``ProcessController.wait()``, which are set to
get tests done quickly, not to avoid high CPU usage.
Note: The functionality in this module is unsupported on Windows
"""
from __future__ import print_function
@@ -20,6 +19,7 @@
import re
import signal
import sys
import termios
import time
import traceback
@@ -27,14 +27,6 @@
from spack.util.executable import which
termios = None
try:
import termios as term_mod
termios = term_mod
except ImportError:
pass
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
@@ -43,8 +35,8 @@ class ProcessController(object):
minion) similar to the way a shell would, by sending signals and I/O.
"""
def __init__(self, pid, controller_fd, timeout=1, sleep_time=1e-1, debug=False):
def __init__(self, pid, controller_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
@@ -85,19 +77,18 @@ def get_canon_echo_attrs(self):
def horizontal_line(self, name):
"""Labeled horizontal line for debugging."""
if self.debug:
sys.stderr.write("------------------------------------------- %s\n" % name)
sys.stderr.write(
"------------------------------------------- %s\n" % name
)
def status(self):
"""Print debug message with status info for the minion."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write(
"canon: %s, echo: %s\n"
% (
"on" if canon else "off",
"on" if echo else "off",
)
)
sys.stderr.write("canon: %s, echo: %s\n" % (
"on" if canon else "off",
"on" if echo else "off",
))
sys.stderr.write("input: %s\n" % self.input_on())
sys.stderr.write("bg: %s\n" % self.background())
sys.stderr.write("\n")
@@ -139,7 +130,7 @@ def write(self, byte_string):
def wait(self, condition):
start = time.time()
while ((time.time() - start) < self.timeout) and not condition():
while (((time.time() - start) < self.timeout) and not condition()):
time.sleep(1e-2)
assert condition()
@@ -221,7 +212,6 @@ def minion_function(**kwargs)
|_________________________________________________________|
"""
def __init__(self, controller_function, minion_function):
self.proc = None
self.controller_function = controller_function
@@ -245,12 +235,8 @@ def start(self, **kwargs):
"""
self.proc = multiprocessing.Process(
target=PseudoShell._set_up_and_run_controller_function,
args=(
self.controller_function,
self.minion_function,
self.controller_timeout,
self.sleep_time,
),
args=(self.controller_function, self.minion_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
@@ -262,8 +248,7 @@ def join(self):
@staticmethod
def _set_up_and_run_minion_function(
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs
):
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
"""Minion process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
@@ -281,7 +266,8 @@ def _set_up_and_run_minion_function(
os.close(stdin_fd)
if kwargs.get("debug"):
sys.stderr.write("minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
sys.stderr.write(
"minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
@@ -295,15 +281,15 @@ def _set_up_and_run_minion_function(
@staticmethod
def _set_up_and_run_controller_function(
controller_function, minion_function, controller_timeout, sleep_time, **kwargs
):
controller_function, minion_function, controller_timeout,
sleep_time, **kwargs):
"""Set up a pty, spawn a minion process, execute controller_function.
Handles the mechanics of setting up a PTY, then calls
``controller_function``.
"""
os.setsid() # new session; this process is the controller
os.setsid() # new session; this process is the controller
controller_fd, minion_fd = os.openpty()
pty_name = os.ttyname(minion_fd)
@@ -312,10 +298,11 @@ def _set_up_and_run_controller_function(
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value("i", False)
ready = multiprocessing.Value('i', False)
minion_process = multiprocessing.Process(
target=PseudoShell._set_up_and_run_minion_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(), ready, minion_function),
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
ready, minion_function),
kwargs=kwargs,
)
minion_process.start()
@@ -335,7 +322,8 @@ def _set_up_and_run_controller_function(
minion_pgid = os.getpgid(minion_process.pid)
sys.stderr.write("minion pid: %d\n" % minion_process.pid)
sys.stderr.write("minion pgid: %d\n" % minion_pgid)
sys.stderr.write("minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write(
"minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
# set up controller to ignore SIGTSTP, like a shell
@@ -344,8 +332,7 @@ def _set_up_and_run_controller_function(
# call the controller function once the minion is ready
try:
controller = ProcessController(
minion_process.pid, controller_fd, debug=kwargs.get("debug")
)
minion_process.pid, controller_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
error = controller_function(minion_process, controller, **kwargs)
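
Editor's sketch of driving the PseudoShell machinery above (both functions are hypothetical): the minion runs under a fresh PTY while the controller observes and pokes it through a ProcessController.

import sys

def minion_function(**kwargs):
    print("isatty: %s" % sys.stdin.isatty())   # True: stdin is the new PTY

def controller_function(minion_proc, controller, **kwargs):
    controller.wait(lambda: not controller.background())  # poll with timeout
    return 0   # number of errors observed

shell = PseudoShell(controller_function, minion_function)
shell.start(debug=False)   # kwargs are forwarded to both functions
status = shell.join()      # wait for the controller to finish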

View File

@@ -3,11 +3,10 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: (major, minor, micro, dev release) tuple
spack_version_info = (0, 19, 0, "dev0")
#: major, minor, patch version for Spack, in a tuple
spack_version_info = (0, 17, 1)
#: PEP440 canonical <major>.<minor>.<micro>.<devN> string
spack_version = ".".join(str(s) for s in spack_version_info)
#: String containing Spack version joined with .'s
spack_version = '.'.join(str(v) for v in spack_version_info)
__all__ = ["spack_version_info", "spack_version"]
__version__ = spack_version
__all__ = ['spack_version_info', 'spack_version']

View File

@@ -15,25 +15,21 @@
class ABI(object):
"""This class provides methods to test ABI compatibility between specs.
The current implementation is rather rough and could be improved."""
The current implementation is rather rough and could be improved."""
def architecture_compatible(self, target, constraint):
"""Return true if architecture of target spec is ABI compatible
to the architecture of constraint spec. If either the target
or constraint specs have no architecture, target is also defined
as architecture ABI compatible to constraint."""
return (
not target.architecture
or not constraint.architecture
or target.architecture.satisfies(constraint.architecture)
)
to the architecture of constraint spec. If either the target
or constraint specs have no architecture, target is also defined
as architecture ABI compatible to constraint."""
return not target.architecture or not constraint.architecture or \
target.architecture.satisfies(constraint.architecture)
@memoized
def _gcc_get_libstdcxx_version(self, version):
"""Returns gcc ABI compatibility info by getting the library version of
a compiler's libstdc++ or libgcc_s"""
a compiler's libstdc++ or libgcc_s"""
from spack.build_environment import dso_suffix
spec = CompilerSpec("gcc", version)
compilers = spack.compilers.compilers_for_spec(spec)
if not compilers:
@@ -54,7 +50,7 @@ def _gcc_get_libstdcxx_version(self, version):
# Some gcc's are actually clang and don't respond properly to
# --print-file-name (they just print the filename, not the
# full path). Ignore these and expect them to be handled as clang.
if Clang.default_version(rungcc.exe[0]) != "unknown":
if Clang.default_version(rungcc.exe[0]) != 'unknown':
return None
output = rungcc("--print-file-name=%s" % libname, output=str)
@@ -70,7 +66,7 @@ def _gcc_get_libstdcxx_version(self, version):
@memoized
def _gcc_compiler_compare(self, pversion, cversion):
"""Returns true iff the gcc version pversion and cversion
are ABI compatible."""
are ABI compatible."""
plib = self._gcc_get_libstdcxx_version(pversion)
clib = self._gcc_get_libstdcxx_version(cversion)
if not plib or not clib:
@@ -79,10 +75,10 @@ def _gcc_compiler_compare(self, pversion, cversion):
def _intel_compiler_compare(self, pversion, cversion):
"""Returns true iff the intel version pversion and cversion
are ABI compatible"""
are ABI compatible"""
# Test major and minor versions. Ignore build version.
if len(pversion.version) < 2 or len(cversion.version) < 2:
if (len(pversion.version) < 2 or len(cversion.version) < 2):
return False
return pversion.version[:2] == cversion.version[:2]
@@ -95,7 +91,7 @@ def compiler_compatible(self, parent, child, **kwargs):
# Different compiler families are assumed ABI incompatible
return False
if kwargs.get("loose", False):
if kwargs.get('loose', False):
return True
# TODO: Can we move the specialized ABI matching stuff
@@ -106,19 +102,16 @@ def compiler_compatible(self, parent, child, **kwargs):
# Otherwise match on version match.
if pversion.satisfies(cversion):
return True
elif parent.compiler.name == "gcc" and self._gcc_compiler_compare(
pversion, cversion
):
elif (parent.compiler.name == "gcc" and
self._gcc_compiler_compare(pversion, cversion)):
return True
elif parent.compiler.name == "intel" and self._intel_compiler_compare(
pversion, cversion
):
elif (parent.compiler.name == "intel" and
self._intel_compiler_compare(pversion, cversion)):
return True
return False
def compatible(self, target, constraint, **kwargs):
"""Returns true if target spec is ABI compatible to constraint spec"""
loosematch = kwargs.get("loose", False)
return self.architecture_compatible(target, constraint) and self.compiler_compatible(
target, constraint, loose=loosematch
)
loosematch = kwargs.get('loose', False)
return self.architecture_compatible(target, constraint) and \
self.compiler_compatible(target, constraint, loose=loosematch)
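
Editor's sketch of querying the checker (specs illustrative; a working Spack instance is assumed):

import spack.spec
from spack.abi import ABI

parent = spack.spec.Spec("hdf5 %gcc@9.4.0")
child = spack.spec.Spec("zlib %gcc@10.3.0")

if ABI().compatible(parent, child, loose=True):
    pass  # loose mode only requires matching compiler families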

View File

@@ -0,0 +1,42 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This package contains code for creating analyzers to extract Application
Binary Interface (ABI) information, along with simple analyses that just load
existing metadata.
"""
from __future__ import absolute_import
import llnl.util.tty as tty
import spack.paths
import spack.util.classes
mod_path = spack.paths.analyzers_path
analyzers = spack.util.classes.list_classes("spack.analyzers", mod_path)
# The base analyzer has no name attribute, so we can't build this dict with a comprehension
analyzer_types = {}
for a in analyzers:
if not hasattr(a, "name"):
continue
analyzer_types[a.name] = a
def list_all():
"""A helper function to list all analyzers and their descriptions
"""
for name, analyzer in analyzer_types.items():
print("%-25s: %-35s" % (name, analyzer.description))
def get_analyzer(name):
"""Courtesy function to retrieve an analyzer, and exit on error if it
does not exist.
"""
if name in analyzer_types:
return analyzer_types[name]
tty.die("Analyzer %s does not exist" % name)
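
Editor's sketch of the registry's intended use (spec is hypothetical and must refer to an installed package):

list_all()                          # one "name: description" line per analyzer

cls = get_analyzer("config_args")   # tty.die()s if the name is unknown
analyzer = cls(spec)                # spec: a concrete, installed Spec
result = analyzer.run()
analyzer.save_result(result)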

View File

@@ -0,0 +1,116 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""An analyzer base provides basic functions to run the analysis, save results,
and (optionally) interact with a Spack Monitor
"""
import os
import llnl.util.tty as tty
import spack.config
import spack.hooks
import spack.monitor
import spack.util.path
def get_analyzer_dir(spec, analyzer_dir=None):
"""
Given a spec, return the directory to save analyzer results.
We create the directory if it does not exist. We also check that the
spec has an associated package. An analyzer cannot be run if the spec isn't
associated with a package. If the user provides a custom analyzer_dir,
we use it over checking the config and the default at ~/.spack/analyzers
"""
# An analyzer cannot be run if the spec isn't associated with a package
if not hasattr(spec, "package") or not spec.package:
tty.die("A spec can only be analyzed with an associated package.")
# The top level directory is in the user home, or a custom location
if not analyzer_dir:
analyzer_dir = spack.util.path.canonicalize_path(
spack.config.get('config:analyzers_dir', '~/.spack/analyzers'))
# We follow the same convention as the spec install (this could be better)
package_prefix = os.sep.join(spec.package.prefix.split('/')[-3:])
meta_dir = os.path.join(analyzer_dir, package_prefix)
return meta_dir
class AnalyzerBase(object):
def __init__(self, spec, dirname=None):
"""
Verify that the analyzer has correct metadata.
An Analyzer is intended to run on one spec install, so the spec
with its associated package is required on init. The child analyzer
class should define an init function that super's the init here, and
also check that the analyzer has all dependencies that it
needs. If an analyzer subclass does not have dependencies, it does not
need to define an init. An Analyzer should not be allowed to proceed
if one or more dependencies are missing. The dirname, if defined,
is an optional directory name to save to (instead of the default meta
spack directory).
"""
self.spec = spec
self.dirname = dirname
self.meta_dir = os.path.dirname(spec.package.install_log_path)
for required in ["name", "outfile", "description"]:
if not hasattr(self, required):
tty.die("Please add a %s attribute on the analyzer." % required)
def run(self):
"""
Given a spec with an installed package, run the analyzer on it.
"""
raise NotImplementedError
@property
def output_dir(self):
"""
The full path to the output directory.
This includes the nested analyzer directory structure. This function
does not create anything.
"""
if not hasattr(self, "_output_dir"):
output_dir = get_analyzer_dir(self.spec, self.dirname)
self._output_dir = os.path.join(output_dir, self.name)
return self._output_dir
def save_result(self, result, overwrite=False):
"""
Save a result to the associated spack monitor, if defined.
This function is on the level of the analyzer because it might be
the case that the result is large (appropriate for a single request)
or that the data is organized differently (e.g., more than one
request per result). If an analyzer subclass needs to override
this function with a custom save, that is appropriate to do (see abi).
"""
# We maintain the structure in json with the analyzer as key so
# that in the future, we could upload to a monitor server
if result[self.name]:
outfile = os.path.join(self.output_dir, self.outfile)
# Only try to create the results directory if we have a result
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
# Don't overwrite an existing result if overwrite is False
if os.path.exists(outfile) and not overwrite:
tty.info("%s exists and overwrite is False, skipping." % outfile)
else:
tty.info("Writing result to %s" % outfile)
spack.monitor.write_json(result[self.name], outfile)
# This hook runs after a save result
spack.hooks.on_analyzer_save(self.spec.package, result)

View File

@@ -0,0 +1,33 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""A configargs analyzer is a class of analyzer that typically just uploads
already existing metadata about config args from a package spec install
directory."""
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class ConfigArgs(AnalyzerBase):
name = "config_args"
outfile = "spack-analyzer-config-args.json"
description = "config args loaded from spack-configure-args.txt"
def run(self):
"""
Load the configure-args.txt and save in json.
The run function will find the spack-config-args.txt file in the
package install directory, and read it into a json structure that has
the name of the analyzer as the key.
"""
config_file = os.path.join(self.meta_dir, "spack-configure-args.txt")
return {self.name: spack.monitor.read_file(config_file)}


@@ -0,0 +1,54 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""An environment analyzer will read and parse the environment variables
file in the installed package directory, generating a json file that has
an index of key, value pairs for environment variables."""
import os
import llnl.util.tty as tty
from spack.util.environment import EnvironmentModifications
from .analyzer_base import AnalyzerBase
class EnvironmentVariables(AnalyzerBase):
name = "environment_variables"
outfile = "spack-analyzer-environment-variables.json"
description = "environment variables parsed from spack-build-env.txt"
def run(self):
"""
Load, parse, and save spack-build-env.txt to analyzers.
Read in the spack-build-env.txt file from the package install
directory and parse the environment variables into key value pairs.
        The result is keyed by the analyzer name.
"""
env_file = os.path.join(self.meta_dir, "spack-build-env.txt")
return {self.name: self._read_environment_file(env_file)}
def _read_environment_file(self, filename):
"""
Read and parse the environment file.
Given an environment file, we want to read it, split by semicolons
and new lines, and then parse down to the subset of SPACK_* variables.
We assume that all spack prefix variables are not secrets, and unlike
the install_manifest.json, we don't (at least to start) parse the values
to remove path prefixes specific to user systems.
"""
if not os.path.exists(filename):
tty.warn("No environment file available")
return
mods = EnvironmentModifications.from_sourcing_file(filename)
env = {}
mods.apply_modifications(env)
return env
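# A sketch of how this analyzer is driven end to end; "spec" stands in for a
# concrete, installed Spec and is hypothetical here.
def _example_run_environment_analyzer(spec):
    analyzer = EnvironmentVariables(spec)
    result = analyzer.run()  # {"environment_variables": {...}}
    # save_result writes spack-analyzer-environment-variables.json
    # under the analyzer's output directory.
    analyzer.save_result(result)
    return result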


@@ -0,0 +1,31 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The install files json file (install_manifest.json) already exists in
the package install folder, so this analyzer simply moves it to the user
analyzer folder for further processing."""
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class InstallFiles(AnalyzerBase):
name = "install_files"
outfile = "spack-analyzer-install-files.json"
description = "install file listing read from install_manifest.json"
def run(self):
"""
Load in the install_manifest.json and save to analyzers.
We write it out to the analyzers folder, with key as the analyzer name.
"""
manifest_file = os.path.join(self.meta_dir, "install_manifest.json")
return {self.name: spack.monitor.read_json(manifest_file)}


@@ -0,0 +1,114 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import llnl.util.tty as tty
import spack
import spack.binary_distribution
import spack.bootstrap
import spack.error
import spack.hooks
import spack.monitor
import spack.package
import spack.repo
import spack.util.executable
from .analyzer_base import AnalyzerBase
class Libabigail(AnalyzerBase):
name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"
def __init__(self, spec, dirname=None):
"""
        The init for an analyzer ensures we have all needed dependencies.
For the libabigail analyzer, this means Libabigail.
Since the output for libabigail is one file per object, we communicate
with the monitor multiple times.
"""
super(Libabigail, self).__init__(spec, dirname)
        # Importing this at the module level does not seem to work
tty.debug("Preparing to use Libabigail, will install if missing.")
with spack.bootstrap.ensure_bootstrap_configuration():
# libabigail won't install lib/bin/share without docs
spec = spack.spec.Spec("libabigail+docs")
spack.bootstrap.ensure_executables_in_path_or_raise(
["abidw"], abstract_spec=spec
)
self.abidw = spack.util.executable.which('abidw')
def run(self):
"""
Run libabigail, and save results to filename.
        This run function differs in that we write results as we generate
        them, and then return a dict with the analyzer name as the key and,
        as the value, a dict of results keyed by object name, where each
        value is the output file written for that object.
"""
manifest = spack.binary_distribution.get_buildfile_manifest(self.spec)
# This result will store a path to each file
result = {}
# Generate an output file for each binary or object
for obj in manifest.get("binary_to_relocate_fullpath", []):
# We want to preserve the path in the install directory in case
            # a library has an equivalently named lib or executable, for example
outdir = os.path.dirname(obj.replace(self.spec.package.prefix,
'').strip(os.path.sep))
outfile = "spack-analyzer-libabigail-%s.xml" % os.path.basename(obj)
outfile = os.path.join(self.output_dir, outdir, outfile)
outdir = os.path.dirname(outfile)
# Create the output directory
if not os.path.exists(outdir):
os.makedirs(outdir)
            # Sometimes libabigail segfaults and dumps core
try:
self.abidw(obj, "--out-file", outfile)
result[obj] = outfile
tty.info("Writing result to %s" % outfile)
except spack.error.SpackError:
tty.warn("Issue running abidw for %s" % obj)
return {self.name: result}
def save_result(self, result, overwrite=False):
"""
Read saved ABI results and upload to monitor server.
ABI results are saved to individual files, so each one needs to be
        read and uploaded. Result here should be the lookup generated in run(),
        where the key is the analyzer name and each value is the result file.
We currently upload the entire xml as text because libabigail can't
easily read gzipped xml, but this will be updated when it can.
"""
if not spack.monitor.cli:
return
name = self.spec.package.name
for obj, filename in result.get(self.name, {}).items():
# Don't include the prefix
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
# We've already saved the results to file during run
content = spack.monitor.read_file(filename)
# A result needs an analyzer, value or binary_value, and name
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
spack.hooks.on_analyzer_save(self.spec.package, {"libabigail": [data]})
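# For reference, the lookup returned by run() above maps each analyzed object
# to the XML file written for it, e.g. (paths hypothetical):
#
#     {"libabigail": {
#         "<prefix>/lib/libexample.so":
#             "<output_dir>/lib/spack-analyzer-libabigail-libexample.so.xml",
#     }}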


@@ -36,20 +36,16 @@ def _search_duplicate_compilers(error_cls):
as input.
"""
import collections
import inspect
import itertools
import re
from six.moves.urllib.request import urlopen
import llnl.util.lang
from llnl.util.compat import Sequence
try:
from collections.abc import Sequence # novm
except ImportError:
from collections import Sequence
import spack.config
import spack.patch
import spack.repo
import spack.spec
import spack.variant
#: Map an audit tag to a list of callables implementing checks
CALLBACKS = {}
@@ -60,13 +56,14 @@ def _search_duplicate_compilers(error_cls):
class Error(object):
"""Information on an error reported in a test."""
def __init__(self, summary, details):
self.summary = summary
self.details = tuple(details)
def __str__(self):
return self.summary + "\n" + "\n".join([" " + detail for detail in self.details])
return self.summary + '\n' + '\n'.join([
' ' + detail for detail in self.details
])
def __eq__(self, other):
if self.summary != other.summary or self.details != other.details:
@@ -118,11 +115,11 @@ def __len__(self):
def run(self, **kwargs):
msg = 'please pass "{0}" as keyword arguments'
msg = msg.format(", ".join(self.kwargs))
msg = msg.format(', '.join(self.kwargs))
assert set(self.kwargs) == set(kwargs), msg
errors = []
kwargs["error_cls"] = Error
kwargs['error_cls'] = Error
for fn in self.callbacks:
errors.extend(fn(**kwargs))
@@ -164,63 +161,78 @@ def run_check(tag, **kwargs):
# TODO: https://github.com/spack/spack/pull/23053/files#r630265011
#: Generic checks relying on global state
generic = AuditClass(
group="generic",
tag="GENERIC",
description="Generic checks relying on global variables",
kwargs=(),
group='generic',
tag='GENERIC',
description='Generic checks relying on global variables',
kwargs=()
)
#: Sanity checks on compilers.yaml
config_compiler = AuditClass(
group="configs", tag="CFG-COMPILER", description="Sanity checks on compilers.yaml", kwargs=()
group='configs',
tag='CFG-COMPILER',
description='Sanity checks on compilers.yaml',
kwargs=()
)
@config_compiler
def _search_duplicate_compilers(error_cls):
"""Report compilers with the same spec and two different definitions"""
import spack.config
errors = []
compilers = list(sorted(spack.config.get("compilers"), key=lambda x: x["compiler"]["spec"]))
for spec, group in itertools.groupby(compilers, key=lambda x: x["compiler"]["spec"]):
compilers = list(sorted(
spack.config.get('compilers'), key=lambda x: x['compiler']['spec']
))
for spec, group in itertools.groupby(
compilers, key=lambda x: x['compiler']['spec']
):
group = list(group)
if len(group) == 1:
continue
error_msg = "Compiler defined multiple times: {0}"
error_msg = 'Compiler defined multiple times: {0}'
try:
details = [str(x._start_mark).strip() for x in group]
except Exception:
details = []
errors.append(error_cls(summary=error_msg.format(spec), details=details))
errors.append(error_cls(
summary=error_msg.format(spec), details=details
))
return errors
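# The decorator pattern above generalizes: any callable that accepts error_cls
# (plus the audit group's declared kwargs) and returns a list of errors can be
# registered the same way. A hypothetical extra compilers.yaml check:
@config_compiler
def _search_compilers_with_missing_cc(error_cls):
    """Hypothetical check: report compiler entries whose cc path is missing"""
    import os

    import spack.config

    errors = []
    for entry in spack.config.get("compilers"):
        cc = entry["compiler"]["paths"].get("cc")
        if cc and not os.path.exists(cc):
            errors.append(error_cls(
                summary="cc path does not exist: {0}".format(cc),
                details=[str(entry["compiler"]["spec"])],
            ))
    return errors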
#: Sanity checks on packages.yaml
config_packages = AuditClass(
group="configs", tag="CFG-PACKAGES", description="Sanity checks on packages.yaml", kwargs=()
group='configs',
tag='CFG-PACKAGES',
description='Sanity checks on packages.yaml',
kwargs=()
)
@config_packages
def _search_duplicate_specs_in_externals(error_cls):
"""Search for duplicate specs declared as externals"""
import spack.config
errors, externals = [], collections.defaultdict(list)
packages_yaml = spack.config.get("packages")
packages_yaml = spack.config.get('packages')
for name, pkg_config in packages_yaml.items():
# No externals can be declared under all
if name == "all" or "externals" not in pkg_config:
if name == 'all' or 'externals' not in pkg_config:
continue
current_externals = pkg_config["externals"]
current_externals = pkg_config['externals']
for entry in current_externals:
# Ask for the string representation of the spec to normalize
# aspects of the spec that may be represented in multiple ways
# e.g. +foo or foo=true
key = str(spack.spec.Spec(entry["spec"]))
key = str(spack.spec.Spec(entry['spec']))
externals[key].append(entry)
for spec, entries in sorted(externals.items()):
@@ -229,14 +241,14 @@ def _search_duplicate_specs_in_externals(error_cls):
continue
        # Otherwise we need to report an error
error_msg = "Multiple externals share the same spec: {0}".format(spec)
error_msg = 'Multiple externals share the same spec: {0}'.format(spec)
try:
lines = [str(x._start_mark).strip() for x in entries]
details = (
["Please remove all but one of the following entries:"]
+ lines
+ ["as they might result in non-deterministic hashes"]
)
details = [
'Please remove all but one of the following entries:'
] + lines + [
'as they might result in non-deterministic hashes'
]
except TypeError:
details = []
@@ -247,132 +259,48 @@ def _search_duplicate_specs_in_externals(error_cls):
#: Sanity checks on package directives
package_directives = AuditClass(
group="packages",
tag="PKG-DIRECTIVES",
description="Sanity checks on specs used in directives",
kwargs=("pkgs",),
group='packages',
tag='PKG-DIRECTIVES',
description='Sanity checks on specs used in directives',
kwargs=('pkgs',)
)
package_attributes = AuditClass(
group="packages",
tag="PKG-ATTRIBUTES",
description="Sanity checks on reserved attributes of packages",
kwargs=("pkgs",),
)
#: Sanity checks on linting
# This can take some time, so it's run separately from packages
package_https_directives = AuditClass(
group="packages-https",
tag="PKG-HTTPS-DIRECTIVES",
description="Sanity checks on https checks of package urls, etc.",
kwargs=("pkgs",),
group='packages-https',
tag='PKG-HTTPS-DIRECTIVES',
description='Sanity checks on https checks of package urls, etc.',
kwargs=('pkgs',)
)
@package_directives
def _check_build_test_callbacks(pkgs, error_cls):
"""Ensure stand-alone test method is not included in build-time callbacks"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
test_callbacks = pkg_cls.build_time_test_callbacks
if test_callbacks and "test" in test_callbacks:
msg = '{0} package contains "test" method in ' "build_time_test_callbacks"
instr = 'Remove "test" from: [{0}]'.format(", ".join(test_callbacks))
errors.append(error_cls(msg.format(pkg_name), [instr]))
return errors
@package_directives
def _check_patch_urls(pkgs, error_cls):
"""Ensure that patches fetched from GitHub have stable sha256 hashes."""
github_patch_url_re = (
r"^https?://(?:patch-diff\.)?github(?:usercontent)?\.com/"
".+/.+/(?:commit|pull)/[a-fA-F0-9]*.(?:patch|diff)"
)
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for condition, patches in pkg_cls.patches.items():
for patch in patches:
if not isinstance(patch, spack.patch.UrlPatch):
continue
if not re.match(github_patch_url_re, patch.url):
continue
full_index_arg = "?full_index=1"
if not patch.url.endswith(full_index_arg):
errors.append(
error_cls(
"patch URL in package {0} must end with {1}".format(
pkg_cls.name,
full_index_arg,
),
[patch.url],
)
)
return errors
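# As a concrete illustration of what this check enforces (repository, version,
# and hash below are hypothetical), a GitHub patch directive in a package
# should pin the full index so that its sha256 stays stable:
#
#     patch(
#         "https://github.com/example/project/pull/1234.patch?full_index=1",
#         sha256="<sha256 of the full-index patch>",
#         when="@2.0",
#     )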
@package_attributes
def _search_for_reserved_attributes_names_in_packages(pkgs, error_cls):
"""Ensure that packages don't override reserved names"""
RESERVED_NAMES = ("name",)
errors = []
for pkg_name in pkgs:
name_definitions = collections.defaultdict(list)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for cls_item in inspect.getmro(pkg_cls):
for name in RESERVED_NAMES:
current_value = cls_item.__dict__.get(name)
if current_value is None:
continue
name_definitions[name].append((cls_item, current_value))
for name in RESERVED_NAMES:
if len(name_definitions[name]) == 1:
continue
error_msg = (
"Package '{}' overrides the '{}' attribute or method, "
"which is reserved for Spack internal use"
)
definitions = [
"defined in '{}'".format(x[0].__module__) for x in name_definitions[name]
]
errors.append(error_cls(error_msg.format(pkg_name, name), definitions))
return errors
@package_https_directives
def _linting_package_file(pkgs, error_cls):
"""Check for correctness of links"""
"""Check for correctness of links
"""
import llnl.util.lang
import spack.repo
import spack.spec
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
# Does the homepage have http, and if so, does https work?
if pkg_cls.homepage.startswith("http://"):
https = re.sub("http", "https", pkg_cls.homepage, 1)
if pkg.homepage.startswith('http://'):
https = re.sub("http", "https", pkg.homepage, 1)
try:
response = urlopen(https)
except Exception as e:
msg = 'Error with attempting https for "{0}": '
errors.append(error_cls(msg.format(pkg_cls.name), [str(e)]))
errors.append(error_cls(msg.format(pkg.name), [str(e)]))
continue
if response.getcode() == 200:
msg = 'Package "{0}" uses http but has a valid https endpoint.'
errors.append(msg.format(pkg_cls.name))
errors.append(msg.format(pkg.name))
return llnl.util.lang.dedupe(errors)
@@ -380,12 +308,17 @@ def _linting_package_file(pkgs, error_cls):
@package_directives
def _unknown_variants_in_directives(pkgs, error_cls):
"""Report unknown or wrong variants in directives for this package"""
import llnl.util.lang
import spack.repo
import spack.spec
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
# Check "conflicts" directive
for conflict, triggers in pkg_cls.conflicts.items():
for conflict, triggers in pkg.conflicts.items():
for trigger, _ in triggers:
vrn = spack.spec.Spec(conflict)
try:
@@ -397,48 +330,36 @@ def _unknown_variants_in_directives(pkgs, error_cls):
# conflict and trigger separately in that case.
# When os and target constraints can be created independently of
# the platform, TODO change this back to add an error.
errors.extend(
_analyze_variants_in_directive(
pkg_cls,
spack.spec.Spec(trigger),
directive="conflicts",
error_cls=error_cls,
)
)
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="conflicts", error_cls=error_cls
)
)
errors.extend(_analyze_variants_in_directive(
pkg, spack.spec.Spec(trigger),
directive='conflicts', error_cls=error_cls
))
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='conflicts', error_cls=error_cls
))
# Check "depends_on" directive
for _, triggers in pkg_cls.dependencies.items():
for _, triggers in pkg.dependencies.items():
triggers = list(triggers)
for trigger in list(triggers):
vrn = spack.spec.Spec(trigger)
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="depends_on", error_cls=error_cls
)
)
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='depends_on', error_cls=error_cls
))
# Check "patch" directive
for _, triggers in pkg_cls.provided.items():
for _, triggers in pkg.provided.items():
triggers = [spack.spec.Spec(x) for x in triggers]
for vrn in triggers:
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="patch", error_cls=error_cls
)
)
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='patch', error_cls=error_cls
))
# Check "resource" directive
for vrn in pkg_cls.resources:
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="resource", error_cls=error_cls
)
)
for vrn in pkg.resources:
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='resource', error_cls=error_cls
))
return llnl.util.lang.dedupe(errors)
@@ -446,24 +367,28 @@ def _unknown_variants_in_directives(pkgs, error_cls):
@package_directives
def _unknown_variants_in_dependencies(pkgs, error_cls):
"""Report unknown dependencies and wrong variants for dependencies"""
import spack.repo
import spack.spec
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
for dependency_name, dependency_data in pkg_cls.dependencies.items():
for dependency_name, dependency_data in pkg.dependencies.items():
# No need to analyze virtual packages
if spack.repo.path.is_virtual(dependency_name):
continue
try:
dependency_pkg_cls = spack.repo.path.get_pkg_class(dependency_name)
dependency_pkg = spack.repo.get(dependency_name)
except spack.repo.UnknownPackageError:
# This dependency is completely missing, so report
# and continue the analysis
summary = pkg_name + ": unknown package '{0}' in " "'depends_on' directive".format(
dependency_name
)
details = [" in " + filename]
summary = (pkg_name + ": unknown package '{0}' in "
"'depends_on' directive".format(dependency_name))
details = [
" in " + filename
]
errors.append(error_cls(summary=summary, details=details))
continue
@@ -471,21 +396,20 @@ def _unknown_variants_in_dependencies(pkgs, error_cls):
dependency_variants = dependency_edge.spec.variants
for name, value in dependency_variants.items():
try:
v, _ = dependency_pkg_cls.variants[name]
v.validate_or_raise(value, pkg_cls=dependency_pkg_cls)
v, _ = dependency_pkg.variants[name]
v.validate_or_raise(value, pkg=dependency_pkg)
except Exception as e:
summary = (
pkg_name + ": wrong variant used for a "
"dependency in a 'depends_on' directive"
)
summary = (pkg_name + ": wrong variant used for a "
"dependency in a 'depends_on' directive")
error_msg = str(e).strip()
if isinstance(e, KeyError):
error_msg = "the variant {0} does not " "exist".format(error_msg)
error_msg = ('the variant {0} does not '
'exist'.format(error_msg))
error_msg += " in package '" + dependency_name + "'"
errors.append(
error_cls(summary=summary, details=[error_msg, "in " + filename])
)
errors.append(error_cls(
summary=summary, details=[error_msg, 'in ' + filename]
))
return errors
@@ -493,52 +417,56 @@ def _unknown_variants_in_dependencies(pkgs, error_cls):
@package_directives
def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls):
"""Report if version constraints used in directives are not satisfiable"""
import spack.repo
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
dependencies_to_check = []
for dependency_name, dependency_data in pkg_cls.dependencies.items():
for dependency_name, dependency_data in pkg.dependencies.items():
# Skip virtual dependencies for the time being, check on
# their versions can be added later
if spack.repo.path.is_virtual(dependency_name):
continue
dependencies_to_check.extend([edge.spec for edge in dependency_data.values()])
dependencies_to_check.extend(
[edge.spec for edge in dependency_data.values()]
)
for s in dependencies_to_check:
dependency_pkg_cls = None
dependency_pkg = None
try:
dependency_pkg_cls = spack.repo.path.get_pkg_class(s.name)
assert any(v.satisfies(s.versions) for v in list(dependency_pkg_cls.versions))
dependency_pkg = spack.repo.get(s.name)
assert any(
v.satisfies(s.versions) for v in list(dependency_pkg.versions)
)
except Exception:
summary = (
"{0}: dependency on {1} cannot be satisfied " "by known versions of {1.name}"
).format(pkg_name, s)
details = ["happening in " + filename]
if dependency_pkg_cls is not None:
details.append(
"known versions of {0.name} are {1}".format(
s, ", ".join([str(x) for x in dependency_pkg_cls.versions])
)
)
summary = ("{0}: dependency on {1} cannot be satisfied "
"by known versions of {1.name}").format(pkg_name, s)
details = ['happening in ' + filename]
if dependency_pkg is not None:
details.append('known versions of {0.name} are {1}'.format(
s, ', '.join([str(x) for x in dependency_pkg.versions])
))
errors.append(error_cls(summary=summary, details=details))
return errors
def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
import spack.variant
variant_exceptions = (
spack.variant.InconsistentValidationError,
spack.variant.MultipleValuesInExclusiveVariantError,
spack.variant.InvalidVariantValueError,
KeyError,
KeyError
)
errors = []
for name, v in constraint.variants.items():
try:
variant, _ = pkg.variants[name]
variant.validate_or_raise(v, pkg_cls=pkg)
variant.validate_or_raise(v, pkg=pkg)
except variant_exceptions as e:
summary = pkg.name + ': wrong variant in "{0}" directive'
summary = summary.format(directive)
@@ -546,9 +474,11 @@ def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
error_msg = str(e).strip()
if isinstance(e, KeyError):
error_msg = "the variant {0} does not exist".format(error_msg)
error_msg = 'the variant {0} does not exist'.format(error_msg)
err = error_cls(summary=summary, details=[error_msg, "in " + filename])
err = error_cls(summary=summary, details=[
error_msg, 'in ' + filename
])
errors.append(err)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -9,7 +9,7 @@
from spack.build_systems.autotools import AutotoolsPackage
from spack.directives import extends
from spack.package_base import ExtensionError
from spack.package import ExtensionError
from spack.util.executable import which
@@ -23,32 +23,29 @@
class AspellDictPackage(AutotoolsPackage):
"""Specialized class for building aspell dictionairies."""
extends("aspell")
extends('aspell')
def view_destination(self, view):
aspell_spec = self.spec["aspell"]
aspell_spec = self.spec['aspell']
if view.get_projection_for_spec(aspell_spec) != aspell_spec.prefix:
raise ExtensionError("aspell does not support non-global extensions")
raise ExtensionError(
'aspell does not support non-global extensions')
aspell = aspell_spec.command
return aspell("dump", "config", "dict-dir", output=str).strip()
return aspell('dump', 'config', 'dict-dir', output=str).strip()
def view_source(self):
return self.prefix.lib
def patch(self):
filter_file(r"^dictdir=.*$", "dictdir=/lib", "configure")
filter_file(r"^datadir=.*$", "datadir=/lib", "configure")
filter_file(r'^dictdir=.*$', 'dictdir=/lib', 'configure')
filter_file(r'^datadir=.*$', 'datadir=/lib', 'configure')
def configure(self, spec, prefix):
aspell = spec["aspell"].prefix.bin.aspell
prezip = spec["aspell"].prefix.bin.prezip
aspell = spec['aspell'].prefix.bin.aspell
prezip = spec['aspell'].prefix.bin.prezip
destdir = prefix
sh = which("sh")
sh(
"./configure",
"--vars",
"ASPELL={0}".format(aspell),
"PREZIP={0}".format(prezip),
"DESTDIR={0}".format(destdir),
)
sh = which('sh')
sh('./configure', '--vars', "ASPELL={0}".format(aspell),
"PREZIP={0}".format(prezip),
"DESTDIR={0}".format(destdir))


@@ -14,9 +14,9 @@
from llnl.util.filesystem import force_remove, working_dir
from spack.build_environment import InstallError
from spack.directives import conflicts, depends_on
from spack.directives import depends_on
from spack.operating_systems.mac_os import macos_version
from spack.package_base import PackageBase, run_after, run_before
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable
from spack.version import Version
@@ -52,12 +52,11 @@ class AutotoolsPackage(PackageBase):
+-----------------------------------------------+--------------------+
"""
#: Phases of a GNU Autotools package
phases = ["autoreconf", "configure", "build", "install"]
phases = ['autoreconf', 'configure', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "AutotoolsPackage"
build_system_class = 'AutotoolsPackage'
@property
def patch_config_files(self):
@@ -72,14 +71,12 @@ def patch_config_files(self):
the directory containing the system ``config.guess`` and ``config.sub``
files.
"""
return (
self.spec.satisfies("target=ppc64le:")
or self.spec.satisfies("target=aarch64:")
or self.spec.satisfies("target=riscv64:")
)
return (self.spec.satisfies('target=ppc64le:')
or self.spec.satisfies('target=aarch64:')
or self.spec.satisfies('target=riscv64:'))
#: Whether or not to update ``libtool``
#: (currently only for Arm/Clang/Fujitsu/NVHPC compilers)
#: (currently only for Arm/Clang/Fujitsu compilers)
patch_libtool = True
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.build`
@@ -87,13 +84,13 @@ def patch_config_files(self):
build_targets = [] # type: List[str]
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.install`
#: phase
install_targets = ["install"]
install_targets = ['install']
#: Callback names for build-time test
build_time_test_callbacks = ["check"]
build_time_test_callbacks = ['check']
#: Callback names for install-time test
install_time_test_callbacks = ["installcheck"]
install_time_test_callbacks = ['installcheck']
#: Set to true to force the autoreconf step even if configure is present
force_autoreconf = False
@@ -104,10 +101,9 @@ def patch_config_files(self):
#: after the installation. If True instead it installs them.
install_libtool_archives = False
depends_on("gnuconfig", type="build", when="target=ppc64le:")
depends_on("gnuconfig", type="build", when="target=aarch64:")
depends_on("gnuconfig", type="build", when="target=riscv64:")
conflicts("platform=windows")
depends_on('gnuconfig', type='build', when='target=ppc64le:')
depends_on('gnuconfig', type='build', when='target=aarch64:')
depends_on('gnuconfig', type='build', when='target=riscv64:')
@property
def _removed_la_files_log(self):
@@ -115,17 +111,17 @@ def _removed_la_files_log(self):
build_dir = self.build_directory
if not os.path.isabs(self.build_directory):
build_dir = os.path.join(self.stage.path, build_dir)
return os.path.join(build_dir, "removed_la_files.txt")
return os.path.join(build_dir, 'removed_la_files.txt')
@property
def archive_files(self):
"""Files to archive for packages based on autotools"""
files = [os.path.join(self.build_directory, "config.log")]
files = [os.path.join(self.build_directory, 'config.log')]
if not self.install_libtool_archives:
files.append(self._removed_la_files_log)
return files
@run_after("autoreconf")
@run_after('autoreconf')
def _do_patch_config_files(self):
"""Some packages ship with older config.guess/config.sub files and
need to have these updated when installed on a newer architecture.
@@ -139,18 +135,20 @@ def _do_patch_config_files(self):
# TODO: Expand this to select the 'config.sub'-compatible architecture
# for each platform (e.g. 'config.sub' doesn't accept 'power9le', but
# does accept 'ppc64le').
if self.spec.satisfies("target=ppc64le:"):
config_arch = "ppc64le"
elif self.spec.satisfies("target=aarch64:"):
config_arch = "aarch64"
elif self.spec.satisfies("target=riscv64:"):
config_arch = "riscv64"
if self.spec.satisfies('target=ppc64le:'):
config_arch = 'ppc64le'
elif self.spec.satisfies('target=aarch64:'):
config_arch = 'aarch64'
elif self.spec.satisfies('target=riscv64:'):
config_arch = 'riscv64'
else:
config_arch = "local"
config_arch = 'local'
def runs_ok(script_abs_path):
# Construct the list of arguments for the call
additional_args = {"config.sub": [config_arch]}
additional_args = {
'config.sub': [config_arch]
}
script_name = os.path.basename(script_abs_path)
args = [script_abs_path] + additional_args.get(script_name, [])
@@ -163,7 +161,7 @@ def runs_ok(script_abs_path):
return True
# Get the list of files that needs to be patched
to_be_patched = fs.find(self.stage.path, files=["config.sub", "config.guess"])
to_be_patched = fs.find(self.stage.path, files=['config.sub', 'config.guess'])
to_be_patched = [f for f in to_be_patched if not runs_ok(f)]
# If there are no files to be patched, return early
@@ -172,37 +170,33 @@ def runs_ok(script_abs_path):
# Otherwise, require `gnuconfig` to be a build dependency
self._require_build_deps(
pkgs=["gnuconfig"], spec=self.spec, err="Cannot patch config files"
)
pkgs=['gnuconfig'],
spec=self.spec,
err="Cannot patch config files")
# Get the config files we need to patch (config.sub / config.guess).
to_be_found = list(set(os.path.basename(f) for f in to_be_patched))
gnuconfig = self.spec["gnuconfig"]
gnuconfig = self.spec['gnuconfig']
gnuconfig_dir = gnuconfig.prefix
        # An external gnuconfig may not have a prefix.
if gnuconfig_dir is None:
raise InstallError(
"Spack could not find substitutes for GNU config "
"files because no prefix is available for the "
"`gnuconfig` package. Make sure you set a prefix "
"path instead of modules for external `gnuconfig`."
)
raise InstallError("Spack could not find substitutes for GNU config "
"files because no prefix is available for the "
"`gnuconfig` package. Make sure you set a prefix "
"path instead of modules for external `gnuconfig`.")
candidates = fs.find(gnuconfig_dir, files=to_be_found, recursive=False)
# For external packages the user may have specified an incorrect prefix.
# otherwise the installation is just corrupt.
if not candidates:
msg = (
"Spack could not find `config.guess` and `config.sub` "
"files in the `gnuconfig` prefix `{0}`. This means the "
"`gnuconfig` package is broken"
).format(gnuconfig_dir)
msg = ("Spack could not find `config.guess` and `config.sub` "
"files in the `gnuconfig` prefix `{0}`. This means the "
"`gnuconfig` package is broken").format(gnuconfig_dir)
if gnuconfig.external:
msg += (
" or the `gnuconfig` package prefix is misconfigured as" " an external package"
)
msg += (" or the `gnuconfig` package prefix is misconfigured as"
" an external package")
raise InstallError(msg)
# Filter working substitutes
@@ -228,7 +222,7 @@ def runs_ok(script_abs_path):
and set the prefix to the directory containing the `config.guess` and
`config.sub` files.
"""
raise InstallError(msg.format(", ".join(to_be_found), self.name))
raise InstallError(msg.format(', '.join(to_be_found), self.name))
# Copy the good files over the bad ones
for abs_path in to_be_patched:
@@ -238,16 +232,7 @@ def runs_ok(script_abs_path):
fs.copy(substitutes[name], abs_path)
os.chmod(abs_path, mode)
@run_before("configure")
def _patch_usr_bin_file(self):
"""On NixOS file is not available in /usr/bin/file. Patch configure
scripts to use file from path."""
if self.spec.os.startswith("nixos"):
for configure_file in fs.find(".", files=["configure"], recursive=True):
fs.filter_file("/usr/bin/file", "file", configure_file, string=True)
@run_before("configure")
@run_before('configure')
def _set_autotools_environment_variables(self):
"""Many autotools builds use a version of mknod.m4 that fails when
running as root unless FORCE_UNSAFE_CONFIGURE is set to 1.
@@ -262,44 +247,36 @@ def _set_autotools_environment_variables(self):
"""
os.environ["FORCE_UNSAFE_CONFIGURE"] = "1"
@run_after("configure")
@run_after('configure')
def _do_patch_libtool(self):
"""If configure generates a "libtool" script that does not correctly
detect the compiler (and patch_libtool is set), patch in the correct
flags for the Arm, Clang/Flang, Fujitsu and NVHPC compilers."""
flags for the Arm, Clang/Flang, and Fujitsu compilers."""
# Exit early if we are required not to patch libtool
if not self.patch_libtool:
return
for libtool_path in fs.find(self.build_directory, "libtool", recursive=True):
for libtool_path in fs.find(
self.build_directory, 'libtool', recursive=True):
self._patch_libtool(libtool_path)
def _patch_libtool(self, libtool_path):
if (
self.spec.satisfies("%arm")
or self.spec.satisfies("%clang")
or self.spec.satisfies("%fj")
or self.spec.satisfies("%nvhpc")
):
if self.spec.satisfies('%arm')\
or self.spec.satisfies('%clang')\
or self.spec.satisfies('%fj'):
fs.filter_file('wl=""\n', 'wl="-Wl,"\n', libtool_path)
fs.filter_file(
'pic_flag=""\n', 'pic_flag="{0}"\n'.format(self.compiler.cc_pic_flag), libtool_path
)
if self.spec.satisfies("%fj"):
fs.filter_file("-nostdlib", "", libtool_path)
rehead = r"/\S*/"
objfile = [
"fjhpctag.o",
"fjcrt0.o",
"fjlang08.o",
"fjomp.o",
"crti.o",
"crtbeginS.o",
"crtendS.o",
]
fs.filter_file('pic_flag=""\n',
'pic_flag="{0}"\n'
.format(self.compiler.cc_pic_flag),
libtool_path)
if self.spec.satisfies('%fj'):
fs.filter_file('-nostdlib', '', libtool_path)
rehead = r'/\S*/'
objfile = ['fjhpctag.o', 'fjcrt0.o', 'fjlang08.o', 'fjomp.o',
'crti.o', 'crtbeginS.o', 'crtendS.o']
for o in objfile:
fs.filter_file(rehead + o, "", libtool_path)
fs.filter_file(rehead + o, '', libtool_path)
@property
def configure_directory(self):
@@ -312,7 +289,9 @@ def configure_directory(self):
@property
def configure_abs_path(self):
# Absolute path to configure
configure_abs_path = os.path.join(os.path.abspath(self.configure_directory), "configure")
configure_abs_path = os.path.join(
os.path.abspath(self.configure_directory), 'configure'
)
return configure_abs_path
@property
@@ -320,7 +299,7 @@ def build_directory(self):
"""Override to provide another place to build the package"""
return self.configure_directory
@run_before("autoreconf")
@run_before('autoreconf')
def delete_configure_to_force_update(self):
if self.force_autoreconf:
force_remove(self.configure_abs_path)
@@ -329,20 +308,20 @@ def _require_build_deps(self, pkgs, spec, err):
"""Require `pkgs` to be direct build dependencies of `spec`. Raises a
        RuntimeError with a helpful error message when any dep is missing."""
build_deps = [d.name for d in spec.dependencies(deptype="build")]
build_deps = [d.name for d in spec.dependencies(deptype='build')]
missing_deps = [x for x in pkgs if x not in build_deps]
if not missing_deps:
return
# Raise an exception on missing deps.
msg = (
"{0}: missing dependencies: {1}.\n\nPlease add "
"the following lines to the package:\n\n".format(err, ", ".join(missing_deps))
)
msg = ("{0}: missing dependencies: {1}.\n\nPlease add "
"the following lines to the package:\n\n"
.format(err, ", ".join(missing_deps)))
for dep in missing_deps:
msg += " depends_on('{0}', type='build', when='@{1}')\n".format(dep, spec.version)
msg += (" depends_on('{0}', type='build', when='@{1}')\n"
.format(dep, spec.version))
msg += "\nUpdate the version (when='@{0}') as needed.".format(spec.version)
raise RuntimeError(msg)
@@ -356,32 +335,34 @@ def autoreconf(self, spec, prefix):
        # Else try to regenerate it, which requires a few build dependencies
self._require_build_deps(
pkgs=["autoconf", "automake", "libtool"], spec=spec, err="Cannot generate configure"
)
pkgs=['autoconf', 'automake', 'libtool'],
spec=spec,
err="Cannot generate configure")
tty.msg("Configure script not found: trying to generate it")
tty.warn("*********************************************************")
tty.warn("* If the default procedure fails, consider implementing *")
tty.warn("* a custom AUTORECONF phase in the package *")
tty.warn("*********************************************************")
tty.msg('Configure script not found: trying to generate it')
tty.warn('*********************************************************')
tty.warn('* If the default procedure fails, consider implementing *')
tty.warn('* a custom AUTORECONF phase in the package *')
tty.warn('*********************************************************')
with working_dir(self.configure_directory):
m = inspect.getmodule(self)
# This line is what is needed most of the time
# --install, --verbose, --force
autoreconf_args = ["-ivf"]
autoreconf_args = ['-ivf']
autoreconf_args += self.autoreconf_search_path_args
autoreconf_args += self.autoreconf_extra_args
m.autoreconf(*autoreconf_args)
@property
def autoreconf_search_path_args(self):
"""Search path includes for autoreconf. Add an -I flag for all `aclocal` dirs
of build deps, skips the default path of automake, move external include
flags to the back, since they might pull in unrelated m4 files shadowing
spack dependencies."""
return _autoreconf_search_path_args(self.spec)
"""Arguments to autoreconf to modify the search paths"""
search_path_args = []
for dep in self.spec.dependencies(deptype='build'):
if os.path.exists(dep.prefix.share.aclocal):
search_path_args.extend(['-I', dep.prefix.share.aclocal])
return search_path_args
@run_after("autoreconf")
@run_after('autoreconf')
def set_configure_or_die(self):
"""Checks the presence of a ``configure`` file after the
        autoreconf phase. If it is found, sets a module attribute
@@ -392,11 +373,13 @@ def set_configure_or_die(self):
"""
# Check if a configure script is there. If not raise a RuntimeError.
if not os.path.exists(self.configure_abs_path):
msg = "configure script not found in {0}"
msg = 'configure script not found in {0}'
raise RuntimeError(msg.format(self.configure_directory))
# Monkey-patch the configure script in the corresponding module
inspect.getmodule(self).configure = Executable(self.configure_abs_path)
inspect.getmodule(self).configure = Executable(
self.configure_abs_path
)
def configure_args(self):
"""Produces a list containing all the arguments that must be passed to
@@ -410,16 +393,16 @@ def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass specified
compiler flags to configure."""
# Has to be dynamic attribute due to caching.
setattr(self, "configure_flag_args", [])
setattr(self, 'configure_flag_args', [])
for flag, values in flags.items():
if values:
values_str = "{0}={1}".format(flag.upper(), " ".join(values))
values_str = '{0}={1}'.format(flag.upper(), ' '.join(values))
self.configure_flag_args.append(values_str)
# Spack's fflags are meant for both F77 and FC, therefore we
# additionaly set FCFLAGS if required.
values = flags.get("fflags", None)
values = flags.get('fflags', None)
if values:
values_str = "FCFLAGS={0}".format(" ".join(values))
values_str = 'FCFLAGS={0}'.format(' '.join(values))
self.configure_flag_args.append(values_str)
def configure(self, spec, prefix):
@@ -427,25 +410,26 @@ def configure(self, spec, prefix):
:meth:`~spack.build_systems.autotools.AutotoolsPackage.configure_args`
and an appropriately set prefix.
"""
options = getattr(self, "configure_flag_args", [])
options += ["--prefix={0}".format(prefix)]
options = getattr(self, 'configure_flag_args', [])
options += ['--prefix={0}'.format(prefix)]
options += self.configure_args()
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).configure(*options)
def setup_build_environment(self, env):
if self.spec.platform == "darwin" and macos_version() >= Version("11"):
if (self.spec.platform == 'darwin'
and macos_version() >= Version('11')):
# Many configure files rely on matching '10.*' for macOS version
# detection and fail to add flags if it shows as version 11.
env.set("MACOSX_DEPLOYMENT_TARGET", "10.16")
env.set('MACOSX_DEPLOYMENT_TARGET', '10.16')
def build(self, spec, prefix):
"""Makes the build targets specified by
:py:attr:``~.AutotoolsPackage.build_targets``
"""
# See https://autotools.io/automake/silent.html
params = ["V=1"]
params = ['V=1']
params += self.build_targets
with working_dir(self.build_directory):
inspect.getmodule(self).make(*params)
@@ -457,18 +441,23 @@ def install(self, spec, prefix):
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.install_targets)
run_after("build")(PackageBase._run_default_build_time_test_callbacks)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the Makefile for targets ``test`` and ``check``
and runs them if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute("test")
self._if_make_target_execute("check")
self._if_make_target_execute('test')
self._if_make_target_execute('check')
def _activate_or_not(
self, name, activation_word, deactivation_word, activation_value=None, variant=None
self,
name,
activation_word,
deactivation_word,
activation_value=None,
variant=None
):
"""This function contains the current implementation details of
:meth:`~spack.build_systems.autotools.AutotoolsPackage.with_or_without` and
@@ -531,7 +520,7 @@ def _activate_or_not(
spec = self.spec
args = []
if activation_value == "prefix":
if activation_value == 'prefix':
activation_value = lambda x: spec[x].prefix
variant = variant or name
@@ -552,41 +541,45 @@ def _activate_or_not(
# BoolValuedVariant carry information about a single option.
# Nonetheless, for uniformity of treatment we'll package them
# in an iterable of one element.
condition = "+{name}".format(name=variant)
condition = '+{name}'.format(name=variant)
options = [(name, condition in spec)]
else:
condition = "{variant}={value}"
condition = '{variant}={value}'
# "feature_values" is used to track values which correspond to
# features which can be enabled or disabled as understood by the
# package's build system. It excludes values which have special
# meanings and do not correspond to features (e.g. "none")
feature_values = (
getattr(variant_desc.values, "feature_values", None) or variant_desc.values
)
feature_values = getattr(
variant_desc.values, 'feature_values', None
) or variant_desc.values
options = [
(value, condition.format(variant=variant, value=value) in spec)
(value,
condition.format(variant=variant,
value=value) in spec)
for value in feature_values
]
# For each allowed value in the list of values
for option_value, activated in options:
# Search for an override in the package for this value
override_name = "{0}_or_{1}_{2}".format(
override_name = '{0}_or_{1}_{2}'.format(
activation_word, deactivation_word, option_value
)
line_generator = getattr(self, override_name, None)
# If not available use a sensible default
if line_generator is None:
def _default_generator(is_activated):
if is_activated:
line = "--{0}-{1}".format(activation_word, option_value)
if activation_value is not None and activation_value(option_value):
line += "={0}".format(activation_value(option_value))
line = '--{0}-{1}'.format(
activation_word, option_value
)
if activation_value is not None and activation_value(option_value): # NOQA=ignore=E501
line += '={0}'.format(
activation_value(option_value)
)
return line
return "--{0}-{1}".format(deactivation_word, option_value)
return '--{0}-{1}'.format(deactivation_word, option_value)
line_generator = _default_generator
args.append(line_generator(activated))
return args
@@ -617,7 +610,8 @@ def with_or_without(self, name, activation_value=None, variant=None):
Returns:
list of arguments to configure
"""
return self._activate_or_not(name, "with", "without", activation_value, variant)
return self._activate_or_not(name, 'with', 'without', activation_value,
variant)
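    # A sketch of how this reads in a package (variant and dependency names
    # hypothetical): given variant('mpi', default=True) and a multi-valued
    # variant('fft', values=('fftw', 'mkl')) on the package, configure_args
    # could use:
    #
    #     def configure_args(self):
    #         # '+mpi' in spec  -> ['--with-mpi'], else ['--without-mpi']
    #         args = self.with_or_without('mpi')
    #         # activation_value='prefix' appends the chosen value's prefix,
    #         # e.g. '--with-fftw=<fftw prefix>' when fft=fftw is in the spec
    #         args += self.with_or_without('fft', activation_value='prefix')
    #         return args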
def enable_or_disable(self, name, activation_value=None, variant=None):
"""Same as
@@ -636,21 +630,23 @@ def enable_or_disable(self, name, activation_value=None, variant=None):
Returns:
list of arguments to configure
"""
return self._activate_or_not(name, "enable", "disable", activation_value, variant)
return self._activate_or_not(
name, 'enable', 'disable', activation_value, variant
)
run_after("install")(PackageBase._run_default_install_time_test_callbacks)
run_after('install')(PackageBase._run_default_install_time_test_callbacks)
def installcheck(self):
"""Searches the Makefile for an ``installcheck`` target
and runs it if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute("installcheck")
self._if_make_target_execute('installcheck')
# Check that self.prefix is there after installation
run_after("install")(PackageBase.sanity_check_prefix)
run_after('install')(PackageBase.sanity_check_prefix)
@run_after("install")
@run_after('install')
def remove_libtool_archives(self):
"""Remove all .la files in prefix sub-folders if the package sets
``install_libtool_archives`` to be False.
@@ -660,40 +656,11 @@ def remove_libtool_archives(self):
return
# Remove the files and create a log of what was removed
libtool_files = fs.find(str(self.prefix), "*.la", recursive=True)
libtool_files = fs.find(str(self.prefix), '*.la', recursive=True)
with fs.safe_remove(*libtool_files):
fs.mkdirp(os.path.dirname(self._removed_la_files_log))
with open(self._removed_la_files_log, mode="w") as f:
f.write("\n".join(libtool_files))
with open(self._removed_la_files_log, mode='w') as f:
f.write('\n'.join(libtool_files))
# On macOS, force rpaths for shared library IDs and remove duplicate rpaths
run_after("install")(PackageBase.apply_macos_rpath_fixups)
def _autoreconf_search_path_args(spec):
dirs_seen = set()
flags_spack, flags_external = [], []
# We don't want to add an include flag for automake's default search path.
for automake in spec.dependencies(name="automake", deptype="build"):
try:
s = os.stat(automake.prefix.share.aclocal)
if stat.S_ISDIR(s.st_mode):
dirs_seen.add((s.st_ino, s.st_dev))
except OSError:
pass
for dep in spec.dependencies(deptype="build"):
path = dep.prefix.share.aclocal
# Skip non-existing aclocal paths
try:
s = os.stat(path)
except OSError:
continue
# Skip things seen before, as well as non-dirs.
if (s.st_ino, s.st_dev) in dirs_seen or not stat.S_ISDIR(s.st_mode):
continue
dirs_seen.add((s.st_ino, s.st_dev))
flags = flags_external if dep.external else flags_spack
flags.extend(["-I", path])
return flags_spack + flags_external
run_after('install')(PackageBase.apply_macos_rpath_fixups)


@@ -8,7 +8,7 @@
from llnl.util.filesystem import install, mkdirp
from spack.build_systems.cmake import CMakePackage
from spack.package_base import run_after
from spack.package import run_after
def cmake_cache_path(name, value, comment=""):
@@ -36,7 +36,7 @@ class CachedCMakePackage(CMakePackage):
sidestep certain parsing bugs in extremely long ``cmake`` commands, and to
avoid system limits on the length of the command line."""
phases = ["initconfig", "cmake", "build", "install"]
phases = ['initconfig', 'cmake', 'build', 'install']
@property
def cache_name(self):
@@ -52,7 +52,7 @@ def cache_path(self):
return os.path.join(self.stage.source_path, self.cache_name)
def flag_handler(self, name, flags):
if name in ("cflags", "cxxflags", "cppflags", "fflags"):
if name in ('cflags', 'cxxflags', 'cppflags', 'fflags'):
return (None, None, None) # handled in the cmake cache
return (flags, None, None)
@@ -64,8 +64,10 @@ def initconfig_compiler_entries(self):
# Fortran compiler is optional
if "FC" in os.environ:
spack_fc_entry = cmake_cache_path("CMAKE_Fortran_COMPILER", os.environ["FC"])
system_fc_entry = cmake_cache_path("CMAKE_Fortran_COMPILER", self.compiler.fc)
spack_fc_entry = cmake_cache_path(
"CMAKE_Fortran_COMPILER", os.environ['FC'])
system_fc_entry = cmake_cache_path(
"CMAKE_Fortran_COMPILER", self.compiler.fc)
else:
spack_fc_entry = "# No Fortran compiler defined in spec"
system_fc_entry = "# No Fortran compiler defined in spec"
@@ -76,59 +78,42 @@ def initconfig_compiler_entries(self):
"#------------------{0}".format("-" * 60),
"# Compiler Spec: {0}".format(spec.compiler),
"#------------------{0}".format("-" * 60),
"if(DEFINED ENV{SPACK_CC})\n",
" " + cmake_cache_path("CMAKE_C_COMPILER", os.environ["CC"]),
" " + cmake_cache_path("CMAKE_CXX_COMPILER", os.environ["CXX"]),
" " + spack_fc_entry,
"else()\n",
" " + cmake_cache_path("CMAKE_C_COMPILER", self.compiler.cc),
" " + cmake_cache_path("CMAKE_CXX_COMPILER", self.compiler.cxx),
" " + system_fc_entry,
"endif()\n",
'if(DEFINED ENV{SPACK_CC})\n',
' ' + cmake_cache_path(
"CMAKE_C_COMPILER", os.environ['CC']),
' ' + cmake_cache_path(
"CMAKE_CXX_COMPILER", os.environ['CXX']),
' ' + spack_fc_entry,
'else()\n',
' ' + cmake_cache_path(
"CMAKE_C_COMPILER", self.compiler.cc),
' ' + cmake_cache_path(
"CMAKE_CXX_COMPILER", self.compiler.cxx),
' ' + system_fc_entry,
'endif()\n'
]
flags = spec.compiler_flags
# use global spack compiler flags
cppflags = " ".join(flags["cppflags"])
cppflags = ' '.join(spec.compiler_flags['cppflags'])
if cppflags:
# avoid always ending up with " " with no flags defined
cppflags += " "
cflags = cppflags + " ".join(flags["cflags"])
# avoid always ending up with ' ' with no flags defined
cppflags += ' '
cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
if cflags:
entries.append(cmake_cache_string("CMAKE_C_FLAGS", cflags))
cxxflags = cppflags + " ".join(flags["cxxflags"])
cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
if cxxflags:
entries.append(cmake_cache_string("CMAKE_CXX_FLAGS", cxxflags))
fflags = " ".join(flags["fflags"])
fflags = ' '.join(spec.compiler_flags['fflags'])
if fflags:
entries.append(cmake_cache_string("CMAKE_Fortran_FLAGS", fflags))
# Cmake has different linker arguments for different build types.
# We specify for each of them.
if flags["ldflags"]:
ld_flags = " ".join(flags["ldflags"])
ld_format_string = "CMAKE_{0}_LINKER_FLAGS"
# CMake has separate linker arguments for types of builds.
for ld_type in ["EXE", "MODULE", "SHARED", "STATIC"]:
ld_string = ld_format_string.format(ld_type)
entries.append(cmake_cache_string(ld_string, ld_flags))
# CMake has libs options separated by language. Apply ours to each.
if flags["ldlibs"]:
libs_flags = " ".join(flags["ldlibs"])
libs_format_string = "CMAKE_{0}_STANDARD_LIBRARIES"
langs = ["C", "CXX", "Fortran"]
for lang in langs:
libs_string = libs_format_string.format(lang)
entries.append(cmake_cache_string(libs_string, libs_flags))
return entries
def initconfig_mpi_entries(self):
spec = self.spec
if not spec.satisfies("^mpi"):
if not spec.satisfies('^mpi'):
return []
entries = [
@@ -137,27 +122,32 @@ def initconfig_mpi_entries(self):
"#------------------{0}\n".format("-" * 60),
]
entries.append(cmake_cache_path("MPI_C_COMPILER", spec["mpi"].mpicc))
entries.append(cmake_cache_path("MPI_CXX_COMPILER", spec["mpi"].mpicxx))
entries.append(cmake_cache_path("MPI_Fortran_COMPILER", spec["mpi"].mpifc))
entries.append(cmake_cache_path("MPI_C_COMPILER",
spec['mpi'].mpicc))
entries.append(cmake_cache_path("MPI_CXX_COMPILER",
spec['mpi'].mpicxx))
entries.append(cmake_cache_path("MPI_Fortran_COMPILER",
spec['mpi'].mpifc))
# Check for slurm
using_slurm = False
slurm_checks = ["+slurm", "schedulers=slurm", "process_managers=slurm"]
if any(spec["mpi"].satisfies(variant) for variant in slurm_checks):
slurm_checks = ['+slurm',
'schedulers=slurm',
'process_managers=slurm']
if any(spec['mpi'].satisfies(variant) for variant in slurm_checks):
using_slurm = True
# Determine MPIEXEC
if using_slurm:
if spec["mpi"].external:
if spec['mpi'].external:
# Heuristic until we have dependents on externals
mpiexec = "/usr/bin/srun"
mpiexec = '/usr/bin/srun'
else:
mpiexec = os.path.join(spec["slurm"].prefix.bin, "srun")
mpiexec = os.path.join(spec['slurm'].prefix.bin, 'srun')
else:
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpirun")
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpirun')
if not os.path.exists(mpiexec):
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpiexec")
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpiexec')
if not os.path.exists(mpiexec):
msg = "Unable to determine MPIEXEC, %s tests may fail" % self.name
@@ -166,8 +156,9 @@ def initconfig_mpi_entries(self):
else:
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC
if self.spec["cmake"].satisfies("@3.10:"):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec))
if self.spec["cmake"].satisfies('@3.10:'):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE",
mpiexec))
else:
entries.append(cmake_cache_path("MPIEXEC", mpiexec))
@@ -188,22 +179,24 @@ def initconfig_hardware_entries(self):
"#------------------{0}\n".format("-" * 60),
]
if spec.satisfies("^cuda"):
if spec.satisfies('^cuda'):
entries.append("#------------------{0}".format("-" * 30))
entries.append("# Cuda")
entries.append("#------------------{0}\n".format("-" * 30))
cudatoolkitdir = spec["cuda"].prefix
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir))
cudatoolkitdir = spec['cuda'].prefix
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR",
cudatoolkitdir))
cudacompiler = "${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc"
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER", cudacompiler))
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER",
cudacompiler))
if spec.satisfies("^mpi"):
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER", "${MPI_CXX_COMPILER}"))
if spec.satisfies('^mpi'):
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
"${MPI_CXX_COMPILER}"))
else:
entries.append(
cmake_cache_path("CMAKE_CUDA_HOST_COMPILER", "${CMAKE_CXX_COMPILER}")
)
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
"${CMAKE_CXX_COMPILER}"))
return entries
@@ -212,35 +205,30 @@ def std_initconfig_entries(self):
"#------------------{0}".format("-" * 60),
"# !!!! This is a generated file, edit at own risk !!!!",
"#------------------{0}".format("-" * 60),
"# CMake executable path: {0}".format(self.spec["cmake"].command.path),
"# CMake executable path: {0}".format(
self.spec['cmake'].command.path),
"#------------------{0}\n".format("-" * 60),
]
def initconfig_package_entries(self):
"""This method is to be overwritten by the package"""
return []
def initconfig(self, spec, prefix):
cache_entries = (
self.std_initconfig_entries()
+ self.initconfig_compiler_entries()
+ self.initconfig_mpi_entries()
+ self.initconfig_hardware_entries()
+ self.initconfig_package_entries()
)
cache_entries = (self.std_initconfig_entries() +
self.initconfig_compiler_entries() +
self.initconfig_mpi_entries() +
self.initconfig_hardware_entries() +
self.initconfig_package_entries())
with open(self.cache_name, "w") as f:
with open(self.cache_name, 'w') as f:
for entry in cache_entries:
f.write("%s\n" % entry)
f.write("\n")
f.write('%s\n' % entry)
f.write('\n')
@property
def std_cmake_args(self):
args = super(CachedCMakePackage, self).std_cmake_args
args.extend(["-C", self.cache_path])
args.extend(['-C', self.cache_path])
return args
@run_after("install")
@run_after('install')
def install_cmake_cache(self):
mkdirp(self.spec.prefix.share.cmake)
install(self.cache_path, self.spec.prefix.share.cmake)
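# A sketch of how a package consumes this machinery (package and entry names
# hypothetical): override initconfig_package_entries and emit cache entries
# with the cmake_cache_* helpers defined above.
class _ExampleCachedCMakePackage(CachedCMakePackage):
    def initconfig_package_entries(self):
        return [
            cmake_cache_string("EXAMPLE_BACKEND", "serial"),
            cmake_cache_path("EXAMPLE_DATA_DIR", self.prefix.share),
        ]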


@@ -8,8 +8,7 @@
import os
import platform
import re
import sys
from typing import List
from typing import List # novm
import six
@@ -18,12 +17,11 @@
import spack.build_environment
from spack.directives import conflicts, depends_on, variant
from spack.package_base import InstallError, PackageBase, run_after
from spack.util.path import convert_to_posix_path
from spack.package import InstallError, PackageBase, run_after
# Regex to extract the primary generator from the CMake generator
# string.
_primary_generator_extractor = re.compile(r"(?:.* - )?(.*)")
_primary_generator_extractor = re.compile(r'(?:.* - )?(.*)')
def _extract_primary_generator(generator):
@@ -74,17 +72,16 @@ class CMakePackage(PackageBase):
if the generator string does not follow the prescribed format, or if
the primary generator is not supported.
"""
#: Phases of a CMake package
phases = ["cmake", "build", "install"]

#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "CMakePackage"

build_targets = []  # type: List[str]
install_targets = ["install"]

build_time_test_callbacks = ["check"]
#: The build system generator to use.
#:
@@ -94,32 +91,26 @@ class CMakePackage(PackageBase):
#:
#: See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
#: for more information.
generator = "Unix Makefiles"
if sys.platform == "win32":
generator = "Ninja"
depends_on("ninja")
# https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
variant(
"build_type",
default="RelWithDebInfo",
description="CMake build type",
values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"),
)
# https://cmake.org/cmake/help/latest/variable/CMAKE_INTERPROCEDURAL_OPTIMIZATION.html
variant("ipo", default=False, description="CMake interprocedural optimization")
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
conflicts("+ipo", when="^cmake@:3.8", msg="+ipo is not supported by CMake < 3.9")
depends_on("cmake", type="build")
@property
def archive_files(self):
"""Files to archive for packages based on CMake"""
return [os.path.join(self.build_directory, "CMakeCache.txt")]
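# Because _std_args (below) looks the generator up on the package
# instance before falling back to the class default, a package can opt
# into the other supported generator. A minimal hypothetical sketch:
class Example(CMakePackage):
    generator = "Ninja"  # overrides the "Unix Makefiles" default above
    depends_on("ninja", type="build")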
@property
def root_cmakelists_dir(self):
@@ -141,69 +132,65 @@ def std_cmake_args(self):
"""
# standard CMake arguments
std_cmake_args = CMakePackage._std_args(self)
std_cmake_args += getattr(self, "cmake_flag_args", [])
return std_cmake_args
@staticmethod
def _std_args(pkg):
"""Computes the standard cmake arguments for a generic package"""
try:
generator = pkg.generator
except AttributeError:
generator = CMakePackage.generator
# Make sure a valid generator was chosen
valid_primary_generators = ["Unix Makefiles", "Ninja"]
primary_generator = _extract_primary_generator(generator)
if primary_generator not in valid_primary_generators:
msg = "Invalid CMake generator: '{0}'\n".format(generator)
msg = "Invalid CMake generator: '{0}'\n".format(generator)
msg += "CMakePackage currently supports the following "
msg += "primary generators: '{0}'".format("', '".join(valid_primary_generators))
msg += "primary generators: '{0}'".\
format("', '".join(valid_primary_generators))
raise InstallError(msg)
try:
build_type = pkg.spec.variants["build_type"].value
except KeyError:
build_type = "RelWithDebInfo"
try:
ipo = pkg.spec.variants["ipo"].value
except KeyError:
ipo = False
define = CMakePackage.define
args = [
"-G",
generator,
define("CMAKE_INSTALL_PREFIX", convert_to_posix_path(pkg.prefix)),
define("CMAKE_BUILD_TYPE", build_type),
define("BUILD_TESTING", pkg.run_tests),
]
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
if pkg.spec.satisfies("^cmake@3.9:"):
args.append(define("CMAKE_INTERPROCEDURAL_OPTIMIZATION", ipo))
if primary_generator == "Unix Makefiles":
args.append(define("CMAKE_VERBOSE_MAKEFILE", True))
if platform.mac_ver()[0]:
args.extend(
[
define("CMAKE_FIND_FRAMEWORK", "LAST"),
define("CMAKE_FIND_APPBUNDLE", "LAST"),
]
)
# Set up CMake rpath
args.extend(
[
define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
define("CMAKE_INSTALL_RPATH", spack.build_environment.get_rpaths(pkg)),
define("CMAKE_PREFIX_PATH", spack.build_environment.get_cmake_prefix_path(pkg)),
]
)
return args
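# For a concrete feel, on Linux with default variants the standard
# arguments come out roughly like this (prefix and rpaths are made up;
# define() produces the -DVAR:TYPE=value form shown in the next hunk):
#   ["-G", "Unix Makefiles",
#    "-DCMAKE_INSTALL_PREFIX:STRING=/opt/spack/opt/example-1.0-abc123",
#    "-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo",
#    "-DBUILD_TESTING:BOOL=OFF",
#    "-DCMAKE_INTERPROCEDURAL_OPTIMIZATION:BOOL=OFF",
#    "-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON",
#    "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=ON",
#    "-DCMAKE_INSTALL_RPATH:STRING=/opt/spack/opt/example-1.0-abc123/lib",
#    "-DCMAKE_PREFIX_PATH:STRING=/opt/spack/opt/cmake-3.22.2-def456"]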
@staticmethod
@@ -234,10 +221,10 @@ def define(cmake_var, value):
# Determine whether CMake should treat the value as a BOOL or a
# STRING, and normalize it to the form CMake expects
if isinstance(value, bool):
kind = "BOOL"
value = "ON" if value else "OFF"
else:
kind = "STRING"
if isinstance(value, Sequence) and not isinstance(value, six.string_types):
value = ";".join(str(v) for v in value)
else:
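# The resulting strings look like this (variable names are hypothetical):
#   CMakePackage.define("BUILD_SHARED_LIBS", True)    -> "-DBUILD_SHARED_LIBS:BOOL=ON"
#   CMakePackage.define("CMAKE_CXX_STANDARD", 17)     -> "-DCMAKE_CXX_STANDARD:STRING=17"
#   CMakePackage.define("FEATURES", ["a", "b", "c"])  -> "-DFEATURES:STRING=a;b;c"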
@@ -293,10 +280,11 @@ def define_from_variant(self, cmake_var, variant=None):
variant = cmake_var.lower()
if variant not in self.variants:
raise KeyError('"{0}" is not a variant of "{1}"'.format(variant, self.name))
if variant not in self.spec.variants:
return ""
value = self.spec.variants[variant].value
if isinstance(value, (tuple, list)):
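# Typical use inside a package's cmake_args (the variant names here are
# hypothetical); the CMake type is inferred from the variant's value:
#   self.define_from_variant("BUILD_SHARED_LIBS", "shared")
#       -> "-DBUILD_SHARED_LIBS:BOOL=ON" when the spec has +shared
#   self.define_from_variant("CMAKE_CXX_STANDARD", "cxxstd")
#       -> "-DCMAKE_CXX_STANDARD:STRING=17" when the spec has cxxstd=17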
@@ -311,34 +299,37 @@ def flags_to_build_system_args(self, flags):
so cppflags will be added to cflags, cxxflags, and fflags to mimic the
behavior in other tools."""
# Has to be dynamic attribute due to caching
setattr(self, "cmake_flag_args", [])
flag_string = "-DCMAKE_{0}_FLAGS={1}"
langs = {"C": "c", "CXX": "cxx", "Fortran": "f"}
# Handle language compiler flags
for lang, pre in langs.items():
    flag = pre + "flags"
    # cmake has no explicit cppflags support -> add it to all langs
    lang_flags = " ".join(flags.get(flag, []) + flags.get("cppflags", []))
    if lang_flags:
        self.cmake_flag_args.append(flag_string.format(lang, lang_flags))
# CMake uses a separate linker-flags variable for each kind of link
# target, so set all of them.
if flags["ldflags"]:
    ldflags = " ".join(flags["ldflags"])
    ld_string = "-DCMAKE_{0}_LINKER_FLAGS={1}"
    for type in ["EXE", "MODULE", "SHARED", "STATIC"]:
        self.cmake_flag_args.append(ld_string.format(type, ldflags))
# CMake's standard-libraries options are separated by language; apply
# ours to each.
if flags["ldlibs"]:
    libs_flags = " ".join(flags["ldlibs"])
    libs_string = "-DCMAKE_{0}_STANDARD_LIBRARIES={1}"
    for lang in langs:
        self.cmake_flag_args.append(libs_string.format(lang, libs_flags))
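# End to end, flags injected on the spec surface as CMake arguments; e.g.
# cppflags=-O3 ldflags=-Wl,-z,now would contribute (illustrative):
#   -DCMAKE_C_FLAGS=-O3 -DCMAKE_CXX_FLAGS=-O3 -DCMAKE_Fortran_FLAGS=-O3
#   -DCMAKE_EXE_LINKER_FLAGS=-Wl,-z,now
#   -DCMAKE_MODULE_LINKER_FLAGS=-Wl,-z,now
#   -DCMAKE_SHARED_LINKER_FLAGS=-Wl,-z,now
#   -DCMAKE_STATIC_LINKER_FLAGS=-Wl,-z,now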
@property
def build_dirname(self):
@@ -346,7 +337,7 @@ def build_dirname(self):
:return: name of the subdirectory for building the package
"""
return "spack-build-%s" % self.spec.dag_hash(7)
@property
def build_directory(self):
@@ -362,7 +353,6 @@ def cmake_args(self):
* CMAKE_INSTALL_PREFIX
* CMAKE_BUILD_TYPE
* BUILD_TESTING
which will be set automatically.
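# A typical override in a concrete package therefore supplies only
# package-specific options (option and variant names are hypothetical):
def cmake_args(self):
    return [
        self.define("EXAMPLE_ENABLE_FOO", True),
        self.define_from_variant("EXAMPLE_ENABLE_MPI", "mpi"),
    ]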
@@ -381,33 +371,35 @@ def cmake(self, spec, prefix):
def build(self, spec, prefix):
"""Make the build targets"""
with working_dir(self.build_directory):
if self.generator == "Unix Makefiles":
inspect.getmodule(self).make(*self.build_targets)
elif self.generator == "Ninja":
self.build_targets.append("-v")
inspect.getmodule(self).ninja(*self.build_targets)
def install(self, spec, prefix):
"""Make the install targets"""
with working_dir(self.build_directory):
if self.generator == "Unix Makefiles":
inspect.getmodule(self).make(*self.install_targets)
elif self.generator == "Ninja":
inspect.getmodule(self).ninja(*self.install_targets)
run_after("build")(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the CMake-generated Makefile for the target ``test``
and runs it if found.
"""
with working_dir(self.build_directory):
if self.generator == "Unix Makefiles":
self._if_make_target_execute("test", jobs_env="CTEST_PARALLEL_LEVEL")
self._if_make_target_execute("check")
elif self.generator == "Ninja":
self._if_ninja_target_execute("test", jobs_env="CTEST_PARALLEL_LEVEL")
self._if_ninja_target_execute("check")
# Check that self.prefix is there after installation
run_after("install")(PackageBase.sanity_check_prefix)
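# Note: check() only runs when tests are requested at install time
# (e.g. `spack install --test=root <package>`); the
# build_time_test_callbacks list declared near the top of the class is
# what wires it into the post-build callbacks.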

Some files were not shown because too many files have changed in this diff.