Compare commits


26 Commits

All 26 commits are authored by Danny McClanahan.

SHA1 Message Date
0af9331015 fix zlib and bzip2 2022-04-28 00:42:33 -04:00
da423cdbac fix npm docstring 2022-04-24 16:58:33 -04:00
fe0323f7a3 default -Wl,--allow-multiple-definition to on 2022-04-23 15:32:40 -04:00
0c3d3afb01 subjective change -- prefer ninja over unix makefiles in cmake 2022-04-23 14:44:46 -04:00
f4b094834a make ffmpeg build with emscripten!!!!!! 2022-04-23 14:44:45 -04:00
2170120f40 fix emscripten and llvm to be able to produce runnable binaries; fix binaryen for emscripten 2022-04-23 14:44:45 -04:00
58a71a5307 yasm bootstrap is no longer necessary 2022-04-23 14:44:45 -04:00
203cebb081 make zlib and other makefile packages able to use emscripten; also, bzip2 fix 2022-04-23 14:44:37 -04:00
13362f6c86 make emscripten test work 2022-04-23 03:56:14 -04:00
29dbae0c20 add emscripten package 2022-04-23 03:29:54 -04:00
4f2b9ca6dc add several new variants to llvm to make it build on alpine 2022-04-23 03:29:54 -04:00
bf50833d58 add note about python version necessary to install node-js 2022-04-23 03:29:54 -04:00
5adf17f8a3 add binaryen package 2022-04-23 03:29:54 -04:00
856dee5b0a add very hacky compatibility layer for npm on alpine 2022-04-23 03:29:53 -04:00
9baf586a2e fix python2 build dep for node-js 2022-04-23 03:29:53 -04:00
d38310fa02 not working -- look at nix emscripten support first before further work 2022-04-23 03:29:53 -04:00
cc87de83cd make emconfigure and emcmake work, kinda 2022-04-23 03:29:53 -04:00
187c0f1d04 fix doc issue with executables regex 2022-04-23 03:29:53 -04:00
1a0b11ca43 random unrelated change 2022-04-23 03:29:53 -04:00
982f883bcc rename emcc to emscripten 2022-04-23 03:29:52 -04:00
2487a75422 implement spack external find for node-js 2022-04-23 03:29:52 -04:00
b9bedba68c make emsdk use spack's node and llvm instead of downloading its own 2022-04-23 03:29:52 -04:00
620575f505 add emscripten compiler emcc, which can't compile executables (yet!) 2022-04-23 03:29:52 -04:00
43eaa713a3 add emsdk package 2022-04-23 03:29:52 -04:00
e4f602632c add gnu plotutils 2022-04-23 03:27:53 -04:00
5fe1a036a0 fix gsl build with openblas 2022-04-23 03:27:53 -04:00
7,815 changed files with 182,970 additions and 246,110 deletions

.flake8

@@ -1,25 +1,43 @@
# -*- conf -*-
# flake8 settings for Spack.
# flake8 settings for Spack core files.
#
# These exceptions are for Spack core files. We're slightly more lenient
# with packages. See .flake8_packages for that.
#
# This is the only flake8 rule Spack violates somewhat flagrantly
# E1: Indentation
# - E129: visually indented line with same indent as next logical line
#
# E2: Whitespace
# - E221: multiple spaces before operator
# - E241: multiple spaces after ','
# - E272: multiple spaces before keyword
#
# E7: Statement
# - E731: do not assign a lambda expression, use a def
#
# This is the only flake8 exception needed when using Black.
# - E203: white space around slice operators can be required, ignore : warn
# W5: Line break warning
# - W503: line break before binary operator
# - W504: line break after binary operator
#
# We still allow these in packages (Would like to get rid of them or rely on mypy
# in the future)
# - F403: from/import * used; unable to detect undefined names
# These are required to get the package.py files to test clean:
# - F999: syntax error in doctest
#
# N8: PEP8-naming
# - N801: class names should use CapWords convention
# - N813: camelcase imported as lowercase
# - N814: camelcase imported as constant
#
# F4: pyflakes import checks, these are now checked by mypy more precisely
# - F403: from module import *
# - F405: undefined name or from *
# - F821: undefined name (needed with from/import *)
#
# Black ignores, these are incompatible with black style and do not follow PEP-8
# - E203: white space around slice operators can be required, ignore : warn
# - W503: see above, already ignored for line-breaks
#
[flake8]
#ignore = E129,,W503,W504,F999,N801,N813,N814,F403,F405,E203
extend-ignore = E731,E203
max-line-length = 99
ignore = E129,E221,E241,E272,E731,W503,W504,F999,N801,N813,N814,F403,F405
max-line-length = 88
# F4: Import
# - F405: `name` may be undefined, or undefined from star imports: `module`
@@ -28,8 +46,7 @@ max-line-length = 99
# - F821: undefined name `name`
#
per-file-ignores =
var/spack/repos/*/package.py:F403,F405,F821
*-ci-package.py:F403,F405,F821
var/spack/repos/*/package.py:F405,F821
# exclude things we usually do not want linting for.
# These still get linted when passed explicitly, as when spack flake8 passes
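
For reference, a minimal sketch of what these per-file ignores do in practice (the recipe path and its import line below are hypothetical): flake8 suppresses only the listed codes for matching paths, so package recipes can rely on star imports without tripping the pyflakes checks that mypy now covers more precisely.

```console
# Hypothetical package recipe relying on names pulled in by a star import:
$ head -1 var/spack/repos/builtin/packages/example/package.py
from spack.package import *

# With per-file-ignores = var/spack/repos/*/package.py:F403,F405,F821,
# flake8 stays quiet here; the same import in core Spack code would be flagged.
$ flake8 var/spack/repos/builtin/packages/example/package.py
$ echo $?
0
```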

.git-blame-ignore-revs

@@ -1,3 +0,0 @@
# .git-blame-ignore-revs
# Formatted entire codebase with black
f52f6e99dbf1131886a80112b8c79dfc414afb7c


@@ -1,62 +0,0 @@
name: "\U0001F4A5 Tests error"
description: Some package in Spack had stand-alone tests that didn't pass
title: "Testing issue: "
labels: [test-error]
body:
- type: textarea
id: reproduce
attributes:
label: Steps to reproduce the failure(s) or link(s) to test output(s)
description: |
Fill in the test output from the exact spec that is having stand-alone test failures. Links to test outputs (e.g., CDash) can also be provided.
value: |
```console
$ spack spec -I <spec>
...
```
- type: textarea
id: error
attributes:
label: Error message
description: |
Please post the error message from spack inside the `<details>` tag below:
value: |
<details><summary>Error message</summary><pre>
...
</pre></details>
validations:
required: true
- type: textarea
id: information
attributes:
label: Information on your system or the test runner
description: Please include the output of `spack debug report` for your system.
validations:
required: true
- type: markdown
attributes:
value: |
If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well.
- type: textarea
id: additional_information
attributes:
label: Additional information
description: |
Please upload test logs or any additional information about the problem.
- type: markdown
attributes:
value: |
Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and **@mention** them here if they exist.
- type: checkboxes
id: checks
attributes:
label: General information
options:
- label: I have reported the version of Spack/Python/Platform/Runner
required: true
- label: I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers
required: true
- label: I have uploaded any available logs
required: true
- label: I have searched the issues of this repo and believe this is not a duplicate
required: true

.github/workflows/audit.yaml

@@ -1,44 +0,0 @@
name: audit
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
python_version:
required: true
type: string
concurrency:
group: audit-${{inputs.python_version}}-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Run audits on all the packages in the built-in repository
package-audits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov 'coverage[toml]<=6.2'
- name: Package audits (with coverage)
if: ${{ inputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ inputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits

.github/workflows/bootstrap-test.sh

@@ -1,7 +0,0 @@
#!/bin/bash
set -ex
source share/spack/setup-env.sh
$PYTHON bin/spack bootstrap untrust spack-install
$PYTHON bin/spack -d solve zlib
tree $BOOTSTRAP/store
exit 0
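
This helper assumes its caller exports `$PYTHON` (the interpreter under test) and `$BOOTSTRAP` (the bootstrap store root); a minimal sketch of the invocation used by the matrix jobs later in this compare, assuming `spack-tmpconfig` is what provides `$BOOTSTRAP` to the script it runs:

```console
$ export PYTHON="$ver_dir/python"   # $ver_dir located in the runner's tool cache
$ ./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
```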

.github/workflows/bootstrap.yml

@@ -3,16 +3,24 @@ name: Bootstrapping
on:
# This Workflow can be triggered manually
workflow_dispatch:
workflow_call:
pull_request:
branches:
- develop
- releases/**
paths-ignore:
# Don't run if we only modified packages in the
# built-in repository or documentation
- 'var/spack/repos/builtin/**'
- '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
- '!var/spack/repos/builtin/packages/python/**'
- '!var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/docs/**'
schedule:
# nightly at 2:16 AM
- cron: '16 2 * * *'
concurrency:
group: bootstrap-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
fedora-clingo-sources:
runs-on: ubuntu-latest
container: "fedora:latest"
@@ -23,21 +31,14 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
@@ -60,21 +61,22 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
@@ -96,21 +98,22 @@ jobs:
apt-get install -y \
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
@@ -118,6 +121,7 @@ jobs:
spack -d solve zlib
tree ~/.spack/bootstrap/store/
opensuse-clingo-sources:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
@@ -130,15 +134,11 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-fortran tar git gpg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
- name: Setup repo
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
@@ -154,8 +154,7 @@ jobs:
- name: Install dependencies
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
@@ -166,69 +165,46 @@ jobs:
tree ~/.spack/bootstrap/store/
macos-clingo-binaries:
runs-on: ${{ matrix.macos-version }}
runs-on: macos-latest
strategy:
matrix:
macos-version: ['macos-11', 'macos-12']
python-version: ['3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
steps:
- name: Install dependencies
run: |
brew install tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
# NOTE: test all pythons that exist, not all do on 12
done
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-clingo-binaries:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
steps:
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
fetch-depth: 0
- name: Setup repo
python-version: ${{ matrix.python-version }}
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
set -ex
for ver in '2.7' '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
if (($not_found)) ; then
echo Required python version $ver not found in runner!
exit 1
fi
done
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
@@ -242,21 +218,22 @@ jobs:
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
@@ -278,21 +255,22 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
@@ -310,8 +288,7 @@ jobs:
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -327,8 +304,7 @@ jobs:
brew install gawk tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -336,11 +312,3 @@ jobs:
spack bootstrap untrust github-actions-v0.2
spack -d gpg list
tree ~/.spack/bootstrap/store/
# [1] Distros that have patched git to resolve CVE-2022-24765 (e.g. Ubuntu patching v2.25.1)
# introduce breaking behavior, so we have to set `safe.directory` in gitconfig ourselves.
# See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
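
To make the footnote concrete, here is a hedged sketch of the failure mode these jobs work around (the exact fatal message varies across git builds): a git patched for CVE-2022-24765 refuses to operate on a repository owned by a different user until that path is listed under `safe.directory`.

```console
# The checkout is owned by root, but later steps run as spack-test:
$ runuser -u spack-test -- git -C /__w/spack/spack status
fatal: unsafe repository ('/__w/spack/spack' is owned by someone else)

# Hence the workaround applied before switching users:
$ git config --global --add safe.directory /__w/spack/spack
```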

.github/workflows/build-containers.yml

@@ -19,10 +19,6 @@ on:
release:
types: [published]
concurrency:
group: build_containers-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
deploy-images:
runs-on: ubuntu-latest
@@ -47,10 +43,9 @@ jobs:
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -80,33 +75,33 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535
with:
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # @v1
uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@c74574e6c82eeedc46366be1b0d287eff9085eb6 # @v1
uses: docker/setup-buildx-action@94ab11c41e45d028884a99163086648e898eed25 # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@c84f38281176d4c9cdb1626ffafcd6b3911b5d94 # @v2
uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}

.github/workflows/ci.yaml

@@ -1,92 +0,0 @@
name: ci
on:
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency:
group: ci-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
prechecks:
needs: [ changes ]
uses: ./.github/workflows/valid-style.yml
with:
with_coverage: ${{ needs.changes.outputs.core }}
audit-ancient-python:
uses: ./.github/workflows/audit.yaml
needs: [ changes ]
with:
with_coverage: ${{ needs.changes.outputs.core }}
python_version: 2.7
all-prechecks:
needs: [ prechecks ]
runs-on: ubuntu-latest
steps:
- name: Success
run: "true"
# Check which files have been updated by the PR
changes:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
bootstrap: ${{ steps.filter.outputs.bootstrap }}
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@b2feaf19c27470162a626bd6fa8438ae5b263721
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
# Don't run if we only modified packages in the
# built-in repository or documentation
filters: |
bootstrap:
- 'var/spack/repos/builtin/packages/clingo-bootstrap/**'
- 'var/spack/repos/builtin/packages/clingo/**'
- 'var/spack/repos/builtin/packages/python/**'
- 'var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/**'
- 'share/spack/**'
- '.github/workflows/bootstrap.yml'
- '.github/workflows/ci.yaml'
core:
- './!(var/**)/**'
packages:
- 'var/**'
# Some links for easier reference:
#
# "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
# job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
# setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
#
bootstrap:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/bootstrap.yml
unit-tests:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/unit_tests.yaml
windows:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks ]
uses: ./.github/workflows/windows_python.yml
all:
needs: [ windows, unit-tests, bootstrap, audit-ancient-python ]
runs-on: ubuntu-latest
steps:
- name: Success
run: "true"

.github/workflows/macos_python.yml

@@ -0,0 +1,64 @@
# These are nightly package tests for macOS
# focus areas:
# - initial user experience
# - scientific python stack
name: macOS builds nightly
on:
schedule:
# nightly at 1 AM
- cron: '0 1 * * *'
pull_request:
branches:
- develop
paths:
# Run if we modify this yaml file
- '.github/workflows/macos_python.yml'
# TODO: run if we touch any of the recipes involved in this
# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
jobs:
install_gcc:
name: gcc with clang
runs-on: macos-latest
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
# 9.2.0 is the latest version on which we apply homebrew patch
spack install -v --fail-fast gcc@11.2.0 %apple-clang
install_jupyter_clang:
name: jupyter
runs-on: macos-latest
timeout-minutes: 700
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-jupyterlab %apple-clang
install_scipy_clang:
name: scipy, mpl, pd
runs-on: macos-latest
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-scipy %apple-clang
spack install -v --fail-fast py-matplotlib %apple-clang
spack install -v --fail-fast py-pandas %apple-clang
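
A minimal sketch of reproducing one of these nightly jobs outside CI, assuming a local Spack checkout and Homebrew clang (`install_spack.sh` is the workflow's wrapper around the usual environment setup):

```console
$ . share/spack/setup-env.sh
$ spack install -v --fail-fast py-jupyterlab %apple-clang
```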

.github/workflows/setup_git.ps1

@@ -4,7 +4,6 @@ Set-Location spack
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
git config --global core.longpaths true
if ($(git branch --show-current) -ne "develop")
{

.github/workflows/unit_tests.yaml

@@ -1,46 +1,115 @@
name: unit tests
name: linux tests
on:
workflow_dispatch:
workflow_call:
concurrency:
group: unit_tests-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
jobs:
# Validate that the code can be run on all the Python versions
# supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.5- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.5- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools types-six
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run style tests
run: |
share/spack/qa/run-style-tests
# Check which files have been updated by the PR
changes:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@b2feaf19c27470162a626bd6fa8438ae5b263721
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
filters: |
core:
- './!(var/**)/**'
packages:
- 'var/**'
# Some links for easier reference:
#
# "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
# job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
# setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
#
- id: coverage
# Run the subsequent jobs with coverage if core has been modified,
# regardless of whether this is a pull request or a push to a branch
run: |
echo Core changes: ${{ steps.filter.outputs.core }}
echo Event name: ${{ github.event_name }}
if [ "${{ steps.filter.outputs.core }}" == "true" ]
then
echo "::set-output name=with_coverage::true"
else
echo "::set-output name=with_coverage::false"
fi
# Run unit tests with different configurations on linux
ubuntu:
unittests:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10']
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
concretizer: ['clingo']
on_develop:
- ${{ github.ref == 'refs/heads/develop' }}
include:
- python-version: 2.7
concretizer: original
on_develop: ${{ github.ref == 'refs/heads/develop' }}
- python-version: '3.10'
- python-version: 3.6
concretizer: original
- python-version: 3.9
concretizer: original
on_develop: ${{ github.ref == 'refs/heads/develop' }}
exclude:
- python-version: '3.7'
concretizer: 'clingo'
on_develop: false
- python-version: '3.8'
concretizer: 'clingo'
on_develop: false
- python-version: '3.9'
concretizer: 'clingo'
on_develop: false
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -52,11 +121,11 @@ jobs:
patchelf cmake bison libbison-dev kcov
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov[toml] pytest-cov pytest-xdist
pip install --upgrade pip six setuptools pytest codecov "coverage[toml]<=6.2"
# ensure style checks are not skipped in unit tests for python >= 3.6
# note that true/false (i.e., 1/0) are opposite in conditions in python and bash
if python -c 'import sys; sys.exit(not sys.version_info >= (3, 6))'; then
pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click==8.0.4" "black<=21.12b0"
pip install --upgrade flake8 isort>=4.3.5 mypy>=0.900 black
fi
- name: Pin pathlib for Python 2.7
if: ${{ matrix.python-version == 2.7 }}
@@ -75,28 +144,37 @@ jobs:
. share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -v solve zlib
- name: Run unit tests
- name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
SPACK_PYTHON: python
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
SPACK_TEST_PARALLEL: 2
COVERAGE: true
UNIT_TEST_COVERAGE: ${{ (matrix.concretizer == 'original' && matrix.python-version == '2.7') || (matrix.python-version == '3.10') }}
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
coverage combine -a
coverage combine
coverage xml
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
- name: Run unit tests (reduced suite without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
SPACK_PYTHON: python
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,${{ matrix.concretizer }}
# Test shell integration
shell:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install System packages
@@ -106,25 +184,33 @@ jobs:
sudo apt-get install -y coreutils kcov csh zsh tcsh fish dash bash
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2 pytest-xdist
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run shell tests
- name: Run shell tests (without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
share/spack/qa/run-shell-tests
- name: Run shell tests (with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: shelltests,linux
# Test RHEL8 UBI with platform Python. This job is run
# only on PRs modifying core Spack
rhel8-platform-python:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
container: registry.access.redhat.com/ubi8/ubi
steps:
- name: Install dependencies
@@ -132,7 +218,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -148,12 +234,13 @@ jobs:
spack unit-test -k 'not cvs and not svn and not hg' -x --verbose
# Test for the clingo based solver (using clingo-cffi)
clingo-cffi:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install System packages
@@ -165,60 +252,105 @@ jobs:
patchelf kcov
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml] pytest-cov clingo pytest-xdist
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2 clingo
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
COVERAGE: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
coverage combine -a
coverage combine
coverage xml
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # @v2.1.0
- name: Run unit tests (reduced suite without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,clingo
# Run unit tests on MacOS
macos:
build:
needs: [ validate, style, changes ]
runs-on: macos-latest
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools
pip install --upgrade pytest codecov coverage[toml] pytest-xdist pytest-cov
pip install --upgrade pytest codecov coverage[toml]==6.2
- name: Setup Homebrew packages
run: |
brew install dash fish gcc gnupg2 kcov
- name: Run unit tests
env:
SPACK_TEST_SOLVER: clingo
SPACK_TEST_PARALLEL: 4
run: |
git --version
. .github/workflows/setup_git.sh
. share/spack/setup-env.sh
$(which spack) bootstrap untrust spack-install
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --cov --cov-config=pyproject.toml "${common_args[@]}"
coverage combine -a
coverage xml
# Delete the symlink going from ./lib/spack/docs/_spack_root back to
# the initial directory, since it causes ELOOP errors with codecov/actions@2
rm lib/spack/docs/_spack_root
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
then
coverage run $(which spack) unit-test -x
coverage combine
coverage xml
# Delete the symlink going from ./lib/spack/docs/_spack_root back to
# the initial directory, since it causes ELOOP errors with codecov/actions@2
rm lib/spack/docs/_spack_root
else
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
fi
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
files: ./coverage.xml
flags: unittests,macos
# Run audits on all the packages in the built-in repository
package-audits:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2
- name: Package audits (with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits
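
Earlier in this file, the `changes` job publishes `with_coverage` through the `::set-output` workflow command, and every downstream job reads it as `needs.changes.outputs.with_coverage`. A minimal sketch of that handshake follows; the `$GITHUB_OUTPUT` form shown second is the newer equivalent, and treating it as available is an assumption about the runner version:

```console
# Classic workflow command, as used in the changes job above:
echo "::set-output name=with_coverage::true"

# Newer runners write the same key/value pair to a file instead:
echo "with_coverage=true" >> "$GITHUB_OUTPUT"
```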

.github/workflows/valid-style.yml

@@ -1,60 +0,0 @@
name: style
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
concurrency:
group: style-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Validate that the code can be run on all the Python versions
# supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: '3.10'
cache: 'pip'
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: '3.10'
cache: 'pip'
- name: Install Python packages
run: |
python3 -m pip install --upgrade pip six setuptools types-six click==8.0.2 'black==21.12b0' mypy isort clingo flake8
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run style tests
run: |
share/spack/qa/run-style-tests
audit:
uses: ./.github/workflows/audit.yaml
with:
with_coverage: ${{ inputs.with_coverage }}
python_version: '3.10'

.github/workflows/windows_python.yml

@@ -1,77 +1,104 @@
name: windows
name: windows tests
on:
workflow_call:
concurrency:
group: windows-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
defaults:
run:
shell:
powershell Invoke-Expression -Command ".\share\spack\qa\windows_test_setup.ps1"; {0}
jobs:
unit-tests:
validate:
runs-on: windows-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python Packages
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.5-' -v spack/lib/spack/spack/ spack/lib/spack/llnl/ spack/bin/
- name: vermin (Repositories)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.5-' -v spack/var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov pytest-cov
python -m pip install --upgrade pip six setuptools flake8 isort>=4.3.5 mypy>=0.800 black pywin32 types-python-dateutil
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Run style tests
run: |
spack style
- name: Verify license headers
run: |
python spack\bin\spack license verify
unittest:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Unit Test
run: |
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
cd spack
dir
(Get-Item '.\lib\spack\docs\_spack_root').Delete()
spack unit-test --verbose --cov --cov-config=pyproject.toml --ignore=lib/spack/spack/test/cmd
coverage combine -a
coverage xml
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
with:
flags: unittests,windows
unit-tests-cmd:
spack unit-test --verbose --ignore=lib/spack/spack/test/cmd
unittest-cmd:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage pytest-cov
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Command Unit Test
run: |
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
cd spack
(Get-Item '.\lib\spack\docs\_spack_root').Delete()
spack unit-test --verbose --cov --cov-config=pyproject.toml lib/spack/spack/test/cmd
coverage combine -a
coverage xml
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
with:
flags: unittests,windows
build-abseil:
spack unit-test lib/spack/spack/test/cmd --verbose
buildtest:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
@@ -84,7 +111,8 @@ jobs:
spack external find cmake
spack external find ninja
spack install abseil-cpp
make-installer:
generate-installer-test:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- name: Disable Windows Symlinks
@@ -92,15 +120,15 @@ jobs:
git config --global core.symlinks false
shell:
powershell
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Add Light and Candle to Path
run: |
$env:WIX >> $GITHUB_PATH
@@ -111,27 +139,27 @@ jobs:
echo "installer_root=$((pwd).Path)" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
env:
ProgressPreference: SilentlyContinue
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
- uses: actions/upload-artifact@v3
with:
name: Windows Spack Installer Bundle
path: ${{ env.installer_root }}\pkg\Spack.exe
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
- uses: actions/upload-artifact@v3
with:
name: Windows Spack Installer
path: ${{ env.installer_root}}\pkg\Spack.msi
execute-installer:
needs: make-installer
needs: generate-installer-test
runs-on: windows-latest
defaults:
run:
shell: pwsh
steps:
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Setup installer directory
run: |
mkdir -p spack_installer
@@ -157,4 +185,4 @@ jobs:
$proc = Start-Process ${{ env.spack_installer }}\spack.msi "/quiet" -Passthru
$handle = $proc.Handle # cache proc.Handle
$proc.WaitForExit();
$LASTEXITCODE
$LASTEXITCODE

CHANGELOG.md

@@ -1,232 +1,3 @@
# v0.18.1 (2022-07-19)
### Spack Bugfixes
* Fix several bugs related to bootstrapping (#30834,#31042,#31180)
* Fix a regression that was causing spec hashes to differ between
Python 2 and Python 3 (#31092)
* Fixed compiler flags for oneAPI and DPC++ (#30856)
* Fixed several issues related to concretization (#31142,#31153,#31170,#31226)
* Improved support for Cray manifest file and `spack external find` (#31144,#31201,#31173,#31186)
* Assign a version to openSUSE Tumbleweed according to the GLIBC version
in the system (#19895)
* Improved Dockerfile generation for `spack containerize` (#29741,#31321)
* Fixed a few bugs related to concurrent execution of commands (#31509,#31493,#31477)
### Package updates
* WarpX: add v22.06, fixed libs property (#30866,#31102)
* openPMD: add v0.14.5, update recipe for @develop (#29484,#31023)
# v0.18.0 (2022-05-28)
`v0.18.0` is a major feature release.
## Major features in this release
1. **Concretizer now reuses by default**
`spack install --reuse` was introduced in `v0.17.0`, and `--reuse`
is now the default concretization mode. Spack will try hard to
resolve dependencies using installed packages or binaries (#30396).
To avoid reuse and to use the latest package configurations, (the
old default), you can use `spack install --fresh`, or add
configuration like this to your environment or `concretizer.yaml`:
```yaml
concretizer:
reuse: false
```
2. **Finer-grained hashes**
Spack hashes now include `link`, `run`, *and* `build` dependencies,
as well as a canonical hash of package recipes. Previously, hashes
only included `link` and `run` dependencies (though `build`
dependencies were stored by environments). We coarsened the hash to
reduce churn in user installations, but the new default concretizer
behavior mitigates this concern and gets us reuse *and* provenance.
You will be able to see the build dependencies of new installations
with `spack find`. Old installations will not change and their
hashes will not be affected. (#28156, #28504, #30717, #30861)
3. **Improved error messages**
Error handling with the new concretizer is now done with
optimization criteria rather than with unsatisfiable cores, and
Spack reports many more details about conflicting constraints.
(#30669)
4. **Unify environments when possible**
Environments have thus far supported `concretization: together` or
`concretization: separately`. These have been replaced by a new
preference in `concretizer.yaml`:
```yaml
concretizer:
unify: [true|false|when_possible]
```
`concretizer:unify:when_possible` will *try* to resolve a fully
unified environment, but if it cannot, it will create multiple
configurations of some packages where it has to. For large
environments that previously had to be concretized separately, this
can result in a huge speedup (40-50x). (#28941)
5. **Automatically find externals on Cray machines**
Spack can now automatically discover installed packages in the Cray
Programming Environment by running `spack external find` (or `spack
external read-cray-manifest` to *only* query the PE). Packages from
the PE (e.g., `cray-mpich`) are added to the database with full
dependency information, and compilers from the PE are added to
`compilers.yaml`. Available with the June 2022 release of the Cray
Programming Environment. (#24894, #30428)
6. **New binary format and hardened signing**
Spack now has an updated binary format, with improvements for
security. The new format has a detached signature file, and Spack
verifies the signature before untarring or decompressing the binary
package. The previous format embedded the signature in a `tar`
file, which required the client to run `tar` *before* verifying
(#30750). Spack can still install from build caches using the old
format, but we encourage users to switch to the new format going
forward.
Production GitLab pipelines have been hardened to securely sign
binaries. There is now a separate signing stage so that signing
keys are never exposed to build system code, and signing keys are
ephemeral and only live as long as the signing pipeline stage.
(#30753)
7. **Bootstrap mirror generation**
The `spack bootstrap mirror` command can automatically create a
mirror for bootstrapping the concretizer and other needed
dependencies in an air-gapped environment. (#28556)
8. **Nascent Windows support**
Spack now has initial support for Windows. Spack core has been
refactored to run in the Windows environment, and a small number of
packages can now build for Windows. More details are
[in the documentation](https://spack.rtfd.io/en/latest/getting_started.html#spack-on-windows)
(#27021, #28385, many more)
9. **Makefile generation**
`spack env depfile` can be used to generate a `Makefile` from an
environment, which can be used to build the packages in the environment
in parallel on a single node. e.g.:
```console
spack -e myenv env depfile > Makefile
make
```
Spack propagates `gmake` jobserver information to builds so that
their jobs can share cores. (#30039, #30254, #30302, #30526)
10. **New variant features**
In addition to being conditional themselves, variants can now have
[conditional *values*](https://spack.readthedocs.io/en/latest/packaging_guide.html#conditional-possible-values)
that are only possible for certain configurations of a package. (#29530)
Variants can be
[declared "sticky"](https://spack.readthedocs.io/en/latest/packaging_guide.html#sticky-variants),
which prevents them from being enabled or disabled by the
concretizer. Sticky variants must be set explicitly by users
on the command line or in `packages.yaml`. (#28630)
* Allow conditional possible values in variants
* Add a "sticky" property to variants
## Other new features of note
* Environment views can optionally link only `run` dependencies
with `link:run` (#29336)
* `spack external find --all` finds library-only packages in
addition to build dependencies (#28005)
* Customizable `config:license_dir` option (#30135)
* `spack external find --path PATH` takes a custom search path (#30479)
* `spack spec` has a new `--format` argument like `spack find` (#27908)
* `spack concretize --quiet` skips printing concretized specs (#30272)
* `spack info` now has cleaner output and displays test info (#22097)
* Package-level submodule option for git commit versions (#30085, #30037)
* Using `/hash` syntax to refer to concrete specs in an environment
now works even if `/hash` is not installed. (#30276)
## Major internal refactors
* full hash (see above)
* new develop versioning scheme `0.19.0-dev0`
* Allow for multiple dependencies/dependents from the same package (#28673)
* Splice differing virtual packages (#27919)
## Performance Improvements
* Concretization of large environments with `unify: when_possible` is
much faster than concretizing separately (#28941, see above)
* Single-pass view generation algorithm is 2.6x faster (#29443)
## Archspec improvements
* `oneapi` and `dpcpp` flag support (#30783)
* better support for `M1` and `a64fx` (#30683)
## Removals and Deprecations
* Spack no longer supports Python `2.6` (#27256)
* Removed deprecated `--run-tests` option of `spack install`;
use `spack test` (#30461)
* Removed deprecated `spack flake8`; use `spack style` (#27290)
* Deprecate `spack:concretization` config option; use
`concretizer:unify` (#30038)
* Deprecate top-level module configuration; use module sets (#28659)
* `spack activate` and `spack deactivate` are deprecated in favor of
environments; will be removed in `0.19.0` (#29430; see also `link:run`
in #29336 above)
## Notable Bugfixes
* Fix bug that broke locks with many parallel builds (#27846)
* Many bugfixes and consistency improvements for the new concretizer
and `--reuse` (#30357, #30092, #29835, #29933, #28605, #29694, #28848)
## Packages
* `CMakePackage` uses `CMAKE_INSTALL_RPATH_USE_LINK_PATH` (#29703)
* Refactored `lua` support: `lua-lang` virtual supports both
`lua` and `luajit` via new `LuaPackage` build system (#28854)
* PythonPackage: now installs packages with `pip` (#27798)
* Python: improve site_packages_dir handling (#28346)
* Extends: support spec, not just package name (#27754)
* `find_libraries`: search for both .so and .dylib on macOS (#28924)
* Use stable URLs and `?full_index=1` for all github patches (#29239)
## Spack community stats
* 6,416 total packages, 458 new since `v0.17.0`
* 219 new Python packages
* 60 new R packages
* 377 people contributed to this release
* 337 committers to packages
* 85 committers to core
# v0.17.3 (2022-07-14)
### Spack bugfixes
* Fix missing chgrp on symlinks in package installations (#30743)
* Allow having non-existing upstreams (#30744, #30746)
* Fix `spack stage` with custom paths (#30448)
* Fix failing call for `spack buildcache save-specfile` (#30637)
* Fix globbing in compiler wrapper (#30699)
# v0.17.2 (2022-04-13)
### Spack bugfixes
@@ -240,7 +11,7 @@
* Fixed a few bugs affecting the spack ci command (#29518, #29419)
* Fix handling of Intel compiler environment (#29439)
* Fix a few edge cases when reindexing the DB (#28764)
* Remove "Known issues" from documentation (#29664)
* Remove "Known issues" from documentation (#29664)
* Other miscellaneous bugfixes (0b72e070583fc5bcd016f5adc8a84c99f2b7805f, #28403, #29261)
# v0.17.1 (2021-12-23)

View File

@@ -2,10 +2,10 @@
[![Unit Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Bootstrapping](https://github.com/spack/spack/actions/workflows/bootstrap.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/bootstrap.yml)
[![macOS Builds (nightly)](https://github.com/spack/spack/workflows/macOS%20builds%20nightly/badge.svg?branch=develop)](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
[![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
[![Containers](https://github.com/spack/spack/actions/workflows/build-containers.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/build-containers.yml)
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Slack](https://slack.spack.io/badge.svg)](https://slack.spack.io)
Spack is a multi-platform package manager that builds and installs
@@ -62,7 +62,6 @@ Resources:
* **Slack workspace**: [spackpm.slack.com](https://spackpm.slack.com).
To get an invitation, visit [slack.spack.io](https://slack.spack.io).
* [**GitHub Discussions**](https://github.com/spack/spack/discussions): not just for discussions, but also Q&A.
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
`@mention` us!

View File

@@ -8,11 +8,13 @@
def getpywin():
try:
import win32con # noqa: F401
import win32con # noqa
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "-q", "install", "--upgrade", "pip"])
subprocess.check_call([sys.executable, "-m", "pip", "-q", "install", "pywin32"])
subprocess.check_call(
[sys.executable, "-m", "pip", "-q", "install", "--upgrade", "pip"])
subprocess.check_call(
[sys.executable, "-m", "pip", "-q", "install", "pywin32"])
if __name__ == "__main__":
if __name__ == '__main__':
getpywin()

View File

@@ -77,23 +77,21 @@ if "ruamel" in sys.modules:
try:
import argparse
except ImportError:
argparse_pyc = os.path.join(spack_external_libs, "argparse.pyc")
argparse_pyc = os.path.join(spack_external_libs, 'argparse.pyc')
if not os.path.exists(argparse_pyc):
raise
try:
os.remove(argparse_pyc)
import argparse # noqa: F401
import argparse # noqa
except Exception:
msg = (
"The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. "
"Either delete it manually or ask some administrator to "
"delete it for you."
)
msg = ('The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. '
'Either delete it manually or ask some administrator to '
'delete it for you.')
print(msg.format(argparse_pyc))
sys.exit(1)
import spack.main # noqa: E402
import spack.main # noqa
# Once we've set up the system path, run the spack main method
if __name__ == "__main__":

View File

@@ -1,95 +0,0 @@
#!/bin/bash
set -euo pipefail
[[ -n "${TMPCONFIG_DEBUG:=}" ]] && set -x
DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
mkdir -p "${XDG_RUNTIME_DIR:=/tmp}/spack-tests"
export TMPDIR="${XDG_RUNTIME_DIR}"
export TMP_DIR="$(mktemp -d -t spack-test-XXXXX)"
clean_up() {
[[ -n "$TMPCONFIG_DEBUG" ]] && printf "cleaning up: $TMP_DIR\n"
rm -rf "$TMP_DIR"
}
trap clean_up EXIT
trap clean_up ERR
[[ -n "$TMPCONFIG_DEBUG" ]] && printf "Redirecting TMP_DIR and spack directories to $TMP_DIR\n"
export BOOTSTRAP="${SPACK_USER_CACHE_PATH:=$HOME/.spack}/bootstrap"
export SPACK_USER_CACHE_PATH="$TMP_DIR/user_cache"
mkdir -p "$SPACK_USER_CACHE_PATH"
private_bootstrap="$SPACK_USER_CACHE_PATH/bootstrap"
use_spack=''
use_bwrap=''
# argument handling
while (($# >= 1)) ; do
case "$1" in
-b) # privatize bootstrap too, useful for CI but not always cheap
shift
export BOOTSTRAP="$private_bootstrap"
;;
-B) # use specified bootstrap dir
export BOOTSTRAP="$2"
shift 2
;;
-s) # run spack directly with remaining args
shift
use_spack=1
;;
--contain=bwrap)
if bwrap --help > /dev/null 2>&1 ; then
use_bwrap=1
else
echo Bubblewrap containment requested, but no bwrap command found
exit 1
fi
shift
;;
--)
shift
break
;;
*)
break
;;
esac
done
typeset -a CMD
if [[ -n "$use_spack" ]] ; then
CMD=("$DIR/spack" "$@")
else
CMD=("$@")
fi
mkdir -p "$BOOTSTRAP"
export SPACK_SYSTEM_CONFIG_PATH="$TMP_DIR/sys_conf"
export SPACK_USER_CONFIG_PATH="$TMP_DIR/user_conf"
mkdir -p "$SPACK_USER_CONFIG_PATH"
cat >"$SPACK_USER_CONFIG_PATH/config.yaml" <<EOF
config:
install_tree:
root: $TMP_DIR/install
misc_cache: $$user_cache_path/cache
source_cache: $$user_cache_path/source
EOF
cat >"$SPACK_USER_CONFIG_PATH/bootstrap.yaml" <<EOF
bootstrap:
root: $BOOTSTRAP
EOF
if [[ -n "$use_bwrap" ]] ; then
CMD=(
bwrap
--dev-bind / /
--ro-bind "$DIR/.." "$DIR/.." # do not touch spack root
--ro-bind "$HOME/.spack" "$HOME/.spack" # do not touch user config/cache dir
--bind "$TMP_DIR" "$TMP_DIR"
--bind "$BOOTSTRAP" "$BOOTSTRAP"
--die-with-parent
"${CMD[@]}"
)
fi
(( ${TMPCONFIG_DEBUG:=0} > 1)) && echo "Running: ${CMD[@]}"
"${CMD[@]}"

View File

@@ -6,19 +6,36 @@ bootstrap:
# by Spack is installed in a "store" subfolder of this root directory
root: $user_cache_path/bootstrap
# Methods that can be used to bootstrap software. Each method may or
# may not be able to bootstrap all the software that Spack needs,
# may not be able to bootstrap all of the software that Spack needs,
# depending on its type.
sources:
- name: 'github-actions-v0.3'
metadata: $spack/share/spack/bootstrap/github-actions-v0.3
- name: 'github-actions-v0.2'
metadata: $spack/share/spack/bootstrap/github-actions-v0.2
type: buildcache
description: |
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.2
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
- name: 'github-actions-v0.1'
metadata: $spack/share/spack/bootstrap/github-actions-v0.1
- name: 'spack-install'
metadata: $spack/share/spack/bootstrap/spack-install
type: buildcache
description: |
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.1
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
# This method is just Spack bootstrapping the software it needs from sources.
# It has been added here so that users can selectively disable bootstrapping
# from sources by "untrusting" it.
- name: spack-install
type: install
description: |
Specs built from sources by Spack. May take a long time.
trusted:
# By default we trust bootstrapping from sources and from binaries
# produced on Github via the workflow
github-actions-v0.3: true
github-actions-v0.2: true
spack-install: true

View File

@@ -14,23 +14,4 @@ concretizer:
# concretizing specs. If `true`, we'll try to use as many installs/binaries
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization.
reuse: true
# Options that tune which targets are considered for concretization. The
# concretization process is very sensitive to the number targets, and the time
# needed to reach a solution increases noticeably with the number of targets
# considered.
targets:
# Determine whether we want to target specific or generic microarchitectures.
# An example of the first kind might be for instance "skylake" or "bulldozer",
# while generic microarchitectures are for instance "aarch64" or "x86_64_v4".
granularity: microarchitectures
# If "false" allow targets that are incompatible with the current host (for
# instance concretize with target "icelake" while running on "haswell").
# If "true" only allow targets that are compatible with the host.
host_compatible: true
# When "true" concretize root specs of environments together, so that each unique
# package in an environment corresponds to one concrete spec. This ensures
# environments can always be activated. When "false" perform concretization separately
# on each root spec, allowing different versions and variants of the same package in
# an environment.
unify: false
reuse: false

View File

@@ -33,9 +33,6 @@ config:
template_dirs:
- $spack/share/spack/templates
# Directory where licenses should be located
license_dir: $spack/etc/spack/licenses
# Temporary locations Spack can try to use for builds.
#
# Recommended options are given below.

View File

@@ -13,4 +13,9 @@
# Per-user settings (overrides default and site settings):
# ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules: {}
modules:
prefix_inspections:
lib:
- LD_LIBRARY_PATH
lib64:
- LD_LIBRARY_PATH

View File

@@ -15,7 +15,7 @@
# -------------------------------------------------------------------------
modules:
prefix_inspections:
./lib:
lib:
- DYLD_FALLBACK_LIBRARY_PATH
./lib64:
lib64:
- DYLD_FALLBACK_LIBRARY_PATH

View File

@@ -13,4 +13,9 @@
# Per-user settings (overrides default and site settings):
# ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules: {}
modules:
prefix_inspections:
lib:
- LD_LIBRARY_PATH
lib64:
- LD_LIBRARY_PATH

View File

@@ -14,24 +14,23 @@
# ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules:
# This maps paths in the package install prefix to environment variables
# they should be added to. For example, <prefix>/bin should be in PATH.
# Paths to check when creating modules for all module sets
prefix_inspections:
./bin:
bin:
- PATH
./man:
man:
- MANPATH
./share/man:
share/man:
- MANPATH
./share/aclocal:
share/aclocal:
- ACLOCAL_PATH
./lib/pkgconfig:
lib/pkgconfig:
- PKG_CONFIG_PATH
./lib64/pkgconfig:
lib64/pkgconfig:
- PKG_CONFIG_PATH
./share/pkgconfig:
share/pkgconfig:
- PKG_CONFIG_PATH
./:
'':
- CMAKE_PREFIX_PATH
# These are configurations for the module set named "default"

View File

@@ -25,19 +25,17 @@ packages:
fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame]
fuse: [libfuse]
gl: [glx, osmesa]
gl: [mesa+opengl, mesa18, opengl]
glu: [mesa-glu, openglu]
glx: [mesa+glx, mesa18+glx, opengl]
golang: [gcc]
iconv: [libiconv]
ipp: [intel-ipp]
java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libglx: [mesa+glx, mesa18+glx]
libllvm: [llvm]
libosmesa: [mesa+osmesa, mesa18+osmesa]
lua-lang: [lua, lua-luajit-openresty, lua-luajit]
luajit: [lua-luajit-openresty, lua-luajit]
libllvm: [llvm, llvm-amdgpu]
lua-lang: [lua, lua-luajit]
mariadb-client: [mariadb-c-client, mariadb]
mkl: [intel-mkl]
mpe: [mpe2]
@@ -45,6 +43,7 @@ packages:
mysql-client: [mysql, mariadb-c-client]
opencl: [pocl]
onedal: [intel-oneapi-dal]
osmesa: [mesa+osmesa, mesa18+osmesa]
pbs: [openpbs, torque]
pil: [py-pillow]
pkgconfig: [pkgconf, pkg-config]

View File

@@ -192,32 +192,32 @@ you can use them to customize an installation in :ref:`sec-specs`.
Reusing installed dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, when you run ``spack install``, Spack tries hard to reuse existing installations
as dependencies, either from a local store or from remote buildcaches if configured.
This minimizes unwanted rebuilds of common dependencies, in particular if
you update Spack frequently.
.. warning::
In case you want the latest versions and configurations to be installed instead,
you can add the ``--fresh`` option:
The ``--reuse`` option described here will become the default installation
method in the next Spack version, and you will be able to get the current
behavior by using ``spack install --fresh``.
By default, when you run ``spack install``, Spack tries to build a new
version of the package you asked for, along with updated versions of
its dependencies. This gets you the latest versions and configurations,
but it can result in unwanted rebuilds if you update Spack frequently.
If you want Spack to try hard to reuse existing installations as dependencies,
you can add the ``--reuse`` option:
.. code-block:: console
$ spack install --fresh mpich
$ spack install --reuse mpich
Reusing installations in this mode is "accidental", and happens only if
there's a match between existing installations and what Spack would have installed
anyhow.
You can use the ``spack spec -I mpich`` command to see what
This will not do anything if ``mpich`` is already installed. If ``mpich``
is not installed, but dependencies like ``hwloc`` and ``libfabric`` are,
then ``mpich`` will be built with the installed versions, if possible.
You can use the :ref:`spack spec -I <cmd-spack-spec>` command to see what
will be reused and what will be built before you install.
You can configure Spack to use the ``--fresh`` behavior by default in
``concretizer.yaml``:
.. code-block:: yaml
concretizer:
reuse: false
You can configure Spack to use the ``--reuse`` behavior by default in
``concretizer.yaml``.
.. _cmd-spack-uninstall:
@@ -896,8 +896,8 @@ your path:
$ which mpicc
~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
These commands will add appropriate directories to your ``PATH``
and ``MANPATH`` according to the
These commands will add appropriate directories to your ``PATH``,
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH`` according to the
:ref:`prefix inspections <customize-env-modifications>` defined in your
modules configuration.
When you no longer want to use a package, you can type unload or
@@ -1093,8 +1093,6 @@ could depend on ``mpich@1.2:`` if it can only build with version
Below are more details about the specifiers that you can add to specs.
.. _version-specifier:
^^^^^^^^^^^^^^^^^
Version specifier
^^^^^^^^^^^^^^^^^
@@ -1110,37 +1108,6 @@ set of arbitrary versions, such as ``@1.0,1.5,1.7`` (``1.0``, ``1.5``,
or ``1.7``). When you supply such a specifier to ``spack install``,
it constrains the set of versions that Spack will install.
For packages with a ``git`` attribute, ``git`` references
may be specified instead of a numerical version, i.e. branches, tags,
and commits. Spack will stage and build based off the ``git``
reference provided. Acceptable syntaxes for this are:
.. code-block:: sh
# branches and tags
foo@git.develop # use the develop branch
foo@git.0.19 # use the 0.19 tag
# commit hashes
foo@abcdef1234abcdef1234abcdef1234abcdef1234 # 40 character hashes are automatically treated as git commits
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234
Spack versions from git reference either have an associated version supplied by the user,
or infer a relationship to known versions from the structure of the git repository. If an
associated version is supplied by the user, Spack treats the git version as equivalent to that
version for all version comparisons in the package logic (e.g. ``depends_on('foo', when='@1.5')``).
The associated version can be assigned with ``[git ref]=[version]`` syntax, with the caveat that the specified version is known to Spack from either the package definition, or in the configuration preferences (i.e. ``packages.yaml``).
.. code-block:: sh
foo@git.my_ref=3.2 # use the my_ref tag or branch, but treat it as version 3.2 for version comparisons
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234=develop # use the given commit, but treat it as develop for version comparisons
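As a hedged sketch of how an associated version interacts with package logic (the ``Foo`` package, its URLs, and the ``bar`` dependency are hypothetical, not taken from the Spack repository):
.. code-block:: python

   from spack import *


   class Foo(Package):
       """Hypothetical package with both git and numeric versions."""

       homepage = "https://example.com/foo"
       git = "https://example.com/foo.git"

       version("3.2", tag="v3.2")
       version("1.5", tag="v1.5")

       # `spack install foo@git.my_ref=3.2` treats the git ref as version
       # 3.2 for comparisons, so this conditional dependency applies to it:
       depends_on("bar", when="@1.5:")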
If an associated version is not supplied then the tags in the git repo are used to determine
the most recent previous version known to Spack. Details about how versions are compared
and how Spack determines if one version is less than another are discussed in the developer guide.
If the version spec is not provided, then Spack will choose one
according to policies set for the particular spack installation. If
the spec is ambiguous, i.e. it could match multiple versions, Spack

View File

@@ -50,13 +50,6 @@ build cache files for the "ninja" spec:
Note that the targeted spec must already be installed. Once you have a build cache,
you can add it as a mirror, discussed next.
.. warning::
Spack improved the format used for binary caches in v0.18. The entire v0.18 series
will be able to verify and install binary caches both in the new and in the old format.
Support for using the old format is expected to end in v0.19, so we advise users to
recreate relevant buildcaches using Spack v0.18 or higher.
---------------------------------------
Finding or installing build cache files
---------------------------------------

View File

@@ -1,160 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _bootstrapping:
=============
Bootstrapping
=============
In the :ref:`Getting started <getting_started>` Section we already mentioned that
Spack can bootstrap some of its dependencies, including ``clingo``. In fact, there
is an entire command dedicated to the management of every aspect of bootstrapping:
.. command-output:: spack bootstrap --help
The first thing to understand about bootstrapping in Spack is that each of
Spack's dependencies is bootstrapped lazily, i.e. the first time it is needed and
cannot be found. You can readily check whether any prerequisite for using Spack
is missing by running:
.. code-block:: console
% spack bootstrap status
Spack v0.17.1 - python@3.8
[FAIL] Core Functionalities
[B] MISSING "clingo": required to concretize specs
[FAIL] Binary packages
[B] MISSING "gpg2": required to sign/verify buildcaches
Spack will take care of bootstrapping any missing dependency marked as [B]. Dependencies marked as [-] are instead required to be found on the system.
In the case of the output shown above, Spack detected that both ``clingo`` and ``gnupg``
are missing, and it gives detailed information on why they are needed and whether
they can be bootstrapped. Running a command that concretizes a spec, like:
.. code-block:: console
% spack solve zlib
==> Bootstrapping clingo from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.1/build_cache/darwin-catalina-x86_64/apple-clang-12.0.0/clingo-bootstrap-spack/darwin-catalina-x86_64-apple-clang-12.0.0-clingo-bootstrap-spack-p5on7i4hejl775ezndzfdkhvwra3hatn.spack
==> Installing "clingo-bootstrap@spack%apple-clang@12.0.0~docs~ipo+python build_type=Release arch=darwin-catalina-x86_64" from a buildcache
[ ... ]
triggers the bootstrapping of clingo from pre-built binaries as expected.
-----------------------
The Bootstrapping store
-----------------------
The software installed for bootstrapping purposes is deployed in a separate store.
Its location can be checked with the following command:
.. code-block:: console
% spack bootstrap root
It can also be changed with the same command, by specifying the desired path:
.. code-block:: console
% spack bootstrap root /opt/spack/bootstrap
You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
clingo-bootstrap@spack libassuan@2.5.5 libgpg-error@1.42 libksba@1.5.1 pinentry@1.1.1 zlib@1.2.11
gnupg@2.3.1 libgcrypt@1.9.3 libiconv@1.16 npth@1.6 python@3.8
If needed, you can remove all of the software in the current bootstrapping store with:
.. code-block:: console
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
--------------------------------------------
Enabling and disabling bootstrapping methods
--------------------------------------------
Bootstrapping is always performed by trying the methods listed by:
.. command-output:: spack bootstrap list
in the order they appear, from top to bottom. By default, Spack is
configured to try bootstrapping from pre-built binaries first, and to
fall back to bootstrapping from sources if that fails.
If need be, you can disable bootstrapping altogether by running:
.. code-block:: console
% spack bootstrap disable
in which case it's your responsibility to ensure Spack runs in an
environment where all its prerequisites are installed. You can
also configure Spack to skip certain bootstrapping methods by *untrusting*
them. For instance:
.. code-block:: console
% spack bootstrap untrust github-actions
==> "github-actions" is now untrusted and will not be used for bootstrapping
tells Spack to skip trying to bootstrap from binaries. To add the "github-actions" method back you can:
.. code-block:: console
% spack bootstrap trust github-actions
There is also an option to reset the bootstrapping configuration to Spack's defaults:
.. code-block:: console
% spack bootstrap reset
==> Bootstrapping configuration is being reset to Spack's defaults. Current configuration will be lost.
Do you want to continue? [Y/n]
%
----------------------------------------
Creating a mirror for air-gapped systems
----------------------------------------
Spack's default configuration for bootstrapping relies on the user having
access to the internet, either to fetch pre-compiled binaries or source tarballs.
Sometimes, though, Spack is deployed on air-gapped systems where such access is denied.
To help in these situations, Spack has a command that recreates, in a local folder
of your choice, a mirror containing the source tarballs and/or binary packages needed for
bootstrapping.
.. code-block:: console
% spack bootstrap mirror --binary-packages /opt/bootstrap
==> Adding "clingo-bootstrap@spack+python %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "gnupg@2.3: %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "patchelf@0.13.1:0.13.99 %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding binary packages from "https://github.com/alalazo/spack-bootstrap-mirrors/releases/download/v0.1-rc.2/bootstrap-buildcache.tar.gz" to the mirror at /opt/bootstrap/local-mirror
To register the mirror on the platform where it's supposed to be used run the following command(s):
% spack bootstrap add --trust local-sources /opt/bootstrap/metadata/sources
% spack bootstrap add --trust local-binaries /opt/bootstrap/metadata/binaries
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.

View File

@@ -49,8 +49,9 @@ packages rather than building its own packages. This may be desirable
if machines ship with system packages, such as a customized MPI
that should be used instead of Spack building its own MPI.
External packages are configured through the ``packages.yaml`` file.
Here's an example of an external configuration:
External packages are configured through the ``packages.yaml`` file found
in a Spack installation's ``etc/spack/`` or a user's ``~/.spack/``
directory. Here's an example of an external configuration:
.. code-block:: yaml
@@ -96,14 +97,11 @@ Each package version and compiler listed in an external should
have entries in Spack's packages and compiler configuration, even
though the package and compiler may not ever be built.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prevent packages from being built from sources
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding an external spec in ``packages.yaml`` allows Spack to use an external location,
but it does not prevent Spack from building packages from sources. In the above example,
Spack might choose for many valid reasons to start building and linking with the
latest version of OpenMPI rather than continue using the pre-installed OpenMPI versions.
The packages configuration can tell Spack to use an external location
for certain package versions, but it does not restrict Spack to using
external packages. In the above example, since newer versions of OpenMPI
are available, Spack will choose to start building and linking with the
latest version rather than continue using the pre-installed OpenMPI versions.
To prevent this, the ``packages.yaml`` configuration also allows packages
to be flagged as non-buildable. The previous example could be modified to
@@ -123,15 +121,9 @@ be:
buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
its own version of OpenMPI from sources, and it will instead always rely on a pre-built
OpenMPI.
.. note::
If ``concretizer:reuse`` is on (see :ref:`concretizer-options` for more information on that flag)
pre-built specs include specs already available from a local store, an upstream store, a registered
buildcache or specs marked as externals in ``packages.yaml``. If ``concretizer:reuse`` is off, only
external specs in ``packages.yaml`` are included in the list of pre-built specs.
its own version of OpenMPI, and it will instead always rely on a pre-built
OpenMPI. Similar to ``paths``, ``buildable`` is specified as a property under
a package name.
If an external module is specified as not buildable, then Spack will load the
external module into the build environment, where it can be used for linking.
@@ -140,10 +132,6 @@ The ``buildable`` does not need to be paired with external packages.
It could also be used alone to forbid packages that may be
buggy or otherwise undesirable.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Non-buildable virtual packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Virtual packages in Spack can also be specified as not buildable, and
external implementations can be provided. In the example above,
OpenMPI is configured as not buildable, but Spack will often prefer
@@ -165,37 +153,21 @@ but more conveniently:
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
In cases where the concretizer is configured to reuse specs, and other ``mpi`` providers
(available via stores or buildcaches) are not wanted, Spack can be configured to require
specs matching only the available externals:
Implementations can also be listed immediately under the virtual they provide:
.. code-block:: yaml
packages:
mpi:
buildable: False
require:
- one_of: [
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64",
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug",
"openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
]
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
mpich@3.3 %clang@9.0.0 arch=linux-debian7-x86_64: /opt/mpich-3.3-intel
This configuration prevents any spec that uses MPI and originates from stores or buildcaches from being reused,
unless it matches the requirements under ``packages:mpi:require``. For more information on requirements see
:ref:`package-requirements`.
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
.. _cmd-spack-external-find:
@@ -222,6 +194,11 @@ Specific limitations include:
* Packages are not discoverable by default: For a package to be
discoverable with ``spack external find``, it needs to add special
logic. See :ref:`here <make-package-findable>` for more details, and
the sketch after this list.
* The current implementation only collects and examines executable files,
so it is typically only useful for build/run dependencies (in some cases
if a library package also provides an executable, it may be possible to
extract a meaningful Spec by running the executable - for example the
compiler wrappers in MPI implementations).
* The logic does not search through module files; it can only detect
packages with executables defined in ``PATH``; you can help Spack locate
externals which use module files by loading any associated modules for
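As a hedged illustration of the special detection logic mentioned in the first bullet, a minimal sketch might look like the following (the ``mytool`` executable name, version flag, and regex are hypothetical):
.. code-block:: python

   import re

   from spack import *
   from spack.util.executable import Executable


   class Mytool(Package):
       """Hypothetical package detectable by ``spack external find``."""

       homepage = "https://example.com/mytool"

       # regexes matched against the names of executables found in PATH
       executables = [r"^mytool$"]

       @classmethod
       def determine_version(cls, exe):
           # Run the candidate executable and parse a version from its
           # output; returning None tells Spack this is not a real match.
           output = Executable(exe)("--version", output=str, error=str)
           match = re.search(r"mytool version (\S+)", output)
           return match.group(1) if match else None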
@@ -242,65 +219,33 @@ Concretizer options
but you can also use ``concretizer.yaml`` to customize aspects of the
algorithm it uses to select the dependencies you install:
.. literalinclude:: _spack_root/etc/spack/defaults/concretizer.yaml
:language: yaml
.. code-block:: yaml
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Reuse already installed packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
concretizer:
# Whether to consider installed packages or packages from buildcaches when
# concretizing specs. If `true`, we'll try to use as many installs/binaries
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization.
reuse: false
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
^^^^^^^^^^^^^^^^
``reuse``
^^^^^^^^^^^^^^^^
This controls whether Spack will prefer to use installed packages (``true``), or
whether it will do a "fresh" installation and prefer the latest settings from
``package.py`` files and ``packages.yaml`` (``false``).
You can use:
``package.py`` files and ``packages.yaml`` (``false``).
.. code-block:: console
You can use ``spack install --reuse`` to enable reuse for a single installation,
and you can use ``spack install --fresh`` to do a fresh install if ``reuse`` is
enabled by default.
% spack install --reuse <spec>
.. note::
to enable reuse for a single installation, and you can use:
``reuse: false`` is the current default, but ``reuse: true`` will be the default
in the next Spack release. You will still be able to use ``spack install --fresh``
to get the old behavior.
.. code-block:: console
spack install --fresh <spec>
to do a fresh install if ``reuse`` is enabled by default.
``reuse: true`` is the default.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Selection of the target microarchitectures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The options under the ``targets`` attribute control which targets are considered during a solve.
Currently the options in this section are only configurable from the ``concretizer.yaml`` file
and there are no corresponding command line arguments to enable them for a single solve.
The ``granularity`` option can take two possible values: ``microarchitectures`` and ``generic``.
If set to:
.. code-block:: yaml
concretizer:
targets:
granularity: microarchitectures
Spack will consider all the microarchitectures known to ``archspec`` to label nodes for
compatibility. If instead the option is set to:
.. code-block:: yaml
concretizer:
targets:
granularity: generic
Spack will consider only generic microarchitectures. For instance, when running on a
Haswell node, Spack will consider ``haswell`` as the best target in the former case and
``x86_64_v3`` as the best target in the latter case.
The ``host_compatible`` option is a Boolean option that determines whether or not the
microarchitectures considered during the solve are constrained to be compatible with the
host Spack is currently running on. For instance, if this option is set to ``true``, a
user cannot concretize for ``target=icelake`` while running on a Haswell node.
.. _package-preferences:
@@ -362,126 +307,6 @@ concretization rules. A provider lists a value that packages may
``depend_on`` (e.g, MPI) and a list of rules for fulfilling that
dependency.
.. _package-requirements:
--------------------
Package Requirements
--------------------
You can use the configuration to force the concretizer to choose
specific properties for packages when building them. Like preferences,
these are only applied when the package is required by some other
request (e.g. if the package is needed as a dependency of a
request to ``spack install``).
An example of where this is useful is a package that is normally built
as a dependency, but only under certain circumstances
(e.g. only when a variant on a dependent is active): you can make
sure that it always builds the way you want it to. This distinguishes
package configuration requirements from constraints that you add to
``spack install`` or to environments; in those cases, the associated
packages are always built.
The following is an example of how to enforce package properties in
``packages.yaml``:
.. code-block:: yaml
packages:
libfabric:
require: "@1.13.2"
openmpi:
require:
- any_of: ["~cuda", "%gcc"]
mpich:
require:
- one_of: ["+cuda", "+rocm"]
Requirements are expressed using Spec syntax (the same as what is provided
to ``spack install``). In the simplest case, you can specify attributes
that you always want the package to have by providing a single spec to
``require``; in the above example, ``libfabric`` will always build
with version 1.13.2.
You can provide a more-relaxed constraint and allow the concretizer to
choose between a set of options using ``any_of`` or ``one_of``:
* ``any_of`` is a list of specs. One of those specs must be satisfied
and it is also allowed for the concretized spec to match more than one.
In the above example, that means you could build ``openmpi+cuda%gcc``,
``openmpi~cuda%clang`` or ``openmpi~cuda%gcc`` (in the last case,
note that both specs in the ``any_of`` for ``openmpi`` are
satisfied).
* ``one_of`` is also a list of specs, and the final concretized spec
must match exactly one of them. In the above example, that means
you could build ``mpich+cuda`` or ``mpich+rocm`` but not
``mpich+cuda+rocm`` (note the current package definition for
``mpich`` already includes a conflict, so this is redundant but
still demonstrates the concept).
.. note::
For ``any_of`` and ``one_of``, the order of specs indicates a
preference: items that appear earlier in the list are preferred
(note that these preferences can be ignored in favor of others).
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting default requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can also set default requirements for all packages under ``all``
like this:
.. code-block:: yaml
packages:
all:
require: '%clang'
which means every spec will be required to use ``clang`` as a compiler.
Note that in this case ``all`` represents a *default set of requirements* -
if there are specific package requirements, then the default requirements
under ``all`` are disregarded. For example, with a configuration like this:
.. code-block:: yaml
packages:
all:
require: '%clang'
cmake:
require: '%gcc'
Spack requires ``cmake`` to use ``gcc`` and all other nodes (including cmake dependencies)
to use ``clang``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting requirements on virtual specs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A requirement on a virtual spec applies whenever that virtual is present in the DAG. This
can be useful for fixing which virtual provider you want to use:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
With the configuration above the only allowed ``mpi`` provider is ``mvapich2 %gcc``.
Requirements on the virtual spec and on the specific provider are both applied, if present. For
instance with a configuration like:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
mvapich2:
require: '~cuda'
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
.. _package_permissions:

View File

@@ -39,7 +39,6 @@ on these ideas for each distinct build system that Spack supports:
build_systems/autotoolspackage
build_systems/cmakepackage
build_systems/cachedcmakepackage
build_systems/mesonpackage
build_systems/qmakepackage
build_systems/sippackage
@@ -48,7 +47,6 @@ on these ideas for each distinct build system that Spack supports:
:maxdepth: 1
:caption: Language-specific
build_systems/luapackage
build_systems/octavepackage
build_systems/perlpackage
build_systems/pythonpackage
@@ -62,12 +60,11 @@ on these ideas for each distinct build system that Spack supports:
build_systems/bundlepackage
build_systems/cudapackage
build_systems/custompackage
build_systems/inteloneapipackage
build_systems/intelpackage
build_systems/multiplepackage
build_systems/rocmpackage
build_systems/sourceforgepackage
build_systems/custompackage
build_systems/multiplepackage
For reference, the :py:mod:`Build System API docs <spack.build_systems>`
provide a list of build systems and methods/attributes that can be

View File

@@ -1,123 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _cachedcmakepackage:
------------------
CachedCMakePackage
------------------
The CachedCMakePackage base class is used for CMake-based workflows
that create a CMake cache file prior to running ``cmake``. This is
useful for packages with arguments longer than the system limit, and
for reproducibility.
The documentation for this class assumes that the user is familiar with
the ``CMakePackage`` class from which it inherits. See the documentation
for :ref:`CMakePackage <cmakepackage>`.
^^^^^^
Phases
^^^^^^
The ``CachedCMakePackage`` base class comes with the following phases:
#. ``initconfig`` - generate the CMake cache file
#. ``cmake`` - generate the Makefile
#. ``build`` - build the package
#. ``install`` - install the package
By default, these phases run:
.. code-block:: console
$ mkdir spack-build
$ cd spack-build
$ cat << EOF > name-arch-compiler@version.cmake
# Write information on compilers and dependencies
# includes information on mpi and cuda if applicable
$ cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/installation/prefix -C name-arch-compiler@version.cmake
$ make
$ make test # optional
$ make install
The ``CachedCMakePackage`` class inherits from the ``CMakePackage``
class, and accepts all of the same options and adds all of the same
flags to the ``cmake`` command. Similar to the ``CMakePackage`` class,
you may need to add a few arguments yourself, and the
``CachedCMakePackage`` provides the same interface to add those
flags.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding entries to the CMake cache
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In addition to adding flags to the ``cmake`` command, you may need to
add entries to the CMake cache in the ``initconfig`` phase. This can
be done by overriding one of four methods:
#. ``CachedCMakePackage.initconfig_compiler_entries``
#. ``CachedCMakePackage.initconfig_mpi_entries``
#. ``CachedCMakePackage.initconfig_hardware_entries``
#. ``CachedCMakePackage.initconfig_package_entries``
Each of these methods returns a list of CMake cache strings. The
distinction between these methods is merely to provide a
well-structured and legible cmake cache file -- otherwise, entries
from each of these methods are handled identically.
Spack also provides convenience methods for generating CMake cache
entries. These methods are available at module scope in every Spack
package. Because CMake parses boolean options, strings, and paths
differently, there are three such methods:
#. ``cmake_cache_option``
#. ``cmake_cache_string``
#. ``cmake_cache_path``
These methods each accept three parameters -- the name of the CMake
variable associated with the entry, the value of the entry, and an
optional comment -- and return strings in the appropriate format to be
returned from any of the ``initconfig*`` methods. Additionally, these
methods may return comments beginning with the ``#`` character.
A typical usage of these methods may look something like this:
.. code-block:: python
   def initconfig_mpi_entries(self):
       # Get existing MPI configurations
       entries = super(Foo, self).initconfig_mpi_entries()

       # The existing MPI configurations key on whether ``mpi`` is in the spec
       # This spec has an MPI variant, and we need to enable MPI when it is on.
       # This hypothetical package controls MPI with the ``FOO_MPI`` option to
       # cmake.
       if '+mpi' in self.spec:
           entries.append(cmake_cache_option('FOO_MPI', True, "enable mpi"))
       else:
           entries.append(cmake_cache_option('FOO_MPI', False, "disable mpi"))
       return entries

   def initconfig_package_entries(self):
       # Package specific options
       entries = []

       entries.append('#Entries for build options')
       bar_on = '+bar' in self.spec
       entries.append(cmake_cache_option('FOO_BAR', bar_on, 'toggle bar'))

       entries.append('#Entries for dependencies')
       if self.spec['blas'].name == 'baz':  # baz is our blas provider
           entries.append(cmake_cache_string('FOO_BLAS', 'baz', 'Use baz'))
           entries.append(cmake_cache_path('BAZ_PREFIX', self.spec['baz'].prefix))
       return entries
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on CMake cache files, see:
https://cmake.org/cmake/help/latest/manual/cmake.1.html

View File

@@ -84,8 +84,8 @@ build ``hdf5`` with Intel oneAPI MPI do::
spack install hdf5 +mpi ^intel-oneapi-mpi
Using Externally Installed oneAPI Tools
=======================================
Using an Externally Installed oneAPI
====================================
Spack can also use oneAPI tools that are manually installed with
`Intel Installers`_. The procedures for configuring Spack to use
@@ -110,7 +110,7 @@ Another option is to manually add the configuration to
Libraries
---------
If you want Spack to use oneMKL that you have installed without Spack in
If you want Spack to use MKL that you have installed without Spack in
the default location, then add the following to
``~/.spack/packages.yaml``, adjusting the version as appropriate::
@@ -139,7 +139,7 @@ You can also use Spack-installed libraries. For example::
spack load intel-oneapi-mkl
Will update your environment CPATH, LIBRARY_PATH, and other
environment variables for building an application with oneMKL.
environment variables for building an application with MKL.
More information
================

View File

@@ -15,9 +15,6 @@ IntelPackage
Intel packages in Spack
^^^^^^^^^^^^^^^^^^^^^^^^
This is an earlier version of Intel software development tools and has
now been replaced by Intel oneAPI Toolkits.
Spack can install and use several software development products offered by Intel.
Some of these are available under no-cost terms, others require a paid license.
All share the same basic steps for configuration, installation, and, where

View File

@@ -1,105 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _luapackage:
------------
LuaPackage
------------
LuaPackage is a helper for the common case of Lua packages that provide
a rockspec file. It is not meant to take a rock archive, but to build
a source archive or repository that provides a rockspec, which should cover
most Lua packages. If a Lua package builds with Make rather than
LuaRocks, prefer ``MakefilePackage``.
^^^^^^
Phases
^^^^^^
The ``LuaPackage`` base class comes with the following phases:
#. ``unpack`` - if using a rock, unpacks the rock and moves into the source directory
#. ``preprocess`` - adjust sources or rockspec to fix build
#. ``install`` - install the project
By default, these phases run:
.. code-block:: console
# If the archive is a source rock
$ luarocks unpack <archive>.src.rock
$ # preprocess is a noop by default
$ luarocks make <name>.rockspec
Any of these phases can be overridden in your package as necessary.
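For instance, a hedged sketch of overriding the ``preprocess`` phase (the package name and the rockspec edit are invented for illustration):
.. code-block:: python

   from spack import *


   class LuaFoo(LuaPackage):
       """Hypothetical package that adjusts its rockspec before installing."""

       def preprocess(self, spec, prefix):
           # Relax an over-strict interpreter constraint in the rockspec
           # before ``luarocks make`` runs.
           filter_file('"lua >= 5.4"', '"lua >= 5.1"', "luafoo-1.0-1.rockspec")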
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
Packages that use the Lua/LuaRocks build system can be identified by the
presence of a ``*.rockspec`` file in their source tree, or can be fetched as
a source rock archive (``.src.rock``). The rockspec file declares things like build
instructions and dependencies; the ``.src.rock`` archive also contains all of the code.
It is common for the rockspec file to list the Lua version required in
a dependency. The LuaPackage class adds appropriate dependencies on a Lua
implementation, but it is a good idea to specify the version required with
a ``depends_on`` statement. The block is normally a table definition like
this:
.. code-block:: lua
   dependencies = {
      "lua >= 5.1",
   }
The LuaPackage class supports source repositories and archives containing
a rockspec and directly downloading source rock files. It *does not* support
downloading dependencies listed inside a rockspec, and thus does not support
directly downloading a rockspec as an archive.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
All base dependencies are added by the build system, but LuaRocks is run in a
way that avoids downloading extra Lua dependencies during the build. If the package needs
Lua libraries outside the standard set, they should be added as dependencies.
To specify a Lua version constraint but allow all lua implementations, prefer
to use ``depends_on("lua-lang@5.1:5.1.99")`` to express any 5.1 compatible
version. If the package requires LuaJit rather than Lua,
a ``depends_on("luajit")`` should be used to ensure a LuaJit distribution is
used instead of the Lua interpreter. Alternately, if only interpreted Lua will
work, ``depends_on("lua")`` will express that.
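A minimal sketch of these constraints in a hypothetical package (all names are illustrative):
.. code-block:: python

   from spack import *


   class LuaFoo(LuaPackage):
       """Hypothetical rockspec-based package."""

       # any Lua 5.1-compatible implementation, interpreter or LuaJIT
       depends_on("lua-lang@5.1:5.1.99")
       # an extra Lua library outside the standard set
       depends_on("lua-luafilesystem", type=("build", "run"))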
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to luarocks make
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you need to pass any arguments to the ``luarocks make`` call, you can
override the ``luarocks_args`` method like so:
.. code-block:: python
def luarocks_args(self):
return ['flag1', 'flag2']
One common use of this is to override warnings or flags for newer compilers, as in:
.. code-block:: python
def luarocks_args(self):
return ["CFLAGS='-Wno-error=implicit-function-declaration'"]
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on the LuaRocks build system, see:
https://luarocks.org/

View File

@@ -48,11 +48,8 @@ important to understand.
**build backend**
Libraries used to define how to build a wheel. Examples
include `setuptools <https://setuptools.pypa.io/>`__,
`flit <https://flit.pypa.io/>`_,
`poetry <https://python-poetry.org/>`_,
`hatchling <https://hatch.pypa.io/latest/>`_,
`meson <https://meson-python.readthedocs.io/>`_, and
`pdm <https://pdm.fming.dev/latest/>`_.
`flit <https://flit.readthedocs.io/>`_, and
`poetry <https://python-poetry.org/>`_.
^^^^^^^^^^^
Downloading
@@ -176,9 +173,9 @@ package. The "Project description" tab may also contain a longer
description of the package. Either of these can be used to populate
the package docstring.
^^^^^^^^^^^^
Dependencies
^^^^^^^^^^^^
^^^^^^^^^^^^^
Build backend
^^^^^^^^^^^^^
Once you've determined the basic metadata for a package, the next
step is to determine the build backend. ``PythonPackage`` uses
@@ -216,33 +213,12 @@ Note that ``py-wheel`` is already listed as a build dependency in the
need to specify a specific version requirement or change the
dependency type.
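For example, a hypothetical package whose ``pyproject.toml`` names setuptools as its backend might pin the dependency like so (the version bound is illustrative):
.. code-block:: python

   from spack import *


   class PyFoo(PythonPackage):
       """Hypothetical package built with the setuptools backend."""

       # build-backend = "setuptools.build_meta" in pyproject.toml;
       # only needed at build time, hence type="build"
       depends_on("py-setuptools@42:", type="build")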
See `PEP 517 <https://www.python.org/dev/peps/pep-0517/>`__ and
See `PEP 517 <https://www.python.org/dev/peps/pep-0517/>`_ and
`PEP 518 <https://www.python.org/dev/peps/pep-0518/>`_ for more
information on the design of ``pyproject.toml``.
Depending on which build backend a project uses, there are various
places that run-time dependencies can be listed. Most modern build
backends support listing dependencies directly in ``pyproject.toml``.
Look for dependencies under the following keys:
* ``requires-python`` under ``[project]``
This specifies the version of Python that is required
* ``dependencies`` under ``[project]``
These packages are required for building and installation. You can
add them with ``type=('build', 'run')``.
* ``[project.optional-dependencies]``
This section includes keys with lists of optional dependencies
needed to enable those features. You should add a variant that
optionally adds these dependencies. This variant should be ``False``
by default.
Some build backends may have additional locations where dependencies
can be found.
places that run-time dependencies can be listed.
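As a hedged sketch, the keys above might translate to a hypothetical package as follows (all names and version bounds are illustrative):
.. code-block:: python

   from spack import *


   class PyFoo(PythonPackage):
       """Hypothetical mapping of the pyproject.toml keys listed above."""

       # requires-python = ">=3.7"
       depends_on("python@3.7:", type=("build", "run"))

       # dependencies = ["numpy>=1.20"]
       depends_on("py-numpy@1.20:", type=("build", "run"))

       # [project.optional-dependencies]: plot = ["matplotlib"]
       variant("plot", default=False, description="Enable optional plotting support")
       depends_on("py-matplotlib", when="+plot", type=("build", "run"))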
"""""""""
distutils
@@ -268,9 +244,9 @@ If the ``pyproject.toml`` lists ``setuptools.build_meta`` as a
``build-backend``, or if the package has a ``setup.py`` that imports
``setuptools``, or if the package has a ``setup.cfg`` file, then it
uses setuptools to build. Setuptools is a replacement for the
distutils library, and has almost the exact same API. In addition to
``pyproject.toml``, dependencies can be listed in the ``setup.py`` or
``setup.cfg`` file. Look for the following arguments:
distutils library, and has almost the exact same API. Dependencies
can be listed in the ``setup.py`` or ``setup.cfg`` file. Look for the
following arguments:
* ``python_requires``
@@ -315,22 +291,25 @@ listed directly in the ``pyproject.toml`` file. Older versions of
flit used to store this info in a ``flit.ini`` file, so check for
this too.
In addition to the default ``pyproject.toml`` keys listed above,
older versions of flit may use the following keys:
Either of these files may contain keys like:
* ``requires`` under ``[tool.flit.metadata]``
* ``requires-python``
This specifies the version of Python that is required
* ``dependencies`` or ``requires``
These packages are required for building and installation. You can
add them with ``type=('build', 'run')``.
* ``[tool.flit.metadata.requires-extra]``
* ``project.optional-dependencies`` or ``requires-extra``
This section includes keys with lists of optional dependencies
needed to enable those features. You should add a variant that
optionally adds these dependencies. This variant should be False
by default.
See https://flit.pypa.io/en/latest/pyproject_toml.html for
See https://flit.readthedocs.io/en/latest/pyproject_toml.html for
more information.
""""""
@@ -347,38 +326,6 @@ for specifying the version requirements. Note that ``~=`` works
differently in poetry than in setuptools and flit for versions that
start with a zero.
"""""""""
hatchling
"""""""""
If the ``pyproject.toml`` lists ``hatchling.build`` as the
``build-backend``, it uses the hatchling build system. Hatchling
uses the default ``pyproject.toml`` keys to list dependencies.
See https://hatch.pypa.io/latest/config/dependency/ for more
information.
"""""
meson
"""""
If the ``pyproject.toml`` lists ``mesonpy`` as the ``build-backend``,
it uses the meson build system. Meson uses the default
``pyproject.toml`` keys to list dependencies.
See https://meson-python.readthedocs.io/en/latest/usage/start.html
for more information.
"""
pdm
"""
If the ``pyproject.toml`` lists ``pdm.pep517.api`` as the ``build-backend``,
it uses the PDM build system. PDM uses the default ``pyproject.toml``
keys to list dependencies.
See https://pdm.fming.dev/latest/ for more information.
""""""
wheels
""""""
@@ -423,34 +370,6 @@ packages. However, the installation instructions for a package may
suggest passing certain flags to the ``setup.py`` call. The
``PythonPackage`` class has two techniques for doing this.
"""""""""""""""
Config settings
"""""""""""""""
These settings are passed to
`PEP 517 <https://peps.python.org/pep-0517/>`__ build backends.
For example, the ``py-scipy`` package allows you to specify the name of
the BLAS/LAPACK library you want pkg-config to search for:
.. code-block:: python
depends_on('py-pip@22.1:', type='build')
def config_settings(self, spec, prefix):
return {
'blas': spec['blas'].libs.names[0],
'lapack': spec['lapack'].libs.names[0],
}
.. note::
This flag only works for packages that define a ``build-backend``
in ``pyproject.toml``. Also, it is only supported by pip 22.1+,
which requires Python 3.7+. For packages that still support Python
3.6 and older, ``install_options`` should be used instead.
""""""""""""""
Global options
""""""""""""""
@@ -470,16 +389,6 @@ has an optional dependency on ``libyaml`` that can be enabled like so:
return options
.. note::
Direct invocation of ``setup.py`` is
`deprecated <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html>`_.
This flag forces pip to use a deprecated installation procedure.
It should only be used in packages that don't define a
``build-backend`` in ``pyproject.toml`` or packages that still
support Python 3.6 and older.
"""""""""""""""
Install options
"""""""""""""""
@@ -500,16 +409,6 @@ allows you to specify the directories to search for ``libyaml``:
return options
.. note::
Direct invocation of ``setup.py`` is
`deprecated <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html>`_.
This flag forces pip to use a deprecated installation procedure.
It should only be used in packages that don't define a
``build-backend`` in ``pyproject.toml`` or packages that still
support Python 3.6 and older.
^^^^^^^
Testing
^^^^^^^
@@ -582,19 +481,6 @@ libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed.
Instead of defining ``import_modules`` explicitly, only the subset
of module names to be skipped can be defined by using ``skip_modules``.
If a defined module has submodules, they are skipped as well, e.g.,
in case the ``plotting`` modules should be excluded from the
automatically detected ``import_modules`` ``['nilearn', 'nilearn.surface',
'nilearn.plotting', 'nilearn.plotting.data']`` set:
.. code-block:: python
skip_modules = ['nilearn.plotting']
This will set ``import_modules`` to ``['nilearn', 'nilearn.surface']``
Import tests can be run during the installation using ``spack install
--test=root`` or at any time after the installation using
``spack test run``.
@@ -778,8 +664,5 @@ For more information on build and installation frontend tools, see:
For more information on build backend tools, see:
* setuptools: https://setuptools.pypa.io/
* flit: https://flit.pypa.io/
* flit: https://flit.readthedocs.io/
* poetry: https://python-poetry.org/
* hatchling: https://hatch.pypa.io/latest/
* meson: https://meson-python.readthedocs.io/
* pdm: https://pdm.fming.dev/latest/

View File

@@ -95,7 +95,7 @@ class of your package. For example, you can add it to your
# Set up the hip macros needed by the build
args.extend([
'-DENABLE_HIP=ON',
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix)])
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix])
rocm_archs = spec.variants['amdgpu_target'].value
if 'none' not in rocm_archs:
args.append('-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'

View File

@@ -1,55 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _sourceforgepackage:
------------------
SourceforgePackage
------------------
``SourceforgePackage`` is a
`mixin-class <https://en.wikipedia.org/wiki/Mixin>`_. It automatically
sets the URL based on a list of Sourceforge mirrors listed in
`sourceforge_mirror_path`, which defaults to a half dozen known mirrors.
Refer to the package source
(`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/sourceforge.py>`__) for the current list of mirrors used by Spack.
^^^^^^^
Methods
^^^^^^^
This package provides a method for populating mirror URLs.
**urls**
This method returns a list of possible URLs for package source.
It is decorated with `property` so its results are treated as
a package attribute.
Refer to
`<https://spack.readthedocs.io/en/latest/packaging_guide.html#mirrors-of-the-main-url>`__
for information on how Spack uses the `urls` attribute during
fetching.
^^^^^
Usage
^^^^^
This helper package can be added to your package by adding it as a base
class of your package and defining the relative location of an archive
file for one version of your software.
.. code-block:: python
   :emphasize-lines: 1,3

   class MyPackage(AutotoolsPackage, SourceforgePackage):
       ...
       sourceforge_mirror_path = "my-package/mypackage.1.0.0.tar.gz"
       ...
Over 40 packages use the ``SourceforgePackage`` mix-in as of
July 2022, so there are multiple packages to choose from if you want
to see a real example.

View File

@@ -23,48 +23,43 @@
import sys
from glob import glob
from docutils.statemachine import StringList
from sphinx.domains.python import PythonDomain
from sphinx.ext.apidoc import main as sphinx_apidoc
from sphinx.parsers import RSTParser
# -- Spack customizations -----------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external"))
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/pytest-fallback"))
sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external'))
sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external/pytest-fallback'))
if sys.version_info[0] < 3:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib"))
sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib'))
else:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib3"))
sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib3'))
sys.path.append(os.path.abspath("_spack_root/lib/spack/"))
sys.path.append(os.path.abspath('_spack_root/lib/spack/'))
# Add the Spack bin directory to the path so that we can use its output in docs.
os.environ["SPACK_ROOT"] = os.path.abspath("_spack_root")
os.environ["PATH"] += "%s%s" % (os.pathsep, os.path.abspath("_spack_root/bin"))
os.environ['SPACK_ROOT'] = os.path.abspath('_spack_root')
os.environ['PATH'] += "%s%s" % (os.pathsep, os.path.abspath('_spack_root/bin'))
# Set an environment variable so that colify will print output like it would to
# a terminal.
os.environ["COLIFY_SIZE"] = "25x120"
os.environ["COLUMNS"] = "120"
os.environ['COLIFY_SIZE'] = '25x120'
os.environ['COLUMNS'] = '120'
# Generate full package list if needed
subprocess.call(["spack", "list", "--format=html", "--update=package_list.html"])
subprocess.call([
'spack', 'list', '--format=html', '--update=package_list.html'])
# Generate a command index if an update is needed
subprocess.call(
[
"spack",
"commands",
"--format=rst",
"--header=command_index.in",
"--update=command_index.rst",
]
+ glob("*rst")
)
subprocess.call([
'spack', 'commands',
'--format=rst',
'--header=command_index.in',
'--update=command_index.rst'] + glob('*rst'))
#
# Run sphinx-apidoc
@@ -74,12 +69,12 @@
# Without this, the API Docs will never actually update
#
apidoc_args = [
"--force", # Overwrite existing files
"--no-toc", # Don't create a table of contents file
"--output-dir=.", # Directory to place all output
'--force', # Overwrite existing files
'--no-toc', # Don't create a table of contents file
'--output-dir=.', # Directory to place all output
]
sphinx_apidoc(apidoc_args + ["_spack_root/lib/spack/spack"])
sphinx_apidoc(apidoc_args + ["_spack_root/lib/spack/llnl"])
sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/spack'])
sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/llnl'])
# Enable todo items
todo_include_todos = True
@@ -87,78 +82,60 @@
#
# Disable duplicate cross-reference warnings.
#
from sphinx.domains.python import PythonDomain
class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if "refspecific" in node:
del node["refspecific"]
if 'refspecific' in node:
del node['refspecific']
return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode
)
#
# Disable tabs to space expansion in code blocks
# since Makefiles require tabs.
#
class NoTabExpansionRSTParser(RSTParser):
def parse(self, inputstring, document):
if isinstance(inputstring, str):
lines = inputstring.splitlines()
inputstring = StringList(lines, document.current_source)
super().parse(inputstring, document)
env, fromdocname, builder, typ, target, node, contnode)
def setup(sphinx):
sphinx.add_domain(PatchedPythonDomain, override=True)
sphinx.add_source_parser(NoTabExpansionRSTParser, override=True)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.4"
needs_sphinx = '3.4'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_design",
"sphinxcontrib.programoutput",
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.programoutput',
]
# Set default graphviz options
graphviz_dot_args = [
"-Grankdir=LR",
"-Gbgcolor=transparent",
"-Nshape=box",
"-Nfontname=monaco",
"-Nfontsize=10",
]
'-Grankdir=LR', '-Gbgcolor=transparent',
'-Nshape=box', '-Nfontname=monaco', '-Nfontsize=10']
# Get nice vector graphics
graphviz_output_format = "svg"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ".rst"
source_suffix = '.rst'
# The encoding of source files.
source_encoding = "utf-8-sig"
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
master_doc = 'index'
# General information about the project.
project = u"Spack"
copyright = u"2013-2021, Lawrence Livermore National Laboratory."
project = u'Spack'
copyright = u'2013-2021, Lawrence Livermore National Laboratory.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -167,16 +144,16 @@ def setup(sphinx):
# The short X.Y version.
import spack
version = ".".join(str(s) for s in spack.spack_version_info[:2])
version = '.'.join(str(s) for s in spack.spack_version_info[:2])
# The full version, including alpha/beta/rc tags.
release = spack.spack_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
#language = None
# Places to look for .po/.mo files for doc translations
# locale_dirs = []
#locale_dirs = []
# Sphinx gettext settings
gettext_compact = True
@@ -184,43 +161,41 @@ def setup(sphinx):
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
#today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_spack_root", ".spack-env"]
exclude_patterns = ['_build', '_spack_root', '.spack-env']
nitpicky = True
nitpick_ignore = [
# Python classes that intersphinx is unable to resolve
("py:class", "argparse.HelpFormatter"),
("py:class", "contextlib.contextmanager"),
("py:class", "module"),
("py:class", "_io.BufferedReader"),
("py:class", "unittest.case.TestCase"),
("py:class", "_frozen_importlib_external.SourceFileLoader"),
("py:class", "clingo.Control"),
("py:class", "six.moves.urllib.parse.ParseResult"),
('py:class', 'argparse.HelpFormatter'),
('py:class', 'contextlib.contextmanager'),
('py:class', 'module'),
('py:class', '_io.BufferedReader'),
('py:class', 'unittest.case.TestCase'),
('py:class', '_frozen_importlib_external.SourceFileLoader'),
# Spack classes that are private and we don't want to expose
("py:class", "spack.provider_index._IndexBase"),
("py:class", "spack.repo._PrependFileLoader"),
('py:class', 'spack.provider_index._IndexBase'),
('py:class', 'spack.repo._PrependFileLoader'),
]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# We use our own extension of the default style with a few modifications
@@ -231,151 +206,156 @@ def setup(sphinx):
class SpackStyle(DefaultStyle):
styles = DefaultStyle.styles.copy()
background_color = "#f4f4f8"
background_color = "#f4f4f8"
styles[Generic.Output] = "#355"
styles[Generic.Prompt] = "bold #346ec9"
import pkg_resources
dist = pkg_resources.Distribution(__file__)
sys.path.append(".") # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse("spack = conf:SpackStyle", dist=dist)
dist._ep_map = {"pygments.styles": {"plugin1": ep}}
sys.path.append('.') # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse('spack = conf:SpackStyle', dist=dist)
dist._ep_map = {'pygments.styles': {'plugin1': ep}}
pkg_resources.working_set.add(dist)
pygments_style = "spack"
pygments_style = 'spack'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"logo_only": True}
html_theme_options = { 'logo_only' : True }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_spack_root/share/spack/logo/spack-logo-white-text.svg"
html_logo = '_spack_root/share/spack/logo/spack-logo-white-text.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_spack_root/share/spack/logo/favicon.ico"
html_favicon = '_spack_root/share/spack/logo/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
#html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
#html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
#html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = False
#html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Spackdoc"
htmlhelp_basename = 'Spackdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "Spack.tex", u"Spack Documentation", u"Todd Gamblin", "manual"),
('index', 'Spack.tex', u'Spack Documentation',
u'Todd Gamblin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
#latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
#latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "spack", u"Spack Documentation", [u"Todd Gamblin"], 1)]
man_pages = [
('index', 'spack', u'Spack Documentation',
[u'Todd Gamblin'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@@ -384,25 +364,19 @@ class SpackStyle(DefaultStyle):
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Spack",
u"Spack Documentation",
u"Todd Gamblin",
"Spack",
"One line description of project.",
"Miscellaneous",
),
('index', 'Spack', u'Spack Documentation',
u'Todd Gamblin', 'Spack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
#texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
#texinfo_show_urls = 'footnote'
# -- Extension configuration -------------------------------------------------

View File

@@ -19,9 +19,9 @@ see the default settings by looking at
These settings can be overridden in ``etc/spack/config.yaml`` or
``~/.spack/config.yaml``. See :ref:`configuration-scopes` for details.
---------------------
``install_tree:root``
---------------------
--------------------
``install_tree``
--------------------
The location where Spack will install packages and their dependencies.
Default is ``$spack/opt/spack``.

View File

@@ -59,8 +59,7 @@ other techniques to minimize the size of the final image:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -109,10 +108,9 @@ Spack Images on Docker Hub
--------------------------
Docker images with Spack preinstalled and ready to be used are
built when a release is tagged, or nightly on ``develop``. The images
are then pushed both to `Docker Hub <https://hub.docker.com/u/spack>`_
and to `GitHub Container Registry <https://github.com/orgs/spack/packages?repo_name=spack>`_.
The operating systems that are currently supported are summarized in the table below:
built on `Docker Hub <https://hub.docker.com/u/spack>`_
at every push to ``develop`` or to a release branch. The operating systems
that are currently supported are summarized in the table below:
.. _containers-supported-os:
@@ -122,31 +120,22 @@ The OS that are currently supported are summarized in the table below:
* - Operating System
- Base Image
- Spack Image
* - Ubuntu 16.04
- ``ubuntu:16.04``
- ``spack/ubuntu-xenial``
* - Ubuntu 18.04
- ``ubuntu:18.04``
- ``spack/ubuntu-bionic``
* - Ubuntu 20.04
- ``ubuntu:20.04``
- ``spack/ubuntu-focal``
* - Ubuntu 22.04
- ``ubuntu:22.04``
- ``spack/ubuntu-jammy``
* - CentOS 7
- ``centos:7``
- ``spack/centos7``
* - CentOS Stream
- ``quay.io/centos/centos:stream``
- ``spack/centos-stream``
* - openSUSE Leap
- ``opensuse/leap``
- ``spack/leap15``
* - Amazon Linux 2
- ``amazonlinux:2``
- ``spack/amazon-linux``
All the images are tagged with the corresponding release of Spack:
.. image:: images/ghcr_spack.png
.. image:: dockerhub_spack.png
with the exception of the ``latest`` tag that points to the HEAD
of the ``develop`` branch. These images are available for anyone
@@ -256,8 +245,7 @@ software is respectively built and installed:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -378,8 +366,7 @@ produces, for instance, the following ``Dockerfile``:
&& echo " externals:" \
&& echo " - spec: cuda%gcc" \
&& echo " prefix: /usr/local/cuda" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml

View File

@@ -71,7 +71,7 @@ locally to speed up the review process.
new release that is causing problems. If this is the case, please file an issue.
We currently test against Python 2.7 and 3.6-3.10 on both macOS and Linux and
We currently test against Python 2.7 and 3.5-3.9 on both macOS and Linux and
perform 3 types of tests:
.. _cmd-spack-unit-test:

View File

@@ -107,6 +107,7 @@ with a high level view of Spack's directory structure:
llnl/                  <- some general-use libraries
spack/                 <- spack module; contains Python code
   analyzers/          <- modules to run analysis on installed packages
   build_systems/      <- modules for different build systems
   cmd/                <- each file in here is a spack subcommand
   compilers/          <- compiler description files
@@ -150,7 +151,7 @@ Package-related modules
^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.package`
Contains the :class:`~spack.package_base.Package` class, which
Contains the :class:`~spack.package.Package` class, which
is the superclass for all packages in Spack. Methods on ``Package``
implement all phases of the :ref:`package lifecycle
<package-lifecycle>` and manage the build process.
@@ -241,6 +242,22 @@ Unit tests
Implements Spack's test suite. Add a module and put its name in
the test suite in ``__init__.py`` to add more unit tests.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Research and Monitoring Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.monitor`
Contains :class:`~spack.monitor.SpackMonitorClient`. This is accessed from
the ``spack install`` and ``spack analyze`` commands to send build and
package metadata up to a `Spack Monitor
<https://github.com/spack/spack-monitor>`_ server.
:mod:`spack.analyzers`
A module folder with a :class:`~spack.analyzers.analyzer_base.AnalyzerBase`
that provides base functions to run, save, and (optionally) upload analysis
results to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.
^^^^^^^^^^^^^
Other Modules
@@ -284,6 +301,240 @@ Most spack commands look something like this:
The information in Package files is used at all stages in this
process.
Conceptually, packages are overloaded. They contain:
-------------
Stage objects
-------------
.. _writing-analyzers:
-----------------
Writing analyzers
-----------------
To write an analyzer, add a new Python file to the
analyzers module directory at ``lib/spack/spack/analyzers``.
Your analyzer should be a subclass of the :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`. For example, if you want
to add an analyzer class ``Myanalyzer`` you would write to
``spack/analyzers/myanalyzer.py`` and import and
use the base as follows:
.. code-block:: python
from .analyzer_base import AnalyzerBase
class Myanalyzer(AnalyzerBase):
    # name, outfile, description and a run() method go here
    ...
Note that the class name matches your module file name: all lowercase,
except that the first letter is capitalized. You can look at other analyzers in
the analyzers directory for examples. This guide covers the basic functions needed.
^^^^^^^^^^^^^^^^^^^^^^^^^
Analyzer Output Directory
^^^^^^^^^^^^^^^^^^^^^^^^^
By default, when you run ``spack analyze run``, an analyzer output directory will
be created in your Spack user directory under ``$HOME``. We write output here
because the install directory might not always be writable.
.. code-block:: console
~/.spack/
analyzers
Result files will be written here, organized in subfolders in the same structure
as the package, with each analyzer owning its own subfolder. For example:
.. code-block:: console
$ tree ~/.spack/analyzers/
/home/spackuser/.spack/analyzers/
└── linux-ubuntu20.04-skylake
└── gcc-9.3.0
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
├── environment_variables
│   └── spack-analyzer-environment-variables.json
├── install_files
│   └── spack-analyzer-install-files.json
└── libabigail
└── lib
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
Notice that for the libabigail analyzer, since results are generated per object,
we preserve each object's folder in case there are equivalently named files in
different folders. The result files are typically written as JSON so they can be
easily read and uploaded in a future interaction with a monitor.
^^^^^^^^^^^^^^^^^
Analyzer Metadata
^^^^^^^^^^^^^^^^^
Your analyzer is required to have the class attributes ``name``, ``outfile``,
and ``description``. These are printed to the user when they use the subcommand
``spack analyze list-analyzers``. Here is an example.
As we mentioned above, note that this analyzer would live in a module named
``libabigail.py`` in the analyzers folder so that the class can be discovered.
.. code-block:: python
class Libabigail(AnalyzerBase):
name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"
This means that the name and output file should be unique for your analyzer.
Note that "all" cannot be the name of an analyzer, as this key is used to indicate
that the user wants to run all analyzers.
.. _analyzer_run_function:
^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer run Function
^^^^^^^^^^^^^^^^^^^^^^^^
The core of an analyzer is its ``run()`` function, which should accept no
arguments. You can assume your analyzer has the package spec of interest at ``self.spec``;
it is up to the run function to generate whatever analysis data you need
and then return an object keyed by the analyzer name. The result data
should be a list of objects, each with a ``name``, ``analyzer_name``, ``install_file``,
and one of ``value`` or ``binary_value``. The install file should be a relative
path, not an absolute path. For example, let's say we extract a metric called
``metric`` for ``bin/wget`` using our analyzer ``thebest-analyzer``.
We might have data that looks like this:
.. code-block:: python
result = {"name": "metric", "analyzer_name": "thebest-analyzer", "value": "1", "install_file": "bin/wget"}
We'd then return it as follows - note that the key is the analyzer name at ``self.name``.
.. code-block:: python
return {self.name: result}
This will save the complete result to the analyzer metadata folder, as described
previously. If you want support for adding a different kind of metadata (e.g.,
not associated with an install file) then the monitor server would need to be updated
to support this first.
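Putting the pieces above together, a minimal, hypothetical analyzer might
look like the sketch below (the "metric" is a placeholder; a real analyzer
would compute something meaningful per install file):

.. code-block:: python

   import os

   from .analyzer_base import AnalyzerBase


   class ThebestAnalyzer(AnalyzerBase):
       name = "thebest-analyzer"
       outfile = "spack-analyzer-thebest-analyzer.json"
       description = "extract one illustrative metric per binary"

       def run(self):
           results = []
           bindir = os.path.join(self.spec.prefix, "bin")
           if os.path.isdir(bindir):
               for filename in os.listdir(bindir):
                   results.append({
                       "name": "metric",
                       "analyzer_name": self.name,
                       "value": "1",  # the computed metric would go here
                       # install_file must be relative to the prefix
                       "install_file": os.path.join("bin", filename),
                   })
           # Key the returned data by the analyzer name
           return {self.name: results}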
^^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer init Function
^^^^^^^^^^^^^^^^^^^^^^^^^
If you don't need any extra dependencies or checks, you can skip defining an analyzer
init function, as the base class will handle it. Typically, it will accept
a spec, and an optional output directory (if the user does not want the default
metadata folder for analyzer results). The analyzer init function should call
its parent init, and then do any extra checks or validation that are required to
work. For example:
.. code-block:: python
def __init__(self, spec, dirname=None):
super(Myanalyzer, self).__init__(spec, dirname)
# install extra dependencies, do extra preparation and checks here
At the end of the init, you will have available to you:
- **self.spec**: the spec object
- **self.dirname**: an optional directory name the user has provided at init to save
- **self.output_dir**: the analyzer metadata directory, where we save by default
- **self.meta_dir**: the path to the package metadata directory (.spack) if you need it
And can proceed to write your analyzer.
^^^^^^^^^^^^^^^^^^^^^^^
Saving Analyzer Results
^^^^^^^^^^^^^^^^^^^^^^^
The analyzer will have ``save_result`` called with the result object generated
by ``run()`` in order to save it to the filesystem, and, if the user has added the
``--monitor`` flag, to upload it to a monitor server. If your result follows an accepted result
format and you don't need to parse it further, you don't need to add this
function to your class. However, if your result data is large or otherwise
needs additional parsing, you can define it. If you define the function, it
is useful to know about the ``output_dir`` property, which you can join
with your output file relative path of choice:
.. code-block:: python
outfile = os.path.join(self.output_dir, "my-output-file.txt")
The directory will be provided by the ``output_dir`` property but it won't exist,
so you should create it:
.. code-block:: python
# Create the output directory
if not os.path.exists(self.output_dir):
    os.makedirs(self.output_dir)
If you are generating results that match to specific files in the package
install directory, you should try to maintain those paths in the case that
there are equivalently named files in different directories that would
overwrite one another. As an example of an analyzer with a custom save,
the Libabigail analyzer saves ``*.xml`` files to the analyzer metadata
folder in ``run()``, as they are either binaries, or as xml (text) would
usually be too big to pass in one request. For this reason, the files
are saved during ``run()`` and the filenames added to the result object,
and then when the result object is passed back into ``save_result()``,
we skip saving to the filesystem, and instead read the file and send
each one (separately) to the monitor:
.. code-block:: python
def save_result(self, result, monitor=None, overwrite=False):
"""ABI results are saved to individual files, so each one needs to be
read and uploaded. Result here should be the lookup generated in run(),
the key is the analyzer name, and each value is the result file.
We currently upload the entire xml as text because libabigail can't
easily read gzipped xml, but this will be updated when it can.
"""
if not monitor:
return
name = self.spec.package.name
for obj, filename in result.get(self.name, {}).items():
# Don't include the prefix
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
# We've already saved the results to file during run
content = spack.monitor.read_file(filename)
# A result needs an analyzer, value or binary_value, and name
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
monitor.send_analyze_metadata(self.spec.package, {"libabigail": [data]})
Notice that this function, if you define it, requires a result object (generated by
``run()``), a monitor (if you want to send results), and a boolean ``overwrite`` used
to check whether a result exists first, and to avoid writing to it if the result exists
and ``overwrite`` is False. Also notice that since we already saved these files to the
analyzer metadata folder, we return early if a monitor isn't defined, because this
function serves to send results to the monitor. If you haven't saved anything to the
analyzer metadata folder yet, you might want to do that here. You should also use
``tty.info`` to give the user a message of "Writing result to $DIRNAME."
.. _writing-commands:
@@ -448,6 +699,23 @@ with a hook, and this is the purpose of this particular hook. Akin to
``on_phase_success`` we require the same variables - the package that failed,
the name of the phase, and the log file where we might find errors.
"""""""""""""""""""""""""""""""""
``on_analyzer_save(pkg, result)``
"""""""""""""""""""""""""""""""""
After an analyzer has saved some result for a package, this hook is called,
and it provides the package that we just ran the analysis for, along with
the loaded result. Typically, a result is structured to have the name
of the analyzer as key, and the result object that is defined in detail in
:ref:`analyzer_run_function`.
.. code-block:: python
def on_analyzer_save(pkg, result):
"""given a package and a result...
"""
print('Do something extra with a package analysis result here')
^^^^^^^^^^^^^^^^^^^^^^
Adding a New Hook Type
@@ -965,13 +1233,8 @@ completed, the steps to make the point release are:
$ git checkout releases/v0.15
#. If a pull request to the release branch named ``Backports vX.Y.Z`` is not already
in the project, create it. This pull request ought to be created as early as
possible when working on a release project, so that we can build the release
commits incrementally, and identify potential conflicts at an early stage.
#. Cherry-pick each pull request in the ``Done`` column of the release
project board onto the ``Backports vX.Y.Z`` pull request.
project board onto the release branch.
This is **usually** fairly simple since we squash the commits from the
vast majority of pull requests. That means there is only one commit
@@ -996,7 +1259,7 @@ completed, the steps to make the point release are:
It is important to cherry-pick commits in the order they happened,
otherwise you can get conflicts while cherry-picking. When
cherry-picking, look at the merge date,
cherry-picking onto a point release, look at the merge date,
**not** the number of the pull request or the date it was opened.
Sometimes you may **still** get merge conflicts even if you have
@@ -1017,19 +1280,15 @@ completed, the steps to make the point release are:
branch if neither of the above options makes sense, but this can
require a lot of work. It's seldom the right choice.
#. When all the commits from the project board are cherry-picked into
the ``Backports vX.Y.Z`` pull request, you can push a commit to:
#. Bump the version in ``lib/spack/spack/__init__.py``.
1. Bump the version in ``lib/spack/spack/__init__.py``.
2. Update ``CHANGELOG.md`` with a list of the changes.
#. Update ``CHANGELOG.md`` with a list of the changes.
This is typically a summary of the commits you cherry-picked onto the
release branch. See `the changelog from 0.14.1
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
is needed to keep track in the release branch of all the commits that were
cherry-picked.
#. Push the release branch to GitHub.
#. Make sure CI passes on the release branch, including:
@@ -1048,8 +1307,6 @@ completed, the steps to make the point release are:
#. Follow the steps in :ref:`announcing-releases`.
#. Submit a PR to update the CHANGELOG in the `develop` branch
with the addition of this point release.
.. _publishing-releases:

Binary file not shown.


View File

@@ -273,9 +273,19 @@ or
Concretizing
^^^^^^^^^^^^
Once some user specs have been added to an environment, they can be concretized.
There are currently three different modes of operation for concretizing an environment,
which are explained in detail in :ref:`environments_concretization_config`.
Once some user specs have been added to an environment, they can be
concretized. *By default specs are concretized separately*, one after
the other. This mode of operation makes it possible to deploy a full
software stack where multiple configurations of the same package
need to be installed alongside each other. Central installations done
at HPC centers by system administrators or user support groups
are a common case that fits this behavior.
Environments *can also be configured to concretize all
the root specs in a self-consistent way* to ensure that
each package in the environment comes with a single configuration. This
mode of operation is usually what is required by software developers who
want to deploy their development environment.
Regardless of which mode of operation has been chosen, the following
command will ensure all the root specs are concretized according to the
constraints that are prescribed in the configuration:
@@ -339,24 +349,6 @@ If the Environment has been concretized, Spack will install the
concretized specs. Otherwise, ``spack install`` will first concretize
the Environment and then install the concretized specs.
.. note::
Every ``spack install`` process builds one package at a time with multiple build
jobs, controlled by the ``-j`` flag and the ``config:build_jobs`` option
(see :ref:`build-jobs`). To speed up environment builds further, independent
packages can be installed in parallel by launching more Spack instances. For
example, the following will build at most four packages in parallel using
three background jobs:
.. code-block:: console
[myenv]$ spack install & spack install & spack install & spack install
Another option is to generate a ``Makefile`` and run ``make -j<N>`` to control
the number of parallel install processes. See :ref:`env-generate-depfile`
for details.
As it installs, ``spack install`` creates symbolic links in the
``logs/`` directory in the Environment, allowing for easy inspection
of build logs related to that environment. The ``spack install``
@@ -376,30 +368,6 @@ from being added again. At the same time, a spec that already exists in the
environment, but only as a dependency, will be added to the environment as a
root spec without the ``--no-add`` option.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Developing Packages in a Spack Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``spack develop`` command allows one to develop Spack packages in
an environment. It requires a spec containing a concrete version, and
will configure Spack to install the package from local source. By
default, it will also clone the package to a subdirectory in the
environment. This package will have a special variant ``dev_path``
set, and Spack will ensure the package and its dependents are rebuilt
any time the environment is installed if the package's local source
code has been modified. Spack ensures that all instances of a
developed package in the environment are concretized to match the
version (and other constraints) passed as the spec argument to the
``spack develop`` command.
For packages with ``git`` attributes, git branches, tags, and commits can
also be used as valid concrete versions (see :ref:`version-specifier`).
This means that for a package ``foo``, ``spack develop foo@git.main`` will clone
the ``main`` branch of the package, and ``spack install`` will install from
that git clone if ``foo`` is in the environment.
Further development on ``foo`` can be tested by reinstalling the environment,
and eventually committed and pushed to the upstream git repo.
^^^^^^^
Loading
^^^^^^^
@@ -478,21 +446,14 @@ them to the Environment.
spack:
include:
- relative/path/to/config.yaml
- https://github.com/path/to/raw/config/compilers.yaml
- /absolute/path/to/packages.yaml
Environments can include files or URLs. File paths can be relative or
absolute. URLs include the path to the text for individual files or
can be the path to a directory containing configuration files.
^^^^^^^^^^^^^^^^^^^^^^^^
Configuration precedence
^^^^^^^^^^^^^^^^^^^^^^^^
Inline configurations take precedence over included configurations, so
you don't have to change shared configuration files to make small changes
to an individual environment. Included configurations listed earlier will
have higher precedence, as the included configs are applied in reverse order.
Environments can include files with either relative or absolute
paths. Inline configurations take precedence over included
configurations, so you don't have to change shared configuration files
to make small changes to an individual Environment. Included configs
listed earlier will have higher precedence, as the included configs are
applied in reverse order.
-------------------------------
Manually Editing the Specs List
@@ -514,76 +475,32 @@ Appending to this list in the yaml is identical to using the ``spack
add`` command from the command line. However, there is more power
available from the yaml file.
.. _environments_concretization_config:
^^^^^^^^^^^^^^^^^^^
Spec concretization
^^^^^^^^^^^^^^^^^^^
An environment can be concretized in three different modes and the behavior active under any environment
is determined by the ``concretizer:unify`` property. By default specs are concretized *separately*, one after the other:
Specs can be concretized separately or together, as already
explained in :ref:`environments_concretization`. The behavior active
under any environment is determined by the ``concretization`` property:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: false
- ncview
- netcdf
- nco
- py-sphinx
concretization: together
This mode of operation makes it possible to deploy a full software stack where multiple configurations of the same package
need to be installed alongside each other using the best possible selection of transitive dependencies. The downside
is that redundancy of installations is disregarded completely, and thus environments might be more bloated than
strictly needed. In the example above, for instance, if a version of ``zlib`` newer than ``1.2.8`` is known to Spack,
then it will be used for both ``hdf5`` installations.
If redundancy of the environment is a concern, Spack provides a way to install it *together where possible*,
i.e. trying to maximize reuse of dependencies across different specs:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
In this case too, Spack allows multiple configurations of the same package, but prioritizes the reuse of
specs over other factors. Going back to our example, this means that both ``hdf5`` installations will use
``zlib@1.2.8`` as a dependency even if newer versions of that library are available.
Central installations done at HPC centers by system administrators or user support groups are a common case
that fits either of these two modes.
Environments can also be configured to concretize all the root specs *together*, in a self-consistent way, to
ensure that each package in the environment comes with a single configuration:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This mode of operation is usually what is required by software developers who want to deploy their development
environment and have a single view of it in the filesystem.
.. note::
The ``concretizer:unify`` config option was introduced in Spack 0.18 to
replace the ``concretization`` property. For reference,
``concretization: together`` is replaced by ``concretizer:unify:true``,
and ``concretization: separately`` is replaced by ``concretizer:unify:false``.
which can currently take either one of the two allowed values ``together`` or ``separately``
(the default).
.. admonition:: Re-concretization of user specs
When concretizing specs *together* or *together where possible* the entire set of specs will be
When concretizing specs together the entire set of specs will be
re-concretized after any addition of new user specs, to ensure that
the environment remains consistent / minimal. When instead the specs are concretized
the environment remains consistent. When instead the specs are concretized
separately only the new specs will be re-concretized after any addition.
^^^^^^^^^^^^^
@@ -623,6 +540,31 @@ The following two Environment manifests are identical:
Spec matrices can be used to install swaths of software across various
toolchains.
The concretization logic for spec matrices differs slightly from the
rest of Spack. If a variant or dependency constraint from a matrix is
invalid, Spack will reject the constraint and try again without
it. For example, the following two Environment manifests will produce
the same specs:
.. code-block:: yaml
spack:
specs:
- matrix:
- [zlib, libelf, hdf5+mpi]
- [^mvapich2@2.2, ^openmpi@3.1.0]
spack:
specs:
- zlib
- libelf
- hdf5+mpi ^mvapich2@2.2
- hdf5+mpi ^openmpi@3.1.0
This allows one to create toolchains out of combinations of
constraints and apply them somewhat indiscriminately to packages,
without regard for the applicability of the constraint.
^^^^^^^^^^^^^^^^^^^^
Spec List References
^^^^^^^^^^^^^^^^^^^^
@@ -805,7 +747,7 @@ directories.
select: [^mpi]
exclude: ['%pgi@18.5']
projections:
all: '{name}/{version}-{compiler.name}'
all: {name}/{version}-{compiler.name}
link: all
link_type: symlink
@@ -954,6 +896,9 @@ Variable Paths
PATH                bin
MANPATH             man, share/man
ACLOCAL_PATH        share/aclocal
LD_LIBRARY_PATH     lib, lib64
LIBRARY_PATH        lib, lib64
CPATH               include
PKG_CONFIG_PATH     lib/pkgconfig, lib64/pkgconfig, share/pkgconfig
CMAKE_PREFIX_PATH   .
=================== =========
@@ -965,117 +910,3 @@ environment.
The ``spack env deactivate`` command will remove the default view of
the environment from the user's path.
.. _env-generate-depfile:
------------------------------------------
Generating Depfiles from Environments
------------------------------------------
Spack can generate ``Makefile``\s to make it easier to build multiple
packages in an environment in parallel. Generated ``Makefile``\s expose
targets that can be included in existing ``Makefile``\s, to allow
other targets to depend on the environment installation.
A typical workflow is as follows:
.. code:: console
spack env create -d .
spack -e . add perl
spack -e . concretize
spack -e . env depfile -o Makefile
make -j64
This generates a ``Makefile`` from a concretized environment in the
current working directory, and ``make -j64`` installs the environment,
exploiting parallelism across packages as much as possible. Spack
respects the Make jobserver and forwards it to the build environment
of packages, meaning that a single ``-j`` flag is enough to control the
load, even when packages are built in parallel.
By default the following phony convenience targets are available:
- ``make all``: installs the environment (default target);
- ``make clean``: cleans files used by make, but does not uninstall packages.
.. tip::
GNU Make version 4.3 and above have great support for output synchronization
through the ``-O`` and ``--output-sync`` flags, which ensure that output is
printed orderly per package install. To get synchronized output with colors,
use ``make -j<N> SPACK_COLOR=always --output-sync=recurse``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specifying dependencies on generated ``make`` targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
An interesting question is how to include generated ``Makefile``\s in your own
``Makefile``\s. This comes up when you want to install an environment that provides
executables required in a command for a make target of your own.
The example below shows how to accomplish this: the ``env`` target specifies
the generated ``spack/env`` target as a prerequisite, meaning that the environment
gets installed and is available for use in the ``env`` target.
.. code:: Makefile
SPACK ?= spack
.PHONY: all clean env
all: env
spack.lock: spack.yaml
$(SPACK) -e . concretize -f
env.mk: spack.lock
$(SPACK) -e . env depfile -o $@ --make-target-prefix spack
env: spack/env
$(info Environment installed!)
clean:
rm -rf spack.lock env.mk spack/
ifeq (,$(filter clean,$(MAKECMDGOALS)))
include env.mk
endif
This works as follows: when ``make`` is invoked, it first "remakes" the missing
include ``env.mk`` as there is a target for it. This triggers concretization of
the environment and makes spack output ``env.mk``. At that point the
generated target ``spack/env`` becomes available through ``include env.mk``.
As it is typically undesirable to remake ``env.mk`` as part of ``make clean``,
the include is conditional.
.. note::
When including generated ``Makefile``\s, it is important to use
the ``--make-target-prefix`` flag and use the non-phony target
``<target-prefix>/env`` as prerequisite, instead of the phony target
``<target-prefix>/all``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Building a subset of the environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The generated ``Makefile``\s contain install targets for each spec. Given the hash
of a particular spec, you can use the ``.install/<hash>`` target to install the
spec with its dependencies. There is also ``.install-deps/<hash>`` to *only* install
its dependencies. This can be useful when certain flags should only apply to
dependencies. Below we show a use case where a spec is installed with verbose
output (``spack install --verbose``) while its dependencies are installed silently:
.. code:: console
$ spack env depfile -o Makefile --make-target-prefix my_env
# Install dependencies in parallel, only show a log on error.
$ make -j16 my_env/.install-deps/<hash> SPACK_INSTALL_FLAGS=--show-log-on-error
# Install the root spec with verbose output.
$ make -j16 my_env/.install/<hash> SPACK_INSTALL_FLAGS=--verbose

View File

@@ -23,36 +23,8 @@ be present on the machine where Spack is run:
These requirements can be easily installed on most modern Linux systems;
on macOS, XCode is required. Spack is designed to run on HPC
platforms like Cray. Not all packages should be expected
to work on all platforms.
A build matrix showing which packages are working on which systems is shown below.
.. tab-set::
.. tab-item:: Debian/Ubuntu
.. code-block:: console
apt update
apt install build-essential ca-certificates coreutils curl environment-modules gfortran git gpg lsb-release python3 python3-distutils python3-venv unzip zip
.. tab-item:: RHEL
.. code-block:: console
yum update -y
yum install -y epel-release
yum update -y
yum --enablerepo epel groupinstall -y "Development Tools"
yum --enablerepo epel install -y curl findutils gcc-c++ gcc gcc-gfortran git gnupg2 hostname iproute make patch python3 python3-pip python3-setuptools unzip
python3 -m pip install boto3
.. tab-item:: macOS Brew
.. code-block:: console
brew update
brew install curl gcc git gnupg zip
to work on all platforms. A build matrix showing which packages are
working on which systems is planned but not yet available.
------------
Installation

Binary file not shown.


View File

@@ -56,7 +56,6 @@ or refer to the full manual below.
basic_usage
Tutorial: Spack 101 <https://spack-tutorial.readthedocs.io>
replace_conda_homebrew
known_issues
.. toctree::
:maxdepth: 2
@@ -64,7 +63,6 @@ or refer to the full manual below.
configuration
config_yaml
bootstrapping
build_settings
environments
containers

View File

@@ -1,40 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
============
Known Issues
============
This is a list of known issues in Spack. It provides ways of getting around these
problems if you encounter them.
------------------------------------------------
Spack does not seem to respect ``packages.yaml``
------------------------------------------------
.. note::
This issue is **resolved** as of v0.19.0.dev0 commit
`8281a0c5feabfc4fe180846d6fe95cfe53420bc5`, through the introduction of package
requirements. See :ref:`package-requirements`.
A common problem in Spack v0.18.0 up to v0.19.0.dev0 is that package, compiler and target
preferences specified in ``packages.yaml`` do not seem to be respected. Spack picks the
"wrong" compilers and their versions, package versions and variants, and
micro-architectures.
This is however not a bug. In order to reduce the number of builds of the same
packages, the concretizer values reuse of already-installed packages more highly than the
preferences set in ``packages.yaml``. Note that ``packages.yaml`` specifies only preferences,
not hard constraints.
There are multiple workarounds:
1. Disable reuse during concretization: ``spack install --fresh <spec>`` when installing
from the command line, or ``spack concretize --fresh --force`` when using
environments.
2. Turn preferences into constraints, by moving them to the input spec. For example,
use ``spack spec zlib%gcc@12`` when you want to force GCC 12 even if ``zlib`` was
already installed with GCC 10.

View File

@@ -77,7 +77,7 @@ installation of a package.
Spack only generates modulefiles when a package is installed. If
you attempt to install a package and it is already installed, Spack
will not regenerate modulefiles for the package. This may lead to
will not regenerate modulefiles for the package. This may to
inconsistent modulefiles if the Spack module configuration has
changed since the package was installed, either by editing a file
or changing scopes or environments.
@@ -113,8 +113,6 @@ from language interpreters into their extensions. The latter two instead permit
fine tune the filesystem layout, content and creation of module files to meet
site specific conventions.
.. _overide-api-calls-in-package-py:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Override API calls in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -136,7 +134,7 @@ The second method:
pass
can instead inject run-time environment modifications in the module files of packages
that depend on it. In both cases you need to fill ``env`` with the desired
that depend on it. In both cases you need to fill ``run_env`` with the desired
list of environment modifications.
.. admonition:: The ``r`` package and callback APIs
@@ -310,7 +308,7 @@ the variable ``FOOBAR`` will be unset.
spec constraints are instead evaluated top to bottom.
""""""""""""""""""""""""""""""""""""""""""""
Exclude or include specific module files
Blacklist or whitelist specific module files
""""""""""""""""""""""""""""""""""""""""""""
You can use anonymous specs also to prevent module files from being written or
@@ -324,8 +322,8 @@ your system. If you write a configuration file like:
modules:
default:
tcl:
include: ['gcc', 'llvm'] # include will have precedence over exclude
exclude: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler
whitelist: ['gcc', 'llvm'] # Whitelist will have precedence over blacklist
blacklist: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler
you will prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
@@ -492,7 +490,7 @@ satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.
.. warning::
.. warning::
If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
@@ -520,33 +518,18 @@ inspections and customize them per-module-set.
prefix_inspections:
bin:
- PATH
man:
- MANPATH
lib:
- LIBRARY_PATH
'':
- CMAKE_PREFIX_PATH
Prefix inspections are only applied if the relative path inside the
installation prefix exists. In this case, for a Spack package ``foo``
installed to ``/spack/prefix/foo``, if ``foo`` installs executables to
``bin`` but no manpages in ``man``, the generated module file for
``bin`` but no libraries in ``lib``, the generated module file for
``foo`` would update ``PATH`` to contain ``/spack/prefix/foo/bin`` and
``CMAKE_PREFIX_PATH`` to contain ``/spack/prefix/foo``, but would not
update ``MANPATH``.
The default list of environment variables in this config section
includes ``PATH``, ``MANPATH``, ``ACLOCAL_PATH``, ``PKG_CONFIG_PATH``
and ``CMAKE_PREFIX_PATH``, as well as ``DYLD_FALLBACK_LIBRARY_PATH``
on macOS. On Linux however, the corresponding ``LD_LIBRARY_PATH``
variable is *not* set, because it affects the behavior of
system executables too.
.. note::
In general, the ``LD_LIBRARY_PATH`` variable is not required
when using packages built with Spack, thanks to the use of RPATH.
Some packages may still need the variable, which is best handled
on a per-package basis instead of globally, as explained in
:ref:`overide-api-calls-in-package-py`.
update ``LIBRARY_PATH``.
There is a special case for prefix inspections relative to environment
views. If all of the following conditions hold for a module set
@@ -606,7 +589,7 @@ Filter out environment modifications
Modifications to certain environment variables in module files are there by
default, for instance because they are generated by prefix inspections.
If you want to prevent modifications to some environment variables, you can
do so by using the ``exclude_env_vars``:
do so by using the environment blacklist:
.. code-block:: yaml
@@ -616,7 +599,7 @@ do so by using the ``exclude_env_vars``:
all:
filter:
# Exclude changes to any of these variables
exclude_env_vars: ['CPATH', 'LIBRARY_PATH']
environment_blacklist: ['CPATH', 'LIBRARY_PATH']
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.

View File

@@ -126,7 +126,7 @@ generates a boilerplate template for your package, and opens up the new
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
from spack.package import *
from spack import *
class Gmp(AutotoolsPackage):
@@ -699,7 +699,7 @@ Spack versions may also be arbitrary non-numeric strings, for example
``@develop``, ``@master``, ``@local``.
The order on versions is defined as follows. A version string is split
into a list of components based on delimiters such as ``.``, ``-`` etc.
into a list of components based on delimiters such as ``.``, ``-`` etc.
Lists are then ordered lexicographically, where components are ordered
as follows:
@@ -1070,32 +1070,13 @@ Commits
Submodules
You can supply ``submodules=True`` to cause Spack to fetch submodules
recursively along with the repository at fetch time.
recursively along with the repository at fetch time. For more information
about git submodules see the manpage of git: ``man git-submodule``.
.. code-block:: python
version('1.0.1', tag='v1.0.1', submodules=True)
If a package needs more fine-grained control over submodules, define
``submodules`` to be a callable function that takes the package instance as
its only argument. The function should return a list of submodules to be fetched.
.. code-block:: python
def submodules(package):
submodules = []
if "+variant-1" in package.spec:
submodules.append("submodule_for_variant_1")
if "+variant-2" in package.spec:
submodules.append("submodule_for_variant_2")
return submodules
class MyPackage(Package):
version("0.1.0", submodules=submodules)
For more information about git submodules see the manpage of git: ``man
git-submodule``.
.. _github-fetch:
@@ -2283,17 +2264,9 @@ The following dependency types are available:
One of the advantages of the ``build`` dependency type is that although the
dependency needs to be installed in order for the package to be built, it
can be uninstalled without concern afterwards. ``link`` and ``run`` disallow
this because uninstalling the dependency would break the package.
``build``, ``link``, and ``run`` dependencies all affect the hash of Spack
packages (along with ``sha256`` sums of patches and archives used to build the
package, and a `canonical hash <https://github.com/spack/spack/pull/28156>`_ of
the ``package.py`` recipes). ``test`` dependencies do not affect the package
hash, as they are only used to construct a test environment *after* building and
installing a given package. Older versions of Spack did not include
build dependencies in the hash, but this has been
`fixed <https://github.com/spack/spack/pull/28504>`_ as of Spack
`v0.18 <https://github.com/spack/spack/releases/tag/v0.18.0>`_.
this because uninstalling the dependency would break the package. Another
consequence of this is that ``build``-only dependencies do not affect the
hash of the package. The same is true for ``test`` dependencies.
If the dependency type is not specified, Spack uses a default of
``('build', 'link')``. This is the common case for compiler languages.
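As a minimal sketch (the package and dependency names here are illustrative
assumptions, not taken from a real recipe), the type is passed as the
``type`` argument of ``depends_on``:

.. code-block:: python

   class Foo(Package):
       # needed only while building; can be uninstalled afterwards
       depends_on('cmake', type='build')
       # the default ('build', 'link'): needed to build and to link against
       depends_on('zlib')
       # needed only when the installed software runs
       depends_on('python', type='run')
       # needed only for stand-alone tests; does not affect the hash
       depends_on('py-pytest', type='test')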
@@ -2420,9 +2393,9 @@ Influence how dependents are built or run
Spack provides a mechanism for dependencies to influence the
environment of their dependents by overriding the
:meth:`setup_dependent_run_environment <spack.package_base.PackageBase.setup_dependent_run_environment>`
:meth:`setup_dependent_run_environment <spack.package.PackageBase.setup_dependent_run_environment>`
or the
:meth:`setup_dependent_build_environment <spack.package_base.PackageBase.setup_dependent_build_environment>`
:meth:`setup_dependent_build_environment <spack.package.PackageBase.setup_dependent_build_environment>`
methods.
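A minimal sketch of such an override (``FOO_ROOT`` is a hypothetical variable
name, not part of any real package):

.. code-block:: python

   class Foo(Package):
       def setup_dependent_build_environment(self, env, dependent_spec):
           # visible to dependents while they compile against this package
           env.set('FOO_ROOT', self.prefix)           # hypothetical variable
           env.prepend_path('PATH', self.prefix.bin)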
The Qt package, for instance, uses this call:
@@ -2444,7 +2417,7 @@ will have the ``PYTHONPATH``, ``PYTHONHOME`` and ``PATH`` environment
variables set appropriately before starting the installation. To make things
even simpler the ``python setup.py`` command is also inserted into the module
scope of dependents by overriding a third method called
:meth:`setup_dependent_package <spack.package_base.PackageBase.setup_dependent_package>`
:meth:`setup_dependent_package <spack.package.PackageBase.setup_dependent_package>`
:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/python/package.py
@@ -2802,256 +2775,6 @@ Suppose a user invokes ``spack install`` like this:
Spack will fail with a constraint violation, because the version of
MPICH requested is too low for the ``mpi`` requirement in ``foo``.
.. _custom-attributes:
------------------
Custom attributes
------------------
Often a package will need to provide attributes for dependents to query
various details about what it provides. While a package can implement any
number of custom attributes, the four attributes described below are always
available on every package with default implementations, and each can be
customized with an alternate implementation, including separate
implementations for any virtual packages the package provides:
=========== =========================================== =====================
Attribute   Purpose                                     Default
=========== =========================================== =====================
``home``    The installation path for the package      ``spec.prefix``
``command`` An executable command for the package      | ``spec.name`` found
                                                        | in ``.home.bin``
``headers`` A list of headers provided by the package  | All headers searched
                                                        | recursively in
                                                        | ``.home.include``
``libs``    A list of libraries provided by the package | ``lib{spec.name}``
                                                        | searched recursively
                                                        | in ``.home`` starting
                                                        | with ``lib``,
                                                        | ``lib64``, then the
                                                        | rest of ``.home``
=========== =========================================== =====================
Each of these can be customized by implementing the relevant attribute
as a ``@property`` in the package's class:
.. code-block:: python
:linenos:
class Foo(Package):
...
@property
def libs(self):
# The library provided by Foo is libMyFoo.so
return find_libraries('libMyFoo', root=self.home, recursive=True)
A package may also provide a custom implementation of each attribute
for the virtual packages it provides by implementing the
``virtualpackagename_attributename`` property in the package's class.
The implementation used is the first one found from:
#. Specialized virtual: ``Package.virtualpackagename_attributename``
#. Generic package: ``Package.attributename``
#. Default
The use of customized attributes is demonstrated in the next example.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Example: Customized attributes for virtual packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider a package ``foo`` that can optionally provide two virtual
packages ``bar`` and ``baz``. When both are enabled the installation tree
appears as follows:
.. code-block:: console
include/foo.h
include/bar/bar.h
lib64/libFoo.so
lib64/libFooBar.so
baz/include/baz/baz.h
baz/lib/libFooBaz.so
The install tree shows that ``foo`` is providing the header ``include/foo.h``
and library ``lib64/libFoo.so`` in its install prefix. The virtual
package ``bar`` is providing ``include/bar/bar.h`` and library
``lib64/libFooBar.so``, also in ``foo``'s install prefix. The ``baz``
package, however, is provided in the ``baz`` subdirectory of ``foo``'s
prefix with the ``include/baz/baz.h`` header and ``lib/libFooBaz.so``
library. Such a package could implement the optional attributes as
follows:
.. code-block:: python
:linenos:
class Foo(Package):
...
variant('bar', default=False, description='Enable the Foo implementation of bar')
variant('baz', default=False, description='Enable the Foo implementation of baz')
...
provides('bar', when='+bar')
provides('baz', when='+baz')
...
# Just the foo headers
@property
def headers(self):
return find_headers('foo', root=self.home.include, recursive=False)
# Just the foo libraries
@property
def libs(self):
return find_libraries('libFoo', root=self.home, recursive=True)
# The header provided by the bar virtual package
@property
def bar_headers(self):
return find_headers('bar/bar.h', root=self.home.include, recursive=False)
# The library provided by the bar virtual package
@property
def bar_libs(self):
return find_libraries('libFooBar', root=self.home, recursive=True)
# The baz virtual package home
@property
def baz_home(self):
return self.prefix.baz
# The header provided by the baz virtual package
@property
def baz_headers(self):
return find_headers('baz/baz', root=self.baz_home.include, recursive=False)
# The library provided by the baz virtual package
@property
def baz_libs(self):
return find_libraries('libFooBaz', root=self.baz_home, recursive=True)
Now consider another package, ``foo-app``, depending on all three:
.. code-block:: python
:linenos:
class FooApp(CMakePackage):
...
depends_on('foo')
depends_on('bar')
depends_on('baz')
The resulting spec objects for its dependencies show the result of
the above attribute implementations:
.. code-block:: python
# The core headers and libraries of the foo package
>>> spec['foo']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['foo'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# home defaults to the package install prefix without an explicit implementation
>>> spec['foo'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# foo headers from the foo prefix
>>> spec['foo'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include/foo.h',
])
# foo include directories from the foo prefix
>>> spec['foo'].headers.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include']
# foo libraries from the foo prefix
>>> spec['foo'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64/libFoo.so',
])
# foo library directories from the foo prefix
>>> spec['foo'].libs.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64']
.. code-block:: python
# The virtual bar package in the same prefix as foo
# bar resolves to the foo package
>>> spec['bar']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['bar'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# home defaults to the foo prefix without either a Foo.bar_home
# or Foo.home implementation
>>> spec['bar'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# bar header in the foo prefix
>>> spec['bar'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include/bar/bar.h'
])
# bar include dirs from the foo prefix
>>> spec['bar'].headers.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include']
# bar library from the foo prefix
>>> spec['bar'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64/libFooBar.so'
])
# bar library directories from the foo prefix
>>> spec['bar'].libs.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64']
.. code-block:: python
# The virtual baz package in a subdirectory of foo's prefix
# baz resolves to the foo package
>>> spec['baz']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['baz'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# baz_home implementation provides the subdirectory inside the foo prefix
>>> spec['baz'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz'
# baz headers in the baz subdirectory of the foo prefix
>>> spec['baz'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/include/baz/baz.h'
])
# baz include directories in the baz subdirectory of the foo prefix
>>> spec['baz'].headers.directories
[
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/include'
]
# baz libraries in the baz subdirectory of the foo prefix
>>> spec['baz'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib/libFooBaz.so'
])
# baz library directories in the baz subdirectory of the foo prefix
>>> spec['baz'].libs.directories
[
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib'
]
.. _abstract-and-concrete:
-------------------------
@@ -3299,7 +3022,7 @@ The classes that are currently provided by Spack are:
+----------------------------------------------------------+----------------------------------+
| **Base Class** | **Purpose** |
+==========================================================+==================================+
| :class:`~spack.package_base.Package` | General base class not |
| :class:`~spack.package.Package` | General base class not |
| | specialized for any build system |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.makefile.MakefilePackage` | Specialized class for packages |
@@ -3430,7 +3153,7 @@ for the install phase is:
For those not used to Python instance methods, this is the
package itself. In this case it's an instance of ``Foo``, which
extends ``Package``. For API docs on Package objects, see
:py:class:`Package <spack.package_base.Package>`.
:py:class:`Package <spack.package.Package>`.
``spec``
This is the concrete spec object created by Spack from an
@@ -4561,9 +4284,6 @@ other checks.
* - :ref:`AutotoolsPackage <autotoolspackage>`
- ``check`` (``make test``, ``make check``)
- ``installcheck`` (``make installcheck``)
* - :ref:`CachedCMakePackage <cachedcmakepackage>`
- ``check`` (``make check``, ``make test``)
- Not applicable
* - :ref:`CMakePackage <cmakepackage>`
- ``check`` (``make check``, ``make test``)
- Not applicable
@@ -4588,9 +4308,6 @@ other checks.
* - :ref:`SIPPackage <sippackage>`
- Not applicable
- ``test`` (module imports)
* - :ref:`WafPackage <wafpackage>`
- ``build_test`` (must be overridden)
- ``install_test`` (must be overridden)
For example, the ``Libelf`` package inherits from ``AutotoolsPackage``
and its ``Makefile`` has a standard ``check`` target. So Spack will
@@ -5606,7 +5323,7 @@ would be quite complicated to do using regex only. Employing the
.. code-block:: python
class Gcc(Package):
executables = ['g++']
executables = [r'g\+\+']
def filter_detected_exes(cls, prefix, exes_in_prefix):
return [x for x in exes_in_prefix if 'clang' not in x]
@@ -5759,24 +5476,6 @@ Version Lists
Spack packages should list supported versions with the newest first.
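For example (a hypothetical package; the ``<checksum>`` values are
placeholders, not real digests):

.. code-block:: python

   class Foo(Package):
       # newest version first
       version('2.1.0', sha256='<checksum>')
       version('2.0.2', sha256='<checksum>')
       version('1.9.8', sha256='<checksum>')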
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using ``home`` vs ``prefix``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``home`` and ``prefix`` are both attributes that can be queried on a
package's dependencies, often when passing configure arguments pointing to the
location of a dependency. The difference is that while ``prefix`` is the
location on disk where a concrete package resides, ``home`` is the *logical*
location that a package resides, which may be different than ``prefix`` in
the case of virtual packages or other special circumstances. For most use
cases inside a package, its dependency locations can be accessed via either
``self.spec['foo'].home`` or ``self.spec['foo'].prefix``. Specific packages
that should be consumed by dependents via ``.home`` instead of ``.prefix``
should be noted in their respective documentation.
See :ref:`custom-attributes` for more details and an example implementing
a custom ``home`` attribute.
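A hedged sketch of the common pattern (``bar`` is a hypothetical dependency
name):

.. code-block:: python

   class Foo(AutotoolsPackage):
       depends_on('bar')

       def configure_args(self):
           # prefer .home over .prefix so a virtual provider rooted in a
           # subdirectory of its prefix is still located correctly
           return ['--with-bar={0}'.format(self.spec['bar'].home)]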
---------------------------
Packaging workflow commands
---------------------------

View File

@@ -5,9 +5,9 @@
.. _pipelines:
============
CI Pipelines
============
=========
Pipelines
=========
Spack provides commands that support generating and running automated build
pipelines designed for Gitlab CI. At the highest level it works like this:
@@ -115,8 +115,7 @@ And here's the spack environment built by the pipeline represented as a
spack:
view: false
concretizer:
unify: false
concretization: separately
definitions:
- pkgs:
@@ -168,7 +167,7 @@ which specs are up to date and which need to be rebuilt (it's a good idea for ot
reasons as well, but those are out of scope for this discussion). In this case we
have disabled it (using ``rebuild-index: False``) because the index would only be
generated in the artifacts mirror anyway, and consequently would not be available
during subsequent pipeline runs.
during subesequent pipeline runs.
.. note::
With the addition of reproducible builds (#22887) a previously working
@@ -267,64 +266,24 @@ generated by jobs in the pipeline.
``spack ci rebuild``
^^^^^^^^^^^^^^^^^^^^^
The purpose of ``spack ci rebuild`` is straightforward: take its assigned
spec and ensure a binary of a successful build exists on the target mirror.
If the binary does not already exist, it is built from source and pushed
to the mirror. The associated stand-alone tests are optionally run against
the new build. Additionally, files for reproducing the build outside of the
CI environment are created to facilitate debugging.
The purpose of the ``spack ci rebuild`` is straightforward: take its assigned
spec job, check whether the target mirror already has a binary for that spec,
and if not, build the spec from source and push the binary to the mirror. To
accomplish this in a reproducible way, the sub-command prepares a ``spack install``
command line to build a single spec in the DAG, saves that command in a
shell script, ``install.sh``, in the current working directory, and then runs
it to install the spec. The shell script is also exported as an artifact to
aid in reproducing the build outside of the CI environment.
If a binary for the spec does not exist on the target mirror, an install
shell script, ``install.sh``, is created and saved in the current working
directory. The script is run in a job to install the spec from source. The
resulting binary package is pushed to the mirror. If ``cdash`` is configured
for the environment, then the build results will be uploaded to the site.
If it was necessary to install the spec from source, ``spack ci rebuild`` will
also subsequently create a binary package for the spec and try to push it to the
mirror.
Environment variables and values in the ``gitlab-ci`` section of the
``spack.yaml`` environment file provide inputs to this process. The
two main sources of environment variables are variables written into
``.gitlab-ci.yml`` by ``spack ci generate`` and the GitLab CI runtime.
Several key CI pipeline variables are described in
:ref:`ci_environment_variables`.
If the ``--tests`` option is provided, stand-alone tests are performed but
only if the build was successful *and* the package does not appear in the
list of ``broken-tests-packages``. A shell script, ``test.sh``, is created
and run to perform the tests. On completion, test logs are exported as job
artifacts for review and to facilitate debugging. If ``cdash`` is configured,
test results are also uploaded to the site.
A snippet from an example ``spack.yaml`` file illustrating use of this
option *and* specification of a package with broken tests is given below.
The inclusion of a spec for building ``gptune`` is not shown here. Note
that ``--tests`` is passed to ``spack ci rebuild`` as part of the
``gitlab-ci`` script.
.. code-block:: yaml
gitlab-ci:
script:
- . "./share/spack/setup-env.sh"
- spack --version
- cd ${SPACK_CONCRETE_ENV_DIR}
- spack env activate --without-view .
- spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'"
- mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
- if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi
- if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
- spack -d ci rebuild --tests > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2)
broken-tests-packages:
- gptune
In this case, even if ``gptune`` is successfully built from source, the
pipeline will *not* run its stand-alone tests since the package is listed
under ``broken-tests-packages``.
Spack's cloud pipelines provide actual, up-to-date examples of the CI/CD
configuration and environment files used by Spack. You can find them
under Spack's `stacks
<https://github.com/spack/spack/tree/develop/share/spack/gitlab/cloud_pipelines/stacks>`_ repository directory.
The ``spack ci rebuild`` sub-command mainly expects its "input" to come either
from environment variables or from the ``gitlab-ci`` section of the ``spack.yaml``
environment file. There are two main sources of the environment variables, some
are written into ``.gitlab-ci.yml`` by ``spack ci generate``, and some are
provided by the GitLab CI runtime.
.. _cmd-spack-ci-rebuild-index:
@@ -487,7 +446,7 @@ Note about "no-op" jobs
^^^^^^^^^^^^^^^^^^^^^^^
If no specs in an environment need to be rebuilt during a given pipeline run
(meaning all are already up to date on the mirror), a single successful job
(meaning all are already up to date on the mirror), a single succesful job
(a NO-OP) is still generated to avoid an empty pipeline (which GitLab
considers to be an error). An optional ``service-job-attributes`` section
can be added to your ``spack.yaml`` where you can provide ``tags`` and
@@ -765,7 +724,7 @@ above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
On the other hand, if you're pointing to a spack repository and branch under your
control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``,
and you can instead just clone using the variables you define (``SPACK_REPO``
and ``SPACK_REF`` in the example above).
and ``SPACK_REF`` in the example aboves).
.. _custom_workflow:

View File

@@ -61,7 +61,7 @@ You can see the packages we added earlier in the ``specs:`` section. If you
ever want to add more packages, you can either use ``spack add`` or manually
edit this file.
We also need to change the ``concretizer:unify`` option. By default, Spack
We also need to change the ``concretization:`` option. By default, Spack
concretizes each spec *separately*, allowing multiple versions of the same
package to coexist. Since we want a single consistent environment, we want to
concretize all of the specs *together*.
@@ -78,8 +78,7 @@ Here is what your ``spack.yaml`` looks like with this new setting:
# add package specs to the `specs` list
specs: [bash@5, python, py-numpy, py-scipy, py-matplotlib]
view: true
concretizer:
unify: true
concretization: together
^^^^^^^^^^^^^^^^
Symlink location

View File

@@ -1,12 +1,10 @@
# These dependencies should be installed using pip in order
# to build the documentation.
sphinx>=3.4,!=4.1.2,!=5.1.0
sphinx>=3.4,!=4.1.2
sphinxcontrib-programoutput
sphinx-design
sphinx-rtd-theme
python-levenshtein
# Restrict to docutils <0.17 to workaround a list rendering issue in sphinx.
# https://stackoverflow.com/questions/67542699
docutils <0.17
pygments <2.13

View File

@@ -18,15 +18,11 @@ spack:
- "py-sphinx@3.4:4.1.1,4.1.3:"
- py-sphinxcontrib-programoutput
- py-docutils@:0.16
- py-sphinx-design
- py-sphinx-rtd-theme
- py-pygments@:2.12
# VCS
- git
- mercurial
- subversion
# Plotting
- graphviz
concretizer:
unify: true
concretization: together

View File

@@ -1,5 +1,5 @@
Name, Supported Versions, Notes, Requirement Reason
Python, 2.7/3.6-3.10, , Interpreter for Spack
Python, 2.7/3.5-3.10, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
@@ -7,7 +7,7 @@ bash, , , Compiler wrappers
tar, , , Extract/create archives
gzip, , , Compress/Decompress archives
unzip, , , Compress/Decompress archives
bzip2, , , Compress/Decompress archives
bzip, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
@@ -15,4 +15,4 @@ gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories
hg, , Optional, Manage Software Repositories
Python header files, , Optional (e.g. ``python3-dev`` on Debian), Bootstrapping from sources
Python header files, , Optional (e.g. ``python3-dev`` on Debian), Bootstrapping from sources

10
lib/spack/env/cc vendored
View File

@@ -1,4 +1,4 @@
#!/bin/sh -f
#!/bin/sh
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
@@ -241,14 +241,14 @@ case "$command" in
mode=cpp
debug_flags="-g"
;;
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe)
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|emcc)
command="$SPACK_CC"
language="C"
comp="CC"
lang_flags=C
debug_flags="-g"
;;
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++)
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|em++)
command="$SPACK_CXX"
language="C++"
comp="CXX"
@@ -768,9 +768,7 @@ if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
echo "[$mode] ${full_command_list}" >> "$output_log"
fi
# Execute the full command, preserving spaces with IFS set

1
lib/spack/env/emscripten/em++ vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/emscripten/emcc vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.4 (commit e2cfdc266174488dee78b8c9058e36d60dc1b548)
* Version: 0.1.2 (commit 85757b6666422fca86aa882a769bf78b0f992f54)
argparse
--------

View File

@@ -61,7 +61,7 @@ def proc_cpuinfo():
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
with open("/proc/cpuinfo") as file:
for line in file:
key, separator, value = line.partition(":")
@@ -80,46 +80,26 @@ def proc_cpuinfo():
def _check_output(args, env):
output = subprocess.Popen( # pylint: disable=consider-using-with
args, stdout=subprocess.PIPE, env=env
).communicate()[0]
output = subprocess.Popen(args, stdout=subprocess.PIPE, env=env).communicate()[0]
return six.text_type(output.decode("utf-8"))
def _machine():
""" "Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin, trust what Python tells us
if operating_system != "Darwin":
return platform.machine()
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
#
# See: https://bugs.python.org/issue42704
output = _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return "aarch64"
return "x86_64"
@info_dict(operating_system="Darwin")
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == "x86_64":
if platform.machine() == "x86_64":
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
@@ -145,18 +125,6 @@ def sysctl(*args):
return info
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
return child_environment
def adjust_raw_flags(info):
"""Adjust the flags detected on the system to homogenize
slightly different representations.
@@ -216,7 +184,12 @@ def compatible_microarchitectures(info):
Args:
info (dict): dictionary containing information on the host cpu
"""
architecture_family = _machine()
architecture_family = platform.machine()
# On Apple M1 platform.machine() returns "arm64" instead of "aarch64"
# so we should normalize the name here
if architecture_family == "arm64":
architecture_family = "aarch64"
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
@@ -271,7 +244,12 @@ def compatibility_check(architecture_family):
architecture_family = (architecture_family,)
def decorator(func):
COMPATIBILITY_CHECKS.update({family: func for family in architecture_family})
# pylint: disable=fixme
# TODO: on removal of Python 2.6 support this can be re-written as
# TODO: an update + a dict comprehension
for arch_family in architecture_family:
COMPATIBILITY_CHECKS[arch_family] = func
return func
return decorator
@@ -310,7 +288,7 @@ def compatibility_check_for_x86_64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)
@@ -325,9 +303,8 @@ def compatibility_check_for_aarch64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
# On macOS it seems impossible to get all the CPU features with syctl info
and (target.features.issubset(features) or platform.system() == "Darwin")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)

View File

@@ -106,7 +106,7 @@ def __eq__(self, other):
self.name == other.name
and self.vendor == other.vendor
and self.features == other.features
and self.parents == other.parents # avoid ancestors here
and self.ancestors == other.ancestors
and self.compilers == other.compilers
and self.generation == other.generation
)

View File

@@ -11,7 +11,7 @@
try:
from collections.abc import MutableMapping # novm
except ImportError:
from collections import MutableMapping # pylint: disable=deprecated-class
from collections import MutableMapping
class LazyDictionary(MutableMapping):
@@ -56,7 +56,7 @@ def _load_json_file(json_file):
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r") as file: # pylint: disable=unspecified-encoding
with open(filename, "r") as file:
return json.load(file)
return _factory

View File

@@ -85,21 +85,7 @@
"intel": [
{
"versions": ":",
"name": "x86-64",
"flags": "-march={name} -mtune=generic"
}
],
"oneapi": [
{
"versions": ":",
"name": "x86-64",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": ":",
"name": "x86-64",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
@@ -143,20 +129,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -214,20 +186,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -290,20 +248,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -344,19 +288,8 @@
"intel": [
{
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
}
@@ -400,18 +333,6 @@
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -463,20 +384,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -525,20 +432,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -597,18 +490,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -669,18 +550,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -746,18 +615,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -815,18 +672,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -887,18 +732,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -965,20 +798,6 @@
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1049,20 +868,6 @@
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1099,7 +904,8 @@
"avx512cd",
"avx512vbmi",
"avx512ifma",
"sha"
"sha",
"umip"
],
"compilers": {
"gcc": [
@@ -1131,18 +937,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1210,18 +1004,6 @@
"versions": "19.0.1:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1262,6 +1044,7 @@
"avx512vbmi",
"avx512ifma",
"sha_ni",
"umip",
"clwb",
"rdpid",
"gfni",
@@ -1315,20 +1098,6 @@
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1373,20 +1142,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
]
}
},
@@ -1437,20 +1192,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1505,20 +1246,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1574,20 +1301,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
]
}
},
@@ -1647,22 +1360,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1725,22 +1422,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1804,22 +1485,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1878,30 +1543,6 @@
"name": "znver3",
"flags": "-march={name} -mtune={name}"
}
],
"intel": [
{
"versions": "16.0:",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -2147,6 +1788,7 @@
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
@@ -2179,26 +1821,18 @@
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "8:10.2",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve -msve-vector-bits=512"
},
{
"versions": "10.3:",
"flags": "-mcpu=a64fx -msve-vector-bits=512"
"versions": "8:",
"flags": "-march=armv8.2-a+crc+aes+sha2+fp16+sve -msve-vector-bits=512"
}
],
"clang": [
{
"versions": "3.9:4.9",
"flags": "-march=armv8.2-a+crc+sha2+fp16"
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "5:10",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve"
},
{
"versions": "11:",
"flags": "-mcpu=a64fx"
"versions": "5:",
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
}
],
"arm": [
@@ -2247,7 +1881,7 @@
}
},
"graviton2": {
"from": ["graviton"],
"from": ["aarch64"],
"vendor": "ARM",
"features": [
"fp",
@@ -2317,144 +1951,10 @@
]
}
},
"graviton3": {
"from": ["graviton2"],
"vendor": "ARM",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"paca",
"pacg",
"dcpodp",
"svei8mm",
"svebf16",
"i8mm",
"bf16",
"dgh",
"rng"
],
"compilers" : {
"gcc": [
{
"versions": "4.8:4.8.9",
"flags": "-march=armv8-a"
},
{
"versions": "4.9:5.9",
"flags": "-march=armv8-a+crc+crypto"
},
{
"versions": "6:6.9",
"flags" : "-march=armv8.1-a"
},
{
"versions": "7:7.9",
"flags" : "-march=armv8.2-a+crypto+fp16 -mtune=cortex-a72"
},
{
"versions": "8.0:8.9",
"flags" : "-march=armv8.2-a+fp16+dotprod+crypto -mtune=cortex-a72"
},
{
"versions": "9.0:9.9",
"flags" : "-march=armv8.4-a+crypto+rcpc+sha3+sm4+sve+rng+nodotprod -mtune=neoverse-v1"
},
{
"versions": "10.0:",
"flags" : "-march=armv8.4-a+crypto+rcpc+sha3+sm4+sve+rng+ssbs+i8mm+bf16+nodotprod -mtune=neoverse-v1"
}
],
"clang" : [
{
"versions": "3.9:4.9",
"flags" : "-march=armv8.2-a+fp16+crc+crypto"
},
{
"versions": "5:10",
"flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto"
},
{
"versions": "11:",
"flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
}
],
"arm" : [
{
"versions": "20:21.9",
"flags" : "-march=armv8.2-a+sve+fp16+rcpc+dotprod+crypto"
},
{
"versions": "22:",
"flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
}
]
}
},
"m1": {
"from": ["aarch64"],
"vendor": "Apple",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"flagm2",
"frint"
],
"features": [],
"compilers": {
"gcc": [
{
@@ -2464,22 +1964,14 @@
],
"clang" : [
{
"versions": "9.0:12.0",
"versions": "9.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
],
"apple-clang": [
{
"versions": "11.0:12.5",
"versions": "11.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
]
}

View File

@@ -71,8 +71,6 @@
import re
import math
import multiprocessing
import sys
import threading
import time
from contextlib import contextmanager
@@ -411,12 +409,7 @@ def parse(self, stream, context=6, jobs=None):
pool = multiprocessing.Pool(jobs)
try:
# this is a workaround for a Python bug in Pool with ctrl-C
if sys.version_info >= (3, 2):
max_timeout = threading.TIMEOUT_MAX
else:
max_timeout = 9999999
results = pool.map_async(_parse_unpack, args, 1).get(max_timeout)
results = pool.map_async(_parse_unpack, args, 1).get(9999999)
errors, warnings, timings = zip(*results)
finally:
pool.terminate()

View File

@@ -29,8 +29,8 @@ class Command(object):
- optionals: list of optional arguments (list)
- subcommands: list of subcommand parsers (list)
"""
def __init__(self, prog, description, usage, positionals, optionals, subcommands):
def __init__(self, prog, description, usage,
positionals, optionals, subcommands):
self.prog = prog
self.description = description
self.usage = usage
@@ -71,15 +71,15 @@ def parse(self, parser, prog):
"""
self.parser = parser
split_prog = parser.prog.split(" ")
split_prog = parser.prog.split(' ')
split_prog[-1] = prog
prog = " ".join(split_prog)
prog = ' '.join(split_prog)
description = parser.description
fmt = parser._get_formatter()
actions = parser._actions
groups = parser._mutually_exclusive_groups
usage = fmt._format_usage(None, actions, groups, "").strip()
usage = fmt._format_usage(None, actions, groups, '').strip()
# Go through actions and split them into optionals, positionals,
# and subcommands
@@ -90,8 +90,8 @@ def parse(self, parser, prog):
if action.option_strings:
flags = action.option_strings
dest_flags = fmt._format_action_invocation(action)
help = self._expand_help(action) if action.help else ""
help = help.replace("\n", " ")
help = self._expand_help(action) if action.help else ''
help = help.replace('\n', ' ')
optionals.append((flags, dest_flags, help))
elif isinstance(action, argparse._SubParsersAction):
for subaction in action._choices_actions:
@@ -100,19 +100,20 @@ def parse(self, parser, prog):
# Look for aliases of the form 'name (alias, ...)'
if self.aliases:
match = re.match(r"(.*) \((.*)\)", subaction.metavar)
match = re.match(r'(.*) \((.*)\)', subaction.metavar)
if match:
aliases = match.group(2).split(", ")
aliases = match.group(2).split(', ')
for alias in aliases:
subparser = action._name_parser_map[alias]
subcommands.append((subparser, alias))
else:
args = fmt._format_action_invocation(action)
help = self._expand_help(action) if action.help else ""
help = help.replace("\n", " ")
help = self._expand_help(action) if action.help else ''
help = help.replace('\n', ' ')
positionals.append((args, help))
return Command(prog, description, usage, positionals, optionals, subcommands)
return Command(
prog, description, usage, positionals, optionals, subcommands)
def format(self, cmd):
"""Returns the string representation of a single node in the
@@ -160,13 +161,14 @@ def write(self, parser):
raise
_rst_levels = ["=", "-", "^", "~", ":", "`"]
_rst_levels = ['=', '-', '^', '~', ':', '`']
class ArgparseRstWriter(ArgparseWriter):
"""Write argparse output as rst sections."""
def __init__(self, prog, out=None, aliases=False, rst_levels=_rst_levels):
def __init__(self, prog, out=None, aliases=False,
rst_levels=_rst_levels):
"""Create a new ArgparseRstWriter.
Parameters:
@@ -215,12 +217,11 @@ def begin_command(self, prog):
{1}
{2}
""".format(
prog.replace(" ", "-"), prog, self.rst_levels[self.level] * len(prog)
)
""".format(prog.replace(' ', '-'), prog,
self.rst_levels[self.level] * len(prog))
def description(self, description):
return description + "\n\n"
return description + '\n\n'
def usage(self, usage):
return """\
@@ -228,39 +229,33 @@ def usage(self, usage):
{0}
""".format(
usage
)
""".format(usage)
def begin_positionals(self):
return "\n**Positional arguments**\n\n"
return '\n**Positional arguments**\n\n'
def positional(self, name, help):
return """\
{0}
{1}
""".format(
name, help
)
""".format(name, help)
def end_positionals(self):
return ""
return ''
def begin_optionals(self):
return "\n**Optional arguments**\n\n"
return '\n**Optional arguments**\n\n'
def optional(self, opts, help):
return """\
``{0}``
{1}
""".format(
opts, help
)
""".format(opts, help)
def end_optionals(self):
return ""
return ''
def begin_subcommands(self, subcommands):
string = """
@@ -272,10 +267,11 @@ def begin_subcommands(self, subcommands):
"""
for cmd, _ in subcommands:
prog = re.sub(r"^[^ ]* ", "", cmd.prog)
string += " * :ref:`{0} <{1}>`\n".format(prog, cmd.prog.replace(" ", "-"))
prog = re.sub(r'^[^ ]* ', '', cmd.prog)
string += ' * :ref:`{0} <{1}>`\n'.format(
prog, cmd.prog.replace(' ', '-'))
return string + "\n"
return string + '\n'
class ArgparseCompletionWriter(ArgparseWriter):
@@ -310,11 +306,9 @@ def format(self, cmd):
# Flatten lists of lists
optionals = [x for xx in optionals for x in xx]
return (
self.start_function(cmd.prog)
+ self.body(positionals, optionals, subcommands)
+ self.end_function(cmd.prog)
)
return (self.start_function(cmd.prog) +
self.body(positionals, optionals, subcommands) +
self.end_function(cmd.prog))
def start_function(self, prog):
"""Returns the syntax needed to begin a function definition.
@@ -325,8 +319,8 @@ def start_function(self, prog):
Returns:
str: the function definition beginning
"""
name = prog.replace("-", "_").replace(" ", "_")
return "\n_{0}() {{".format(name)
name = prog.replace('-', '_').replace(' ', '_')
return '\n_{0}() {{'.format(name)
def end_function(self, prog=None):
"""Returns the syntax needed to end a function definition.
@@ -337,7 +331,7 @@ def end_function(self, prog=None):
Returns:
str: the function definition ending
"""
return "}\n"
return '}\n'
def body(self, positionals, optionals, subcommands):
"""Returns the body of the function.
@@ -350,7 +344,7 @@ def body(self, positionals, optionals, subcommands):
Returns:
str: the function body
"""
return ""
return ''
def positionals(self, positionals):
"""Returns the syntax for reporting positional arguments.
@@ -361,7 +355,7 @@ def positionals(self, positionals):
Returns:
str: the syntax for positional arguments
"""
return ""
return ''
def optionals(self, optionals):
"""Returns the syntax for reporting optional flags.
@@ -372,7 +366,7 @@ def optionals(self, optionals):
Returns:
str: the syntax for optional flags
"""
return ""
return ''
def subcommands(self, subcommands):
"""Returns the syntax for reporting subcommands.
@@ -383,4 +377,4 @@ def subcommands(self, subcommands):
Returns:
str: the syntax for subcommand parsers
"""
return ""
return ''

View File

@@ -18,22 +18,22 @@
map = map
zip = zip
from itertools import zip_longest as zip_longest # novm # noqa: F401
from urllib.parse import urlencode as urlencode # novm # noqa: F401
from urllib.request import urlopen as urlopen # novm # noqa: F401
from urllib.parse import urlencode as urlencode # novm # noqa: F401
from urllib.request import urlopen as urlopen # novm # noqa: F401
if sys.version_info >= (3, 3):
from collections.abc import Hashable as Hashable # novm
from collections.abc import Iterable as Iterable # novm
from collections.abc import Mapping as Mapping # novm
from collections.abc import MutableMapping as MutableMapping # novm
from collections.abc import Hashable as Hashable # novm
from collections.abc import Iterable as Iterable # novm
from collections.abc import Mapping as Mapping # novm
from collections.abc import MutableMapping as MutableMapping # novm
from collections.abc import MutableSequence as MutableSequence # novm
from collections.abc import MutableSet as MutableSet # novm
from collections.abc import Sequence as Sequence # novm
from collections.abc import MutableSet as MutableSet # novm
from collections.abc import Sequence as Sequence # novm
else:
from collections import Hashable as Hashable # noqa: F401
from collections import Iterable as Iterable # noqa: F401
from collections import Mapping as Mapping # noqa: F401
from collections import MutableMapping as MutableMapping # noqa: F401
from collections import Hashable as Hashable # noqa: F401
from collections import Iterable as Iterable # noqa: F401
from collections import Mapping as Mapping # noqa: F401
from collections import MutableMapping as MutableMapping # noqa: F401
from collections import MutableSequence as MutableSequence # noqa: F401
from collections import MutableSet as MutableSet # noqa: F401
from collections import Sequence as Sequence # noqa: F401
from collections import MutableSet as MutableSet # noqa: F401
from collections import Sequence as Sequence # noqa: F401

File diff suppressed because it is too large

View File

@@ -11,9 +11,7 @@
import os
import re
import sys
import traceback
from datetime import datetime, timedelta
from typing import Any, Callable, Iterable, List, Tuple
import six
from six import string_types
@@ -21,7 +19,7 @@
from llnl.util.compat import MutableMapping, MutableSequence, zip_longest
# Ignore emacs backups when listing modules
ignore_modules = [r"^\.#", "~$"]
ignore_modules = [r'^\.#', '~$']
def index_by(objects, *funcs):
@@ -91,9 +89,9 @@ def index_by(objects, *funcs):
def caller_locals():
"""This will return the locals of the *parent* of the caller.
This allows a function to insert variables into its caller's
scope. Yes, this is some black magic, and yes it's useful
for implementing things like depends_on and provides.
This allows a function to insert variables into its caller's
scope. Yes, this is some black magic, and yes it's useful
for implementing things like depends_on and provides.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
@@ -105,7 +103,7 @@ def caller_locals():
def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
enclosing module's name.
enclosing module's name.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
@@ -115,13 +113,12 @@ def get_calling_module_name():
finally:
del stack
if "__module__" not in caller_locals:
raise RuntimeError(
"Must invoke get_calling_module_name() " "from inside a class definition!"
)
if '__module__' not in caller_locals:
raise RuntimeError("Must invoke get_calling_module_name() "
"from inside a class definition!")
module_name = caller_locals["__module__"]
base_name = module_name.split(".")[-1]
module_name = caller_locals['__module__']
base_name = module_name.split('.')[-1]
return base_name
@@ -129,8 +126,8 @@ def attr_required(obj, attr_name):
"""Ensure that a class has a required attribute."""
if not hasattr(obj, attr_name):
raise RequiredAttributeError(
"No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
)
"No required attribute '%s' in class '%s'"
% (attr_name, obj.__class__.__name__))
def attr_setdefault(obj, name, value):
@@ -202,35 +199,33 @@ def _memoized_function(*args, **kwargs):
# TypeError is raised when indexing into a dict if the key is unhashable.
raise six.raise_from(
UnhashableArguments(
"args + kwargs '{}' was not hashable for function '{}'".format(
key, func.__name__
),
"args + kwargs '{}' was not hashable for function '{}'"
.format(key, func.__name__),
),
e,
)
e)
return _memoized_function
def list_modules(directory, **kwargs):
"""Lists all of the modules, excluding ``__init__.py``, in a
particular directory. Listed packages have no particular
order."""
list_directories = kwargs.setdefault("directories", True)
particular directory. Listed packages have no particular
order."""
list_directories = kwargs.setdefault('directories', True)
for name in os.listdir(directory):
if name == "__init__.py":
if name == '__init__.py':
continue
path = os.path.join(directory, name)
if list_directories and os.path.isdir(path):
init_py = os.path.join(path, "__init__.py")
init_py = os.path.join(path, '__init__.py')
if os.path.isfile(init_py):
yield name
elif name.endswith(".py"):
elif name.endswith('.py'):
if not any(re.search(pattern, name) for pattern in ignore_modules):
yield re.sub(".py$", "", name)
yield re.sub('.py$', '', name)
def decorator_with_or_without_args(decorator):
@@ -260,34 +255,41 @@ def new_dec(*args, **kwargs):
def key_ordering(cls):
"""Decorates a class with extra methods that implement rich comparison
operations and ``__hash__``. The decorator assumes that the class
implements a function called ``_cmp_key()``. The rich comparison
operations will compare objects using this key, and the ``__hash__``
function will return the hash of this key.
operations and ``__hash__``. The decorator assumes that the class
implements a function called ``_cmp_key()``. The rich comparison
operations will compare objects using this key, and the ``__hash__``
function will return the hash of this key.
If a class already has ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, or ``__ge__`` defined, this decorator will overwrite them.
If a class already has ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, or ``__ge__`` defined, this decorator will overwrite them.
Raises:
TypeError: If the class does not have a ``_cmp_key`` method
Raises:
TypeError: If the class does not have a ``_cmp_key`` method
"""
def setter(name, value):
value.__name__ = name
setattr(cls, name, value)
if not has_method(cls, "_cmp_key"):
if not has_method(cls, '_cmp_key'):
raise TypeError("'%s' doesn't define _cmp_key()." % cls.__name__)
setter("__eq__", lambda s, o: (s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
setter("__lt__", lambda s, o: o is not None and s._cmp_key() < o._cmp_key())
setter("__le__", lambda s, o: o is not None and s._cmp_key() <= o._cmp_key())
setter('__eq__',
lambda s, o:
(s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
setter('__lt__',
lambda s, o: o is not None and s._cmp_key() < o._cmp_key())
setter('__le__',
lambda s, o: o is not None and s._cmp_key() <= o._cmp_key())
setter("__ne__", lambda s, o: (s is not o) and (o is None or s._cmp_key() != o._cmp_key()))
setter("__gt__", lambda s, o: o is None or s._cmp_key() > o._cmp_key())
setter("__ge__", lambda s, o: o is None or s._cmp_key() >= o._cmp_key())
setter('__ne__',
lambda s, o:
(s is not o) and (o is None or s._cmp_key() != o._cmp_key()))
setter('__gt__',
lambda s, o: o is None or s._cmp_key() > o._cmp_key())
setter('__ge__',
lambda s, o: o is None or s._cmp_key() >= o._cmp_key())
setter("__hash__", lambda self: hash(self._cmp_key()))
setter('__hash__', lambda self: hash(self._cmp_key()))
return cls
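A usage sketch of the decorator (the Release class is hypothetical): defining _cmp_key() is enough to get all six comparisons plus a consistent __hash__.

from llnl.util.lang import key_ordering


@key_ordering
class Release(object):  # hypothetical example class
    def __init__(self, major, minor):
        self.major, self.minor = major, minor

    def _cmp_key(self):
        # Objects compare and hash as this tuple.
        return (self.major, self.minor)


assert Release(1, 2) == Release(1, 2)
assert Release(1, 2) < Release(1, 10)
assert len({Release(1, 2), Release(1, 2)}) == 1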
@@ -454,7 +456,8 @@ def gt(self, other):
def le(self, other):
if self is other:
return True
return (other is not None) and not lazy_lt(other._cmp_iter, self._cmp_iter)
return (other is not None) and not lazy_lt(other._cmp_iter,
self._cmp_iter)
def ge(self, other):
if self is other:
@@ -484,9 +487,7 @@ def add_func_to_class(name, func):
@lazy_lexicographic_ordering
class HashableMap(MutableMapping):
"""This is a hashable, comparable dictionary. Hash is performed on
a tuple of the values in the dictionary."""
__slots__ = ("dict",)
a tuple of the values in the dictionary."""
def __init__(self):
self.dict = {}
@@ -524,7 +525,7 @@ def copy(self):
def in_function(function_name):
"""True if the caller was called from some function with
the supplied name, False otherwise."""
the supplied name, False otherwise."""
stack = inspect.stack()
try:
for elt in stack[2:]:
@@ -537,25 +538,24 @@ def in_function(function_name):
def check_kwargs(kwargs, fun):
"""Helper for making functions with kwargs. Checks whether the kwargs
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
Example::
Example::
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
# This raises a TypeError:
foo(w='bad kwarg')
# This raises a TypeError:
foo(w='bad kwarg')
"""
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for function %s()."
% (next(iter(kwargs)), fun.__name__)
)
% (next(iter(kwargs)), fun.__name__))
def match_predicate(*args):
@@ -571,7 +571,6 @@ def match_predicate(*args):
* any regex in a list or tuple of regexes matches.
* any predicate in args matches.
"""
def match(string):
for arg in args:
if isinstance(arg, string_types):
@@ -584,11 +583,9 @@ def match(string):
if arg(string):
return True
else:
raise ValueError(
"args to match_predicate must be regex, " "list of regexes, or callable."
)
raise ValueError("args to match_predicate must be regex, "
"list of regexes, or callable.")
return False
return match
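A short sketch of the three accepted argument forms (a regex string, a list of regexes, and a callable):

from llnl.util.lang import match_predicate

ignore = match_predicate(r"^\.", [r"\.pyc$", "__pycache__"],
                         lambda f: f == "junk")

assert ignore(".hidden") and ignore("a.pyc") and ignore("junk")
assert not ignore("lang.py")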
@@ -648,7 +645,7 @@ def pretty_date(time, now=None):
day_diff = diff.days
if day_diff < 0:
return ""
return ''
if day_diff == 0:
if second_diff < 10:
@@ -706,40 +703,43 @@ def pretty_string_to_date(date_str, now=None):
now = now or datetime.now()
# datetime formats
pattern[re.compile(r"^\d{4}$")] = lambda x: datetime.strptime(x, "%Y")
pattern[re.compile(r"^\d{4}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m")
pattern[re.compile(r"^\d{4}-\d{2}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m-%d")
pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$")] = lambda x: datetime.strptime(
x, "%Y-%m-%d %H:%M"
pattern[re.compile(r'^\d{4}$')] = lambda x: datetime.strptime(x, '%Y')
pattern[re.compile(r'^\d{4}-\d{2}$')] = lambda x: datetime.strptime(
x, '%Y-%m'
)
pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$")] = lambda x: datetime.strptime(
x, "%Y-%m-%d %H:%M:%S"
pattern[re.compile(r'^\d{4}-\d{2}-\d{2}$')] = lambda x: datetime.strptime(
x, '%Y-%m-%d'
)
pattern[re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$')] = \
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M')
pattern[re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$')] = \
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
pretty_regex = re.compile(r"(a|\d+)\s*(year|month|week|day|hour|minute|second)s?\s*ago")
pretty_regex = re.compile(
r'(a|\d+)\s*(year|month|week|day|hour|minute|second)s?\s*ago')
def _n_xxx_ago(x):
how_many, time_period = pretty_regex.search(x).groups()
how_many = 1 if how_many == "a" else int(how_many)
how_many = 1 if how_many == 'a' else int(how_many)
# timedelta natively supports time periods up to 'weeks'.
# To apply month or year we convert to 30 and 365 days
if time_period == "month":
if time_period == 'month':
how_many *= 30
time_period = "day"
elif time_period == "year":
time_period = 'day'
elif time_period == 'year':
how_many *= 365
time_period = "day"
time_period = 'day'
kwargs = {(time_period + "s"): how_many}
kwargs = {(time_period + 's'): how_many}
return now - timedelta(**kwargs)
pattern[pretty_regex] = _n_xxx_ago
# yesterday
callback = lambda x: now - timedelta(days=1)
pattern[re.compile("^yesterday$")] = callback
pattern[re.compile('^yesterday$')] = callback
for regexp, parser in pattern.items():
if bool(regexp.match(date_str)):
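The supported inputs are easiest to see by example; passing now pins the reference time so the results are deterministic:

from datetime import datetime

from llnl.util.lang import pretty_string_to_date

now = datetime(2022, 4, 23, 12, 0, 0)
assert pretty_string_to_date("2021-03", now) == datetime(2021, 3, 1)
assert pretty_string_to_date("2 days ago", now) == datetime(2022, 4, 21, 12, 0)
# 'a month ago' is approximated as 30 days, per the conversion above.
assert pretty_string_to_date("a month ago", now) == datetime(2022, 3, 24, 12, 0)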
@@ -750,6 +750,7 @@ def _n_xxx_ago(x):
class RequiredAttributeError(ValueError):
def __init__(self, message):
super(RequiredAttributeError, self).__init__(message)
@@ -761,7 +762,6 @@ class ObjectWrapper(object):
This class is modeled after the stackoverflow answer:
* http://stackoverflow.com/a/1445289/771663
"""
def __init__(self, wrapped_object):
wrapped_cls = type(wrapped_object)
wrapped_name = wrapped_cls.__name__
@@ -805,7 +805,7 @@ def __getattr__(self, name):
# requested but not yet set. The final 'getattr' line here requires
# 'instance'/'_instance' to be defined or it will enter an infinite
# loop, so protect against that here.
if name in ["_instance", "instance"]:
if name in ['_instance', 'instance']:
raise AttributeError()
return getattr(self.instance, name)
@@ -835,7 +835,7 @@ def __init__(self, ref_function):
self.ref_function = ref_function
def __getattr__(self, name):
if name == "ref_function":
if name == 'ref_function':
raise AttributeError()
return getattr(self.ref_function(), name)
@@ -873,8 +873,8 @@ def load_module_from_file(module_name, module_path):
# This recipe is adapted from https://stackoverflow.com/a/67692/771663
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path) # novm
spec = importlib.util.spec_from_file_location( # novm
module_name, module_path)
module = importlib.util.module_from_spec(spec) # novm
# The module object needs to exist in sys.modules before the
# loader executes the module code.
@@ -891,7 +891,6 @@ def load_module_from_file(module_name, module_path):
raise
elif sys.version_info[0] == 2:
import imp
module = imp.load_source(module_name, module_path)
return module
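A self-contained usage sketch (Python 3 only; the module name and file contents are made up):

import os
import tempfile

from llnl.util.lang import load_module_from_file

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "hello.py")  # hypothetical module file
    with open(path, "w") as f:
        f.write("GREETING = 'hi'\n")
    mod = load_module_from_file("hello", path)
    assert mod.GREETING == "hi"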
@@ -923,10 +922,8 @@ def uniq(sequence):
def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args):
return func(*args)
return _wrapper
@@ -935,23 +932,22 @@ class Devnull(object):
See https://stackoverflow.com/a/2929954.
"""
def write(self, *_):
pass
def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example::
replacing intervening elements with '...'. For example::
elide_list([1,2,3,4,5,6], 4)
elide_list([1,2,3,4,5,6], 4)
gives::
gives::
[1, 2, 3, '...', 6]
[1, 2, 3, '...', 6]
"""
if len(line_list) > max_num:
return line_list[: max_num - 1] + ["..."] + line_list[-1:]
return line_list[:max_num - 1] + ['...'] + line_list[-1:]
else:
return line_list
@@ -974,32 +970,7 @@ def enum(**kwargs):
Args:
**kwargs: explicit dictionary of enums
"""
return type("Enum", (object,), kwargs)
def stable_partition(
input_iterable, # type: Iterable
predicate_fn, # type: Callable[[Any], bool]
):
# type: (...) -> Tuple[List[Any], List[Any]]
"""Partition the input iterable according to a custom predicate.
Args:
input_iterable: input iterable to be partitioned.
predicate_fn: predicate function accepting an iterable item
as argument.
Return:
Tuple of the list of elements evaluating to True, and
list of elements evaluating to False.
"""
true_items, false_items = [], []
for item in input_iterable:
if predicate_fn(item):
true_items.append(item)
continue
false_items.append(item)
return true_items, false_items
return type('Enum', (object,), kwargs)
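A quick sketch of the stable_partition helper shown above; ordering within each half is preserved:

from llnl.util.lang import stable_partition

evens, odds = stable_partition(range(10), lambda n: n % 2 == 0)
assert evens == [0, 2, 4, 6, 8]
assert odds == [1, 3, 5, 7, 9]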
class TypedMutableSequence(MutableSequence):
@@ -1015,7 +986,6 @@ class Foo(TypedMutableSequence):
if isinstance(l, Foo):
# do something
"""
def __init__(self, iterable):
self.data = list(iterable)
@@ -1039,75 +1009,3 @@ def __repr__(self):
def __str__(self):
return str(self.data)
class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self):
self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]]
def __bool__(self):
"""Whether any exceptions were handled."""
return bool(self.exceptions)
def forward(self, context):
# type: (str) -> GroupedExceptionForwarder
"""Return a contextmanager which extracts tracebacks and prefixes a message."""
return GroupedExceptionForwarder(context, self)
def _receive_forwarded(self, context, exc, tb):
# type: (str, Exception, List[str]) -> None
self.exceptions.append((context, exc, tb))
def grouped_message(self, with_tracebacks=True):
# type: (bool) -> str
"""Print out an error message coalescing all the forwarded errors."""
each_exception_message = [
"{0} raised {1}: {2}{3}".format(
context,
exc.__class__.__name__,
exc,
"\n{0}".format("".join(tb)) if with_tracebacks else "",
)
for context, exc, tb in self.exceptions
]
return "due to the following failures:\n{0}".format("\n".join(each_exception_message))
class GroupedExceptionForwarder(object):
"""A contextmanager to capture exceptions and forward them to a
GroupedExceptionHandler."""
def __init__(self, context, handler):
# type: (str, GroupedExceptionHandler) -> None
self._context = context
self._handler = handler
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, tb):
if exc_value is not None:
self._handler._receive_forwarded(
self._context,
exc_value,
traceback.format_tb(tb),
)
# Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
return True
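Putting the two classes together, a usage sketch: each with block swallows and records its exception, and the handler reports them all at once.

from llnl.util.lang import GroupedExceptionHandler

handler = GroupedExceptionHandler()

for context in ("fetching a", "fetching b"):  # hypothetical contexts
    with handler.forward(context):
        raise ValueError("failed during " + context)

if handler:  # truthy when any exception was captured
    print(handler.grouped_message(with_tracebacks=False))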
class classproperty(object):
"""Non-data descriptor to evaluate a class-level property. The function that performs
the evaluation is injected at creation time and takes an instance (could be None) and
an owner (i.e. the class that originated the instance).
"""
def __init__(self, callback):
self.callback = callback
def __get__(self, instance, owner):
return self.callback(owner)
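A usage sketch (Registry is hypothetical): because __get__ receives the owner, the property works on the class itself, with no instance required.

from llnl.util.lang import classproperty


class Registry(object):  # hypothetical example class
    _items = ["a", "b"]

    @classproperty
    def count(cls):
        # The callback receives the owning class, even when instance is None.
        return len(cls._items)


assert Registry.count == 2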


@@ -13,12 +13,12 @@
from collections import OrderedDict
import llnl.util.tty as tty
from llnl.util.filesystem import BaseDirectoryVisitor, mkdirp, touch, traverse_tree
from llnl.util.filesystem import mkdirp, touch, traverse_tree
from llnl.util.symlink import islink, symlink
__all__ = ["LinkTree"]
__all__ = ['LinkTree']
empty_file_name = ".spack-empty"
empty_file_name = '.spack-empty'
def remove_link(src, dest):
@@ -38,28 +38,26 @@ class MergeConflict:
project(src_a) == project(src_b) == dst
"""
def __init__(self, dst, src_a=None, src_b=None):
self.dst = dst
self.src_a = src_a
self.src_b = src_b
class SourceMergeVisitor(BaseDirectoryVisitor):
class SourceMergeVisitor(object):
"""
Visitor that produces actions:
- An ordered list of directories to create in dst
- A list of files to link in dst
- A list of merge conflicts in dst/
"""
def __init__(self, ignore=None):
self.ignore = ignore if ignore is not None else lambda f: False
# When mapping <src root> to <dst root>/<projection>, we need
# to prepend the <projection> bit to the relative path in the
# destination dir.
self.projection = ""
self.projection = ''
# When a file blocks another file, the conflict can sometimes
# be resolved / ignored (e.g. <prefix>/LICENSE or
@@ -90,13 +88,10 @@ def before_visit_dir(self, root, rel_path, depth):
elif proj_rel_path in self.files:
# Can't create a dir where a file is.
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.fatal_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
self.fatal_conflicts.append(MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path)))
return False
elif proj_rel_path in self.directories:
# No new directory, carry on.
@@ -106,6 +101,9 @@ def before_visit_dir(self, root, rel_path, depth):
self.directories[proj_rel_path] = (root, rel_path)
return True
def after_visit_dir(self, root, rel_path, depth):
pass
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Replace symlinked dirs with actual directories when possible in low depths,
@@ -138,6 +136,9 @@ def before_visit_symlinked_dir(self, root, rel_path, depth):
self.visit_file(root, rel_path, depth)
return False
def after_visit_symlinked_dir(self, root, rel_path, depth):
pass
def visit_file(self, root, rel_path, depth):
proj_rel_path = os.path.join(self.projection, rel_path)
@@ -146,59 +147,46 @@ def visit_file(self, root, rel_path, depth):
elif proj_rel_path in self.directories:
# Can't create a file where a dir is; fatal error
src_a_root, src_a_relpath = self.directories[proj_rel_path]
self.fatal_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
self.fatal_conflicts.append(MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path)))
elif proj_rel_path in self.files:
# In some cases we can resolve file-file conflicts
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.file_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
self.file_conflicts.append(MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path)))
else:
# Otherwise register this file to be linked.
self.files[proj_rel_path] = (root, rel_path)
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)
def set_projection(self, projection):
self.projection = os.path.normpath(projection)
# TODO: is this how to check in general for empty projection?
if self.projection == ".":
self.projection = ""
if self.projection == '.':
self.projection = ''
return
# If there is a projection, we'll also create the directories
# it consists of, and check whether that's causing conflicts.
path = ""
path = ''
for part in self.projection.split(os.sep):
path = os.path.join(path, part)
if path not in self.files:
self.directories[path] = ("<projection>", path)
self.directories[path] = ('<projection>', path)
else:
# Can't create a dir where a file is.
src_a_root, src_a_relpath = self.files[path]
self.fatal_conflicts.append(
MergeConflict(
dst=path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join("<projection>", path),
)
)
self.fatal_conflicts.append(MergeConflict(
dst=path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join('<projection>', path)))
class DestinationMergeVisitor(BaseDirectoryVisitor):
class DestinationMergeVisitor(object):
"""DestinatinoMergeVisitor takes a SourceMergeVisitor
and:
@@ -212,7 +200,6 @@ class DestinationMergeVisitor(BaseDirectoryVisitor):
in the target prefix will never be merged with
directories in the sources directories.
"""
def __init__(self, source_merge_visitor):
self.src = source_merge_visitor
@@ -221,11 +208,10 @@ def before_visit_dir(self, root, rel_path, depth):
# and don't traverse deeper
if rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
self.src.fatal_conflicts.append(MergeConflict(
rel_path,
os.path.join(src_a_root, src_a_relpath),
os.path.join(root, rel_path)))
return False
# If destination dir was also a src dir, remove the mkdir
@@ -238,6 +224,9 @@ def before_visit_dir(self, root, rel_path, depth):
# don't descend into it.
return False
def after_visit_dir(self, root, rel_path, depth):
pass
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Symlinked directories in the destination prefix should
@@ -247,44 +236,39 @@ def before_visit_symlinked_dir(self, root, rel_path, depth):
# Always conflict
if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
self.src.fatal_conflicts.append(MergeConflict(
rel_path,
os.path.join(src_a_root, src_a_relpath),
os.path.join(root, rel_path)))
if rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
self.src.fatal_conflicts.append(MergeConflict(
rel_path,
os.path.join(src_a_root, src_a_relpath),
os.path.join(root, rel_path)))
# Never descend into symlinked target dirs.
return False
def after_visit_symlinked_dir(self, root, rel_path, depth):
pass
def visit_file(self, root, rel_path, depth):
# Can't merge a file if target already exists
if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
self.src.fatal_conflicts.append(MergeConflict(
rel_path,
os.path.join(src_a_root, src_a_relpath),
os.path.join(root, rel_path)))
elif rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append(
MergeConflict(
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path)
)
)
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)
self.src.fatal_conflicts.append(MergeConflict(
rel_path,
os.path.join(src_a_root, src_a_relpath),
os.path.join(root, rel_path)))
class LinkTree(object):
@@ -297,31 +281,30 @@ class LinkTree(object):
symlinked to, to prevent the source directory from ever being
modified.
"""
def __init__(self, source_root):
if not os.path.exists(source_root):
raise IOError("No such file or directory: '%s'" % source_root)
self._root = source_root
def find_conflict(self, dest_root, ignore=None, ignore_file_conflicts=False):
def find_conflict(self, dest_root, ignore=None,
ignore_file_conflicts=False):
"""Returns the first file in dest that conflicts with src"""
ignore = ignore or (lambda x: False)
conflicts = self.find_dir_conflicts(dest_root, ignore)
if not ignore_file_conflicts:
conflicts.extend(
dst
for src, dst in self.get_file_map(dest_root, ignore).items()
if os.path.exists(dst)
)
dst for src, dst
in self.get_file_map(dest_root, ignore).items()
if os.path.exists(dst))
if conflicts:
return conflicts[0]
def find_dir_conflicts(self, dest_root, ignore):
conflicts = []
kwargs = {"follow_nonexisting": False, "ignore": ignore}
kwargs = {'follow_nonexisting': False, 'ignore': ignore}
for src, dest in traverse_tree(self._root, dest_root, **kwargs):
if os.path.isdir(src):
if os.path.exists(dest) and not os.path.isdir(dest):
@@ -332,7 +315,7 @@ def find_dir_conflicts(self, dest_root, ignore):
def get_file_map(self, dest_root, ignore):
merge_map = {}
kwargs = {"follow_nonexisting": True, "ignore": ignore}
kwargs = {'follow_nonexisting': True, 'ignore': ignore}
for src, dest in traverse_tree(self._root, dest_root, **kwargs):
if not os.path.isdir(src):
merge_map[src] = dest
@@ -354,7 +337,8 @@ def merge_directories(self, dest_root, ignore):
touch(marker)
def unmerge_directories(self, dest_root, ignore):
for src, dest in traverse_tree(self._root, dest_root, ignore=ignore, order="post"):
for src, dest in traverse_tree(
self._root, dest_root, ignore=ignore, order='post'):
if os.path.isdir(src):
if not os.path.exists(dest):
continue
@@ -370,7 +354,8 @@ def unmerge_directories(self, dest_root, ignore):
if os.path.exists(marker):
os.remove(marker)
def merge(self, dest_root, ignore_conflicts=False, ignore=None, link=symlink, relative=False):
def merge(self, dest_root, ignore_conflicts=False, ignore=None,
link=symlink, relative=False):
"""Link all files in src into dest, creating directories
if necessary.
@@ -392,8 +377,7 @@ def merge(self, dest_root, ignore_conflicts=False, ignore=None, link=symlink, re
ignore = lambda x: False
conflict = self.find_conflict(
dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts
)
dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts)
if conflict:
raise SingleMergeConflictError(conflict)
@@ -432,7 +416,8 @@ class MergeConflictError(Exception):
class SingleMergeConflictError(MergeConflictError):
def __init__(self, path):
super(MergeConflictError, self).__init__("Package merge blocked by file: %s" % path)
super(MergeConflictError, self).__init__(
"Package merge blocked by file: %s" % path)
class MergeConflictSummary(MergeConflictError):
@@ -445,6 +430,5 @@ def __init__(self, conflicts):
# show the first 3 merge conflicts.
for conflict in conflicts[:3]:
msg += "\n `{0}` and `{1}` both project to `{2}`".format(
conflict.src_a, conflict.src_b, conflict.dst
)
conflict.src_a, conflict.src_b, conflict.dst)
super(MergeConflictSummary, self).__init__(msg)
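A usage sketch of the class these hunks reformat, kept self-contained with temporary directories (unmerge() belongs to the same class, though it is not shown above):

import os
import tempfile

from llnl.util.link_tree import LinkTree

src, dst = tempfile.mkdtemp(), tempfile.mkdtemp()
with open(os.path.join(src, "hello.txt"), "w") as f:
    f.write("hi\n")

tree = LinkTree(src)
tree.merge(dst)  # symlink src's files into dst, creating dirs as needed
assert os.path.islink(os.path.join(dst, "hello.txt"))
tree.unmerge(dst)  # remove the links again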


@@ -15,22 +15,22 @@
import spack.util.string
if sys.platform != "win32":
if sys.platform != 'win32':
import fcntl
__all__ = [
"Lock",
"LockDowngradeError",
"LockUpgradeError",
"LockTransaction",
"WriteTransaction",
"ReadTransaction",
"LockError",
"LockTimeoutError",
"LockPermissionError",
"LockROFileError",
"CantCreateLockError",
'Lock',
'LockDowngradeError',
'LockUpgradeError',
'LockTransaction',
'WriteTransaction',
'ReadTransaction',
'LockError',
'LockTimeoutError',
'LockPermissionError',
'LockROFileError',
'CantCreateLockError'
]
@@ -47,7 +47,6 @@ class OpenFile(object):
the file descriptor from the file handle if needed -- or we could make this track
file descriptors as well in the future.
"""
def __init__(self, fh):
self.fh = fh
self.refs = 0
@@ -93,11 +92,11 @@ def get_fh(self, path):
path (str): path to lock file we want a filehandle for
"""
# Open writable files as 'r+' so we can upgrade to write later
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), "r+"
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'
pid = os.getpid()
open_file = None # OpenFile object, if there is one
stat = None # stat result for the lockfile, if it exists
stat = None # stat result for the lockfile, if it exists
try:
# see whether we've seen this inode/pid before
@@ -110,7 +109,7 @@ def get_fh(self, path):
raise
# path does not exist -- fail if we won't be able to create it
parent = os.path.dirname(path) or "."
parent = os.path.dirname(path) or '.'
if not os.access(parent, os.W_OK):
raise CantCreateLockError(path)
@@ -120,7 +119,7 @@ def get_fh(self, path):
# we know path exists but not if it's writable. If it's read-only,
# only open the file for reading (and fail if we're trying to get
# an exclusive (write) lock on it)
os_mode, fh_mode = os.O_RDONLY, "r"
os_mode, fh_mode = os.O_RDONLY, 'r'
fd = os.open(path, os_mode)
fh = os.fdopen(fd, fh_mode)
@@ -163,10 +162,10 @@ def release_fh(self, path):
def _attempts_str(wait_time, nattempts):
# Don't print anything if we succeeded on the first try
if nattempts <= 1:
return ""
return ''
attempts = spack.util.string.plural(nattempts, "attempt")
return " after {0:0.2f}s and {1}".format(wait_time, attempts)
attempts = spack.util.string.plural(nattempts, 'attempt')
return ' after {0:0.2f}s and {1}'.format(wait_time, attempts)
class LockType(object):
@@ -189,7 +188,8 @@ def to_module(tid):
@staticmethod
def is_valid(op):
return op == LockType.READ or op == LockType.WRITE
return op == LockType.READ \
or op == LockType.WRITE
class Lock(object):
@@ -207,7 +207,8 @@ class Lock(object):
overlapping byte ranges in the same file).
"""
def __init__(self, path, start=0, length=0, default_timeout=None, debug=False, desc=""):
def __init__(self, path, start=0, length=0, default_timeout=None,
debug=False, desc=''):
"""Construct a new lock on the file at ``path``.
By default, the lock applies to the whole file. Optionally,
@@ -242,7 +243,7 @@ def __init__(self, path, start=0, length=0, default_timeout=None, debug=False, d
self.debug = debug
# optional debug description
self.desc = " ({0})".format(desc) if desc else ""
self.desc = ' ({0})'.format(desc) if desc else ''
# If the user doesn't set a default timeout, or if they choose
# None, 0, etc. then lock attempts will not time out (unless the
@@ -279,17 +280,17 @@ def _poll_interval_generator(_wait_times=None):
def __repr__(self):
"""Formal representation of the lock."""
rep = "{0}(".format(self.__class__.__name__)
rep = '{0}('.format(self.__class__.__name__)
for attr, value in self.__dict__.items():
rep += "{0}={1}, ".format(attr, value.__repr__())
return "{0})".format(rep.strip(", "))
rep += '{0}={1}, '.format(attr, value.__repr__())
return '{0})'.format(rep.strip(', '))
def __str__(self):
"""Readable string (with key fields) of the lock."""
location = "{0}[{1}:{2}]".format(self.path, self._start, self._length)
timeout = "timeout={0}".format(self.default_timeout)
activity = "#reads={0}, #writes={1}".format(self._reads, self._writes)
return "({0}, {1}, {2})".format(location, timeout, activity)
location = '{0}[{1}:{2}]'.format(self.path, self._start, self._length)
timeout = 'timeout={0}'.format(self.default_timeout)
activity = '#reads={0}, #writes={1}'.format(self._reads, self._writes)
return '({0}, {1}, {2})'.format(location, timeout, activity)
def _lock(self, op, timeout=None):
"""This takes a lock using POSIX locks (``fcntl.lockf``).
@@ -304,7 +305,7 @@ def _lock(self, op, timeout=None):
assert LockType.is_valid(op)
op_str = LockType.to_str(op)
self._log_acquiring("{0} LOCK".format(op_str))
self._log_acquiring('{0} LOCK'.format(op_str))
timeout = timeout or self.default_timeout
# Create file and parent directories if they don't exist.
@@ -312,16 +313,14 @@ def _lock(self, op, timeout=None):
self._ensure_parent_directory()
self._file = file_tracker.get_fh(self.path)
if LockType.to_module(op) == fcntl.LOCK_EX and self._file.mode == "r":
if LockType.to_module(op) == fcntl.LOCK_EX and self._file.mode == 'r':
# Attempt to upgrade to write lock w/a read-only file.
# If the file were writable, we'd have opened it 'r+'
raise LockROFileError(self.path)
self._log_debug(
"{0} locking [{1}:{2}]: timeout {3} sec".format(
op_str.lower(), self._start, self._length, timeout
)
)
self._log_debug("{0} locking [{1}:{2}]: timeout {3} sec"
.format(op_str.lower(), self._start, self._length,
timeout))
poll_intervals = iter(Lock._poll_interval_generator())
start_time = time.time()
@@ -340,7 +339,8 @@ def _lock(self, op, timeout=None):
total_wait_time = time.time() - start_time
return total_wait_time, num_attempts
raise LockTimeoutError("Timed out waiting for a {0} lock.".format(op_str.lower()))
raise LockTimeoutError("Timed out waiting for a {0} lock."
.format(op_str.lower()))
def _poll_lock(self, op):
"""Attempt to acquire the lock in a non-blocking manner. Return whether
@@ -349,19 +349,16 @@ def _poll_lock(self, op):
module_op = LockType.to_module(op)
try:
# Try to get the lock (will raise if not available.)
fcntl.lockf(
self._file, module_op | fcntl.LOCK_NB, self._length, self._start, os.SEEK_SET
)
fcntl.lockf(self._file, module_op | fcntl.LOCK_NB,
self._length, self._start, os.SEEK_SET)
# help for debugging distributed locking
if self.debug:
# All locks read the owner PID and host
self._read_log_debug_data()
self._log_debug(
"{0} locked {1} [{2}:{3}] (owner={4})".format(
LockType.to_str(op), self.path, self._start, self._length, self.pid
)
)
self._log_debug('{0} locked {1} [{2}:{3}] (owner={4})'
.format(LockType.to_str(op), self.path,
self._start, self._length, self.pid))
# Exclusive locks write their PID/host
if module_op == fcntl.LOCK_EX:
@@ -381,17 +378,14 @@ def _ensure_parent_directory(self):
# relative paths to lockfiles in the current directory have no parent
if not parent:
return "."
return '.'
try:
os.makedirs(parent)
except OSError as e:
# os.makedirs can fail in a number of ways when the directory already exists.
# With EISDIR, we know it exists, and others like EEXIST, EACCES, and EROFS
# are fine if we ensure that the directory exists.
# Python 3 allows an exist_ok parameter and ignores any OSError as long as
# the directory exists.
if not (e.errno == errno.EISDIR or os.path.isdir(parent)):
# makedirs can fail when the directory already exists.
if not (e.errno == errno.EEXIST and os.path.isdir(parent) or
e.errno == errno.EISDIR):
raise
return parent
@@ -402,9 +396,9 @@ def _read_log_debug_data(self):
line = self._file.read()
if line:
pid, host = line.strip().split(",")
_, _, self.pid = pid.rpartition("=")
_, _, self.host = host.rpartition("=")
pid, host = line.strip().split(',')
_, _, self.pid = pid.rpartition('=')
_, _, self.host = host.rpartition('=')
self.pid = int(self.pid)
def _write_log_debug_data(self):
@@ -429,7 +423,8 @@ def _unlock(self):
be masquerading as write locks, but this removes either.
"""
fcntl.lockf(self._file, fcntl.LOCK_UN, self._length, self._start, os.SEEK_SET)
fcntl.lockf(self._file, fcntl.LOCK_UN,
self._length, self._start, os.SEEK_SET)
file_tracker.release_fh(self.path)
self._file = None
@@ -454,7 +449,7 @@ def acquire_read(self, timeout=None):
wait_time, nattempts = self._lock(LockType.READ, timeout=timeout)
self._reads += 1
# Log if acquired, which includes counts when verbose
self._log_acquired("READ LOCK", wait_time, nattempts)
self._log_acquired('READ LOCK', wait_time, nattempts)
return True
else:
# Increment the read count for nested lock tracking
@@ -479,7 +474,7 @@ def acquire_write(self, timeout=None):
wait_time, nattempts = self._lock(LockType.WRITE, timeout=timeout)
self._writes += 1
# Log if acquired, which includes counts when verbose
self._log_acquired("WRITE LOCK", wait_time, nattempts)
self._log_acquired('WRITE LOCK', wait_time, nattempts)
# return True only if we weren't nested in a read lock.
# TODO: we may need to return two values: whether we got
@@ -566,7 +561,7 @@ def release_read(self, release_fn=None):
"""
assert self._reads > 0
locktype = "READ LOCK"
locktype = 'READ LOCK'
if self._reads == 1 and self._writes == 0:
self._log_releasing(locktype)
@@ -574,7 +569,7 @@ def release_read(self, release_fn=None):
release_fn = release_fn or true_fn
result = release_fn()
self._unlock() # can raise LockError.
self._unlock() # can raise LockError.
self._reads = 0
self._log_released(locktype)
return result
@@ -602,14 +597,14 @@ def release_write(self, release_fn=None):
assert self._writes > 0
release_fn = release_fn or true_fn
locktype = "WRITE LOCK"
locktype = 'WRITE LOCK'
if self._writes == 1 and self._reads == 0:
self._log_releasing(locktype)
# we need to call release_fn before releasing the lock
result = release_fn()
self._unlock() # can raise LockError.
self._unlock() # can raise LockError.
self._writes = 0
self._log_released(locktype)
return result
@@ -630,55 +625,56 @@ def cleanup(self):
raise LockError("Attempting to cleanup active lock.")
def _get_counts_desc(self):
return (
"(reads {0}, writes {1})".format(self._reads, self._writes) if tty.is_verbose() else ""
)
return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
if tty.is_verbose() else ''
def _log_acquired(self, locktype, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Acquired at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, "{0}{1}".format(desc, attempts_part)))
desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, '{0}{1}'
.format(desc, attempts_part)))
def _log_acquiring(self, locktype):
self._log_debug(self._status_msg(locktype, "Acquiring"), level=3)
self._log_debug(self._status_msg(locktype, 'Acquiring'), level=3)
def _log_debug(self, *args, **kwargs):
"""Output lock debug messages."""
kwargs["level"] = kwargs.get("level", 2)
kwargs['level'] = kwargs.get('level', 2)
tty.debug(*args, **kwargs)
def _log_downgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Downgraded at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("READ LOCK", "{0}{1}".format(desc, attempts_part)))
desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg('READ LOCK', '{0}{1}'
.format(desc, attempts_part)))
def _log_downgrading(self):
self._log_debug(self._status_msg("WRITE LOCK", "Downgrading"), level=3)
self._log_debug(self._status_msg('WRITE LOCK', 'Downgrading'), level=3)
def _log_released(self, locktype):
now = datetime.now()
desc = "Released at %s" % now.strftime("%H:%M:%S.%f")
desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, desc))
def _log_releasing(self, locktype):
self._log_debug(self._status_msg(locktype, "Releasing"), level=3)
self._log_debug(self._status_msg(locktype, 'Releasing'), level=3)
def _log_upgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = "Upgraded at %s" % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("WRITE LOCK", "{0}{1}".format(desc, attempts_part)))
desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg('WRITE LOCK', '{0}{1}'.
format(desc, attempts_part)))
def _log_upgrading(self):
self._log_debug(self._status_msg("READ LOCK", "Upgrading"), level=3)
self._log_debug(self._status_msg('READ LOCK', 'Upgrading'), level=3)
def _status_msg(self, locktype, status):
status_desc = "[{0}] {1}".format(status, self._get_counts_desc())
return "{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}".format(
locktype, self, status_desc
)
status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
locktype, self, status_desc)
class LockTransaction(object):
@@ -719,7 +715,7 @@ def __init__(self, lock, acquire=None, release=None, timeout=None):
def __enter__(self):
if self._enter() and self._acquire_fn:
self._as = self._acquire_fn()
if hasattr(self._as, "__enter__"):
if hasattr(self._as, '__enter__'):
return self._as.__enter__()
else:
return self._as
@@ -731,7 +727,7 @@ def release_fn():
if self._release_fn is not None:
return self._release_fn(type, value, traceback)
if self._as and hasattr(self._as, "__exit__"):
if self._as and hasattr(self._as, '__exit__'):
if self._as.__exit__(type, value, traceback):
suppress = True
@@ -743,7 +739,6 @@ def release_fn():
class ReadTransaction(LockTransaction):
"""LockTransaction context manager that does a read and releases it."""
def _enter(self):
return self._lock.acquire_read(self._timeout)
@@ -753,7 +748,6 @@ def _exit(self, release_fn):
class WriteTransaction(LockTransaction):
"""LockTransaction context manager that does a write and releases it."""
def _enter(self):
return self._lock.acquire_write(self._timeout)
@@ -767,7 +761,6 @@ class LockError(Exception):
class LockDowngradeError(LockError):
"""Raised when unable to downgrade from a write to a read lock."""
def __init__(self, path):
msg = "Cannot downgrade lock from write to read on file: %s" % path
super(LockDowngradeError, self).__init__(msg)
@@ -783,7 +776,6 @@ class LockTimeoutError(LockError):
class LockUpgradeError(LockError):
"""Raised when unable to upgrade from a read to a write lock."""
def __init__(self, path):
msg = "Cannot upgrade lock from read to write on file: %s" % path
super(LockUpgradeError, self).__init__(msg)
@@ -795,7 +787,6 @@ class LockPermissionError(LockError):
class LockROFileError(LockPermissionError):
"""Tried to take an exclusive lock on a read-only file."""
def __init__(self, path):
msg = "Can't take write lock on read-only file: %s" % path
super(LockROFileError, self).__init__(msg)
@@ -803,7 +794,6 @@ def __init__(self, path):
class CantCreateLockError(LockPermissionError):
"""Attempt to create a lock in an unwritable location."""
def __init__(self, path):
msg = "cannot create lock '%s': " % path
msg += "file does not exist and location is not writable"


@@ -10,7 +10,7 @@
"""
from multiprocessing import Semaphore, Value
__all__ = ["Barrier"]
__all__ = ['Barrier']
class Barrier:
@@ -24,7 +24,7 @@ class Barrier:
def __init__(self, n, timeout=None):
self.n = n
self.to = timeout
self.count = Value("i", 0)
self.count = Value('i', 0)
self.mutex = Semaphore(1)
self.turnstile1 = Semaphore(0)
self.turnstile2 = Semaphore(1)
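A usage sketch, assuming the class's wait() method (defined below the hunk shown here):

from multiprocessing import Process

from llnl.util.multiproc import Barrier


def worker(barrier, i):
    barrier.wait()  # all three processes rendezvous before proceeding
    print("process", i, "released")


if __name__ == "__main__":
    barrier = Barrier(3, timeout=10)
    procs = [Process(target=worker, args=(barrier, i)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()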


@@ -11,7 +11,7 @@
from llnl.util import lang
is_windows = _platform == "win32"
is_windows = _platform == 'win32'
if is_windows:
from win32file import CreateHardLink
@@ -47,7 +47,7 @@ def _win32_junction(path, link):
# os.symlink will fail if link exists, emulate the behavior here
if exists(link):
raise OSError(errno.EEXIST, "File exists: %s -> %s" % (link, path))
raise OSError(errno.EEXIST, 'File exists: %s -> %s' % (link, path))
if not os.path.isabs(path):
parent = os.path.join(link, os.pardir)
@@ -61,14 +61,13 @@ def _win32_junction(path, link):
def _win32_can_symlink():
tempdir = tempfile.mkdtemp()
dpath = join(tempdir, "dpath")
fpath = join(tempdir, "fpath.txt")
dpath = join(tempdir, 'dpath')
fpath = join(tempdir, 'fpath.txt')
dlink = join(tempdir, "dlink")
flink = join(tempdir, "flink.txt")
dlink = join(tempdir, 'dlink')
flink = join(tempdir, 'flink.txt')
import llnl.util.filesystem as fs
fs.touchp(fpath)
try:
@@ -107,6 +106,7 @@ def _win32_is_junction(path):
FILE_ATTRIBUTE_REPARSE_POINT = 0x400
res = GetFileAttributes(path)
return res != INVALID_FILE_ATTRIBUTES and bool(res & FILE_ATTRIBUTE_REPARSE_POINT)
return res != INVALID_FILE_ATTRIBUTES and \
bool(res & FILE_ATTRIBUTE_REPARSE_POINT)
return False
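A usage sketch: on POSIX symlink() delegates to os.symlink, while on Windows it falls back to the junction/hardlink emulation above when real symlinks aren't permitted.

import os
import tempfile

from llnl.util.symlink import islink, symlink

tmp = tempfile.mkdtemp()
target = os.path.join(tmp, "target.txt")
open(target, "w").close()

symlink(target, os.path.join(tmp, "link.txt"))
assert islink(os.path.join(tmp, "link.txt"))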


@@ -54,7 +54,7 @@ def is_stacktrace():
def set_debug(level=0):
global _debug
assert level >= 0, "Debug level must be a non-negative value"
assert level >= 0, 'Debug level must be a non-negative value'
_debug = level
@@ -110,7 +110,10 @@ def output_filter(filter_fn):
class SuppressOutput:
"""Class for disabling output in a scope using 'with' keyword"""
def __init__(self, msg_enabled=True, warn_enabled=True, error_enabled=True):
def __init__(self,
msg_enabled=True,
warn_enabled=True,
error_enabled=True):
self._msg_enabled_initial = _msg_enabled
self._warn_enabled_initial = _warn_enabled
@@ -161,10 +164,11 @@ def get_timestamp(force=False):
"""Get a string timestamp"""
if _debug or _timestamp or force:
# Note inclusion of the PID is useful for parallel builds.
pid = ", {0}".format(os.getpid()) if show_pid() else ""
return "[{0}{1}] ".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
pid = ', {0}'.format(os.getpid()) if show_pid() else ''
return '[{0}{1}] '.format(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
else:
return ""
return ''
def msg(message, *args, **kwargs):
@@ -174,14 +178,26 @@ def msg(message, *args, **kwargs):
if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message))
newline = kwargs.get("newline", True)
newline = kwargs.get('newline', True)
st_text = ""
if _stacktrace:
st_text = process_stacktrace(2)
if newline:
cprint("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
cprint(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
else:
cwrite("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
cwrite(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
for arg in args:
print(indent + _output_filter(six.text_type(arg)))
@@ -190,19 +206,23 @@ def info(message, *args, **kwargs):
if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message))
format = kwargs.get("format", "*b")
stream = kwargs.get("stream", sys.stdout)
wrap = kwargs.get("wrap", False)
break_long_words = kwargs.get("break_long_words", False)
st_countback = kwargs.get("countback", 3)
format = kwargs.get('format', '*b')
stream = kwargs.get('stream', sys.stdout)
wrap = kwargs.get('wrap', False)
break_long_words = kwargs.get('break_long_words', False)
st_countback = kwargs.get('countback', 3)
st_text = ""
if _stacktrace:
st_text = process_stacktrace(st_countback)
cprint(
"@%s{%s==>} %s%s"
% (format, st_text, get_timestamp(), cescape(_output_filter(six.text_type(message)))),
stream=stream,
"@%s{%s==>} %s%s" % (
format,
st_text,
get_timestamp(),
cescape(_output_filter(six.text_type(message)))
),
stream=stream
)
for arg in args:
if wrap:
@@ -210,25 +230,27 @@ def info(message, *args, **kwargs):
_output_filter(six.text_type(arg)),
initial_indent=indent,
subsequent_indent=indent,
break_long_words=break_long_words,
break_long_words=break_long_words
)
for line in lines:
stream.write(line + "\n")
stream.write(line + '\n')
else:
stream.write(indent + _output_filter(six.text_type(arg)) + "\n")
stream.write(
indent + _output_filter(six.text_type(arg)) + '\n'
)
def verbose(message, *args, **kwargs):
if _verbose:
kwargs.setdefault("format", "c")
kwargs.setdefault('format', 'c')
info(message, *args, **kwargs)
def debug(message, *args, **kwargs):
level = kwargs.get("level", 1)
level = kwargs.get('level', 1)
if is_debug(level):
kwargs.setdefault("format", "g")
kwargs.setdefault("stream", sys.stderr)
kwargs.setdefault('format', 'g')
kwargs.setdefault('stream', sys.stderr)
info(message, *args, **kwargs)
@@ -236,8 +258,8 @@ def error(message, *args, **kwargs):
if not error_enabled():
return
kwargs.setdefault("format", "*r")
kwargs.setdefault("stream", sys.stderr)
kwargs.setdefault('format', '*r')
kwargs.setdefault('stream', sys.stderr)
info("Error: " + six.text_type(message), *args, **kwargs)
@@ -245,27 +267,27 @@ def warn(message, *args, **kwargs):
if not warn_enabled():
return
kwargs.setdefault("format", "*Y")
kwargs.setdefault("stream", sys.stderr)
kwargs.setdefault('format', '*Y')
kwargs.setdefault('stream', sys.stderr)
info("Warning: " + six.text_type(message), *args, **kwargs)
def die(message, *args, **kwargs):
kwargs.setdefault("countback", 4)
kwargs.setdefault('countback', 4)
error(message, *args, **kwargs)
sys.exit(1)
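Typical use of these message helpers (formatting only; nothing is returned):

import llnl.util.tty as tty

tty.set_debug(1)
tty.msg("installing", "detail line one", "detail line two")
tty.warn("written to stderr in yellow")
tty.debug("shown only while the debug level is >= 1")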
def get_number(prompt, **kwargs):
default = kwargs.get("default", None)
abort = kwargs.get("abort", None)
default = kwargs.get('default', None)
abort = kwargs.get('abort', None)
if default is not None and abort is not None:
prompt += " (default is %s, %s to abort) " % (default, abort)
prompt += ' (default is %s, %s to abort) ' % (default, abort)
elif default is not None:
prompt += " (default is %s) " % default
prompt += ' (default is %s) ' % default
elif abort is not None:
prompt += " (%s to abort) " % abort
prompt += ' (%s to abort) ' % abort
number = None
while number is None:
@@ -288,16 +310,17 @@ def get_number(prompt, **kwargs):
def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get("default", None)
default_value = kwargs.get('default', None)
if default_value is None:
prompt += " [y/n] "
prompt += ' [y/n] '
elif default_value is True:
prompt += " [Y/n] "
prompt += ' [Y/n] '
elif default_value is False:
prompt += " [y/N] "
prompt += ' [y/N] '
else:
raise ValueError("default for get_yes_no() must be True, False, or None.")
raise ValueError(
"default for get_yes_no() must be True, False, or None.")
result = None
while result is None:
@@ -308,9 +331,9 @@ def get_yes_or_no(prompt, **kwargs):
if result is None:
print("Please enter yes or no.")
else:
if ans == "y" or ans == "yes":
if ans == 'y' or ans == 'yes':
result = True
elif ans == "n" or ans == "no":
elif ans == 'n' or ans == 'no':
result = False
return result
@@ -322,12 +345,12 @@ def hline(label=None, **kwargs):
char (str): Char to draw the line with. Default '-'
max_width (int): Maximum width of the line. Default is 64 chars.
"""
char = kwargs.pop("char", "-")
max_width = kwargs.pop("max_width", 64)
char = kwargs.pop('char', '-')
max_width = kwargs.pop('max_width', 64)
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for this function." % next(kwargs.iterkeys())
)
"'%s' is an invalid keyword argument for this function."
% next(kwargs.iterkeys()))
rows, cols = terminal_size()
if not cols:
@@ -351,14 +374,13 @@ def hline(label=None, **kwargs):
def terminal_size():
"""Gets the dimensions of the console: (rows, cols)."""
if _platform != "win32":
def ioctl_gwinsz(fd):
try:
rc = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
rc = struct.unpack('hh', fcntl.ioctl(
fd, termios.TIOCGWINSZ, '1234'))
except BaseException:
return
return rc
rc = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not rc:
try:
@@ -368,14 +390,12 @@ def ioctl_gwinsz(fd):
except BaseException:
pass
if not rc:
rc = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80))
rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(rc[0]), int(rc[1])
else:
if sys.version_info[0] < 3:
raise RuntimeError(
"Terminal size not obtainable on Windows with a\
Python version older than 3"
)
rc = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80))
raise RuntimeError("Terminal size not obtainable on Windows with a\
Python version older than 3")
rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(rc[0]), int(rc[1])


@@ -18,27 +18,29 @@
class ColumnConfig:
def __init__(self, cols):
self.cols = cols
self.line_length = 0
self.valid = True
self.widths = [0] * cols # does not include ansi colors
self.widths = [0] * cols # does not include ansi colors
def __repr__(self):
attrs = [(a, getattr(self, a)) for a in dir(self) if not a.startswith("__")]
attrs = [(a, getattr(self, a))
for a in dir(self) if not a.startswith("__")]
return "<Config: %s>" % ", ".join("%s: %r" % a for a in attrs)
def config_variable_cols(elts, console_width, padding, cols=0):
"""Variable-width column fitting algorithm.
This function determines the most columns that can fit in the
screen width. Unlike uniform fitting, where all columns take
the width of the longest element in the list, each column takes
the width of its own longest element. This packs elements more
efficiently on screen.
This function determines the most columns that can fit in the
screen width. Unlike uniform fitting, where all columns take
the width of the longest element in the list, each column takes
the width of its own longest element. This packs elements more
efficiently on screen.
If cols is nonzero, force that many columns.
If cols is nonzero, force that many columns.
"""
if cols < 0:
raise ValueError("cols must be non-negative.")
@@ -62,8 +64,8 @@ def config_variable_cols(elts, console_width, padding, cols=0):
if conf.widths[col] < (length + p):
conf.line_length += length + p - conf.widths[col]
conf.widths[col] = length + p
conf.valid = conf.line_length < console_width
conf.widths[col] = length + p
conf.valid = (conf.line_length < console_width)
try:
config = next(conf for conf in reversed(configs) if conf.valid)
@@ -79,9 +81,9 @@ def config_variable_cols(elts, console_width, padding, cols=0):
def config_uniform_cols(elts, console_width, padding, cols=0):
"""Uniform-width column fitting algorithm.
Determines the longest element in the list, and determines how
many columns of that width will fit on screen. Returns a
corresponding column config.
Determines the longest element in the list, and determines how
many columns of that width will fit on screen. Returns a
corresponding column config.
"""
if cols < 0:
raise ValueError("cols must be non-negative.")
@@ -120,18 +122,18 @@ def colify(elts, **options):
and fit less data on the screen
"""
# Get keyword arguments or set defaults
cols = options.pop("cols", 0)
output = options.pop("output", sys.stdout)
indent = options.pop("indent", 0)
padding = options.pop("padding", 2)
tty = options.pop("tty", None)
method = options.pop("method", "variable")
cols = options.pop("cols", 0)
output = options.pop("output", sys.stdout)
indent = options.pop("indent", 0)
padding = options.pop("padding", 2)
tty = options.pop('tty', None)
method = options.pop("method", "variable")
console_cols = options.pop("width", None)
if options:
raise TypeError(
"'%s' is an invalid keyword argument for this function." % next(options.iterkeys())
)
"'%s' is an invalid keyword argument for this function."
% next(options.iterkeys()))
# elts needs to be an array of strings so we can count the elements
elts = [text_type(elt) for elt in elts]
@@ -139,10 +141,10 @@ def colify(elts, **options):
return (0, ())
# environment size is of the form "<rows>x<cols>"
env_size = os.environ.get("COLIFY_SIZE")
env_size = os.environ.get('COLIFY_SIZE')
if env_size:
try:
r, c = env_size.split("x")
r, c = env_size.split('x')
console_rows, console_cols = int(r), int(c)
tty = True
except BaseException:
@@ -178,7 +180,7 @@ def colify(elts, **options):
elt = col * rows + row
width = config.widths[col] + cextra(elts[elt])
if col < cols - 1:
fmt = "%%-%ds" % width
fmt = '%%-%ds' % width
output.write(fmt % elts[elt])
else:
# Don't pad the rightmost column (spaces can wrap on
@@ -196,15 +198,15 @@ def colify(elts, **options):
def colify_table(table, **options):
"""Version of ``colify()`` for data expressed in rows, (list of lists).
Same as regular colify but:
Same as regular colify but:
1. This takes a list of lists, where each sub-list must be the
same length, and each is interpreted as a row in a table.
Regular colify displays a sequential list of values in columns.
1. This takes a list of lists, where each sub-list must be the
same length, and each is interpreted as a row in a table.
Regular colify displays a sequential list of values in columns.
2. Regular colify will always print with 1 column when the output
is not a tty. This will always print with same dimensions of
the table argument.
2. Regular colify will always print with 1 column when the output
is not a tty. This will always print with same dimensions of
the table argument.
"""
if table is None:
@@ -219,20 +221,20 @@ def transpose():
for row in table:
yield row[i]
if "cols" in options:
if 'cols' in options:
raise ValueError("Cannot override columsn in colify_table.")
options["cols"] = columns
options['cols'] = columns
# don't reduce to 1 column for non-tty
options["tty"] = True
options['tty'] = True
colify(transpose(), **options)
def colified(elts, **options):
"""Invokes the ``colify()`` function but returns the result as a string
instead of writing it to an output string."""
instead of writing it to an output string."""
sio = StringIO()
options["output"] = sio
options['output'] = sio
colify(elts, **options)
return sio.getvalue()
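For example (width is forced so the layout does not depend on the real terminal):

from llnl.util.tty.colify import colified, colify

colify(["one", "two", "three", "four"], indent=2, width=40)
text = colified(["a", "b", "c"], cols=3, width=40)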


@@ -76,33 +76,29 @@ def __init__(self, message):
# Text styles for ansi codes
styles = {"*": "1", "_": "4", None: "0"} # bold # underline # plain
styles = {'*': '1', # bold
'_': '4', # underline
None: '0'} # plain
# Dim and bright ansi colors
colors = {
"k": 30,
"K": 90, # black
"r": 31,
"R": 91, # red
"g": 32,
"G": 92, # green
"y": 33,
"Y": 93, # yellow
"b": 34,
"B": 94, # blue
"m": 35,
"M": 95, # magenta
"c": 36,
"C": 96, # cyan
"w": 37,
"W": 97,
} # white
colors = {'k': 30, 'K': 90, # black
'r': 31, 'R': 91, # red
'g': 32, 'G': 92, # green
'y': 33, 'Y': 93, # yellow
'b': 34, 'B': 94, # blue
'm': 35, 'M': 95, # magenta
'c': 36, 'C': 96, # cyan
'w': 37, 'W': 97} # white
# Regex to be used for color formatting
color_re = r"@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)"
color_re = r'@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)'
# Mapping from color arguments to values for tty.set_color
color_when_values = {"always": True, "auto": None, "never": False}
color_when_values = {
'always': True,
'auto': None,
'never': False
}
# Force color; None: Only color if stdout is a tty
# True: Always colorize output, False: Never colorize output
@@ -118,7 +114,7 @@ def _color_when_value(when):
if when in color_when_values:
return color_when_values[when]
elif when not in color_when_values.values():
raise ValueError("Invalid color setting: %s" % when)
raise ValueError('Invalid color setting: %s' % when)
return when
@@ -150,19 +146,16 @@ def color_when(value):
class match_to_ansi(object):
def __init__(self, color=True, enclose=False):
def __init__(self, color=True):
self.color = _color_when_value(color)
self.enclose = enclose
def escape(self, s):
"""Returns a TTY escape sequence for a color"""
if self.color:
if self.enclose:
return r"\[\033[%sm\]" % s
else:
return "\033[%sm" % s
return "\033[%sm" % s
else:
return ""
return ''
def __call__(self, match):
"""Convert a match object generated by ``color_re`` into an ansi
@@ -171,22 +164,22 @@ def __call__(self, match):
style, color, text = match.groups()
m = match.group(0)
if m == "@@":
return "@"
elif m == "@.":
if m == '@@':
return '@'
elif m == '@.':
return self.escape(0)
elif m == "@":
raise ColorParseError("Incomplete color format: '%s' in %s" % (m, match.string))
elif m == '@':
raise ColorParseError("Incomplete color format: '%s' in %s"
% (m, match.string))
string = styles[style]
if color:
if color not in colors:
raise ColorParseError(
"Invalid color specifier: '%s' in '%s'" % (color, match.string)
)
string += ";" + str(colors[color])
raise ColorParseError("Invalid color specifier: '%s' in '%s'"
% (color, match.string))
string += ';' + str(colors[color])
colored_text = ""
colored_text = ''
if text:
colored_text = text + self.escape(0)
@@ -205,31 +198,29 @@ def colorize(string, **kwargs):
Keyword Arguments:
color (bool): If False, output will be plain text without control
codes, for output to non-console devices.
enclose (bool): If True, enclose ansi color sequences with
square brackets to prevent misestimation of terminal width.
"""
color = _color_when_value(kwargs.get("color", get_color_when()))
string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose")), string)
string = string.replace("}}", "}")
color = _color_when_value(kwargs.get('color', get_color_when()))
string = re.sub(color_re, match_to_ansi(color), string)
string = string.replace('}}', '}')
return string
def clen(string):
"""Return the length of a string, excluding ansi color sequences."""
return len(re.sub(r"\033[^m]*m", "", string))
return len(re.sub(r'\033[^m]*m', '', string))
def cextra(string):
"""Length of extra color characters in a string"""
return len("".join(re.findall(r"\033[^m]*m", string)))
return len(''.join(re.findall(r'\033[^m]*m', string)))
def cwrite(string, stream=None, color=None):
"""Replace all color expressions in string with ANSI control
codes and write the result to the stream. If color is
False, this will write plain text with no color. If True,
then it will always write colored output. If not supplied,
then it will be set based on stream.isatty().
codes and write the result to the stream. If color is
False, this will write plain text with no color. If True,
then it will always write colored output. If not supplied,
then it will be set based on stream.isatty().
"""
stream = sys.stdout if stream is None else stream
if color is None:
@@ -260,19 +251,20 @@ def cescape(string):
(str): the string with color codes escaped
"""
string = six.text_type(string)
string = string.replace("@", "@@")
string = string.replace("}", "}}")
string = string.replace('@', '@@')
string = string.replace('}', '}}')
return string
class ColorStream(object):
def __init__(self, stream, color=None):
self._stream = stream
self._color = color
def write(self, string, **kwargs):
raw = kwargs.get("raw", False)
raw_write = getattr(self._stream, "write")
raw = kwargs.get('raw', False)
raw_write = getattr(self._stream, 'write')
color = self._color
if self._color is None:
@@ -283,6 +275,6 @@ def write(self, string, **kwargs):
raw_write(colorize(string, color=color))
def writelines(self, sequence, **kwargs):
raw = kwargs.get("raw", False)
raw = kwargs.get('raw', False)
for string in sequence:
self.write(string, raw=raw)
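A usage sketch of the color grammar that color_re parses: @ introduces an optional style and color code followed by a braced span of text.

from llnl.util.tty.color import clen, colorize

s = colorize("@*b{==>} @g{4 installed}")  # bold blue arrow, green text
assert clen(s) == len("==> 4 installed")  # clen ignores the ansi codes
print(colorize("@R{error:} 100@@% failed"))  # '@@' is a literal '@'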

View File

@@ -31,22 +31,21 @@
termios = None # type: Optional[ModuleType]
try:
import termios as term_mod
termios = term_mod
except ImportError:
pass
# Use this to strip escape sequences
_escape = re.compile(r"\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07")
_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07')
# control characters for enabling/disabling echo
#
# We use control characters to ensure that echo enable/disable are inline
# with the other output. We always follow these with a newline to ensure
# one per line; the following newline is ignored in output.
xon, xoff = "\x11\n", "\x13\n"
control = re.compile("(\x11\n|\x13\n)")
xon, xoff = '\x11\n', '\x13\n'
control = re.compile('(\x11\n|\x13\n)')
@contextmanager
@@ -60,13 +59,17 @@ def ignore_signal(signum):
def _is_background_tty(stream):
"""True if the stream is a tty and calling process is in the background."""
return stream.isatty() and os.getpgrp() != os.tcgetpgrp(stream.fileno())
"""True if the stream is a tty and calling process is in the background.
"""
return (
stream.isatty() and
os.getpgrp() != os.tcgetpgrp(stream.fileno())
)
def _strip(line):
"""Strip color and control characters from a line."""
return _escape.sub("", line)
return _escape.sub('', line)
class keyboard_input(object):
@@ -144,7 +147,6 @@ class keyboard_input(object):
a TTY, ``keyboard_input`` has no effect.
"""
def __init__(self, stream):
"""Create a context manager that will enable keyboard input on stream.
@@ -202,7 +204,7 @@ def check_fg_bg(self):
bg = self._is_background()
# restore sanity if flags are amiss -- see diagram in class docs
if not bg and any(flags): # fg, but input not enabled
if not bg and any(flags): # fg, but input not enabled
self._enable_keyboard_input()
elif bg and not all(flags): # bg, but input enabled
self._restore_default_terminal_settings()
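A sketch of the intended call pattern, mirroring how the writer daemon below uses it (``poll_events`` is a hypothetical stand-in for the caller's own loop body):

import sys

from llnl.util.tty.log import keyboard_input

with keyboard_input(sys.stdin) as kb:
    while True:
        kb.check_fg_bg()  # re-sync echo/canon flags after fg/bg moves
        poll_events()     # hypothetical: read input, handle output, ...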
@@ -226,7 +228,8 @@ def __enter__(self):
# Install a signal handler to disable/enable keyboard input
# when the process moves between foreground and background.
self.old_handlers[signal.SIGTSTP] = signal.signal(signal.SIGTSTP, self._tstp_handler)
self.old_handlers[signal.SIGTSTP] = signal.signal(
signal.SIGTSTP, self._tstp_handler)
# add an atexit handler to ensure the terminal is restored
atexit.register(self._restore_default_terminal_settings)
@@ -255,7 +258,6 @@ class Unbuffered(object):
This is implemented by forcing a flush after each write.
"""
def __init__(self, stream):
self.stream = stream
@@ -300,7 +302,6 @@ class FileWrapper(object):
yet), or neither. When unwrapped, it returns an open file (or file-like)
object.
"""
def __init__(self, file_like):
# This records whether the file-like object returned by "unwrap" is
# purely in-memory. In that case a subprocess will need to explicitly
@@ -324,9 +325,9 @@ def unwrap(self):
if self.open:
if self.file_like:
if sys.version_info < (3,):
self.file = open(self.file_like, "w")
self.file = open(self.file_like, 'w')
else:
self.file = open(self.file_like, "w", encoding="utf-8") # novm
self.file = open(self.file_like, 'w', encoding='utf-8') # novm
else:
self.file = StringIO()
return self.file
@@ -342,9 +343,8 @@ def close(self):
class MultiProcessFd(object):
"""Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
def __init__(self, fd):
self._connection = None
self._fd = None
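A minimal sketch of the intended use (the child function is hypothetical):

import multiprocessing
import os

from llnl.util.tty.log import MultiProcessFd

def child(fd_wrapper):  # hypothetical worker
    with os.fdopen(fd_wrapper.fd) as f:
        print(f.readline())

read_fd, write_fd = os.pipe()
p = multiprocessing.Process(target=child, args=(MultiProcessFd(read_fd),))
p.start()
os.write(write_fd, b"hello\n")
p.join()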
@@ -434,7 +434,7 @@ def log_output(*args, **kwargs):
This method is actually a factory serving a per platform
(unix vs windows) log_output class
"""
if sys.platform == "win32":
if sys.platform == 'win32':
return winlog(*args, **kwargs)
else:
return nixlog(*args, **kwargs)
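A usage sketch of the factory (the log file name is illustrative):

from llnl.util.tty.log import log_output

with log_output("install.log", echo=False) as logger:
    print("captured in install.log only")
    with logger.force_echo():
        print("captured and echoed to the terminal")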
@@ -454,9 +454,8 @@ class nixlog(object):
work within test frameworks like nose and pytest.
"""
def __init__(
self, file_like=None, echo=False, debug=0, buffer=False, env=None, filter_fn=None
):
def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
env=None, filter_fn=None):
"""Create a new output log context manager.
Args:
@@ -525,7 +524,8 @@ def __enter__(self):
raise RuntimeError("Can't re-enter the same log_output!")
if self.file_like is None:
raise RuntimeError("file argument must be set by either __init__ or __call__")
raise RuntimeError(
"file argument must be set by either __init__ or __call__")
# set up a stream for the daemon to write to
self.log_file = FileWrapper(self.file_like)
@@ -555,7 +555,9 @@ def __enter__(self):
input_multiprocess_fd = None
try:
if sys.stdin.isatty():
input_multiprocess_fd = MultiProcessFd(os.dup(sys.stdin.fileno()))
input_multiprocess_fd = MultiProcessFd(
os.dup(sys.stdin.fileno())
)
except BaseException:
# just don't forward input if this fails
pass
@@ -564,14 +566,9 @@ def __enter__(self):
self.process = multiprocessing.Process(
target=_writer_daemon,
args=(
input_multiprocess_fd,
read_multiprocess_fd,
write_fd,
self.echo,
self.log_file,
child_pipe,
self.filter_fn,
),
input_multiprocess_fd, read_multiprocess_fd, write_fd,
self.echo, self.log_file, child_pipe, self.filter_fn
)
)
self.process.daemon = True # must set before start()
self.process.start()
@@ -612,7 +609,7 @@ def __enter__(self):
self._saved_stderr = sys.stderr
# create a file object for the pipe; redirect to it.
pipe_fd_out = os.fdopen(write_fd, "w")
pipe_fd_out = os.fdopen(write_fd, 'w')
sys.stdout = pipe_fd_out
sys.stderr = pipe_fd_out
@@ -677,7 +674,8 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def force_echo(self):
"""Context manager to force local echo, even if echo is off."""
if not self._active:
raise RuntimeError("Can't call force_echo() outside log_output region!")
raise RuntimeError(
"Can't call force_echo() outside log_output region!")
# This uses the xon/xoff to highlight regions to be echoed in the
# output. We us these control characters rather than, say, a
@@ -693,26 +691,25 @@ def force_echo(self):
class StreamWrapper:
"""Wrapper class to handle redirection of io streams"""
""" Wrapper class to handle redirection of io streams """
def __init__(self, sys_attr):
self.sys_attr = sys_attr
self.saved_stream = None
if sys.platform.startswith("win32"):
if sys.platform.startswith('win32'):
if sys.version_info < (3, 5):
libc = ctypes.CDLL(ctypes.util.find_library("c"))
libc = ctypes.CDLL(ctypes.util.find_library('c'))
else:
if hasattr(sys, "gettotalrefcount"): # debug build
libc = ctypes.CDLL("ucrtbased")
if hasattr(sys, 'gettotalrefcount'): # debug build
libc = ctypes.CDLL('ucrtbased')
else:
libc = ctypes.CDLL("api-ms-win-crt-stdio-l1-1-0")
libc = ctypes.CDLL('api-ms-win-crt-stdio-l1-1-0')
kernel32 = ctypes.WinDLL("kernel32")
kernel32 = ctypes.WinDLL('kernel32')
# https://docs.microsoft.com/en-us/windows/console/getstdhandle
if self.sys_attr == "stdout":
if self.sys_attr == 'stdout':
STD_HANDLE = -11
elif self.sys_attr == "stderr":
elif self.sys_attr == 'stderr':
STD_HANDLE = -12
else:
raise KeyError(self.sys_attr)
@@ -731,7 +728,7 @@ def __init__(self, sys_attr):
def redirect_stream(self, to_fd):
"""Redirect stdout to the given file descriptor."""
# Flush the C-level buffer stream
if sys.platform.startswith("win32"):
if sys.platform.startswith('win32'):
self.libc.fflush(None)
else:
self.libc.fflush(self.c_stream)
@@ -742,13 +739,13 @@ def redirect_stream(self, to_fd):
# Make orig_stream_fd point to the same file as to_fd
os.dup2(to_fd, self.orig_stream_fd)
# Set sys_stream to a new stream that points to the redirected fd
new_buffer = open(self.orig_stream_fd, "wb")
new_buffer = open(self.orig_stream_fd, 'wb')
new_stream = io.TextIOWrapper(new_buffer)
setattr(sys, self.sys_attr, new_stream)
self.sys_stream = getattr(sys, self.sys_attr)
def flush(self):
if sys.platform.startswith("win32"):
if sys.platform.startswith('win32'):
self.libc.fflush(None)
else:
self.libc.fflush(self.c_stream)
@@ -771,16 +768,14 @@ class winlog(object):
Does not support the use of 'v' toggling as nixlog does.
"""
def __init__(
self, file_like=None, echo=False, debug=0, buffer=False, env=None, filter_fn=None
):
def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
env=None, filter_fn=None):
self.env = env
self.debug = debug
self.echo = echo
self.logfile = file_like
self.stdout = StreamWrapper("stdout")
self.stderr = StreamWrapper("stderr")
self.stdout = StreamWrapper('stdout')
self.stderr = StreamWrapper('stderr')
self._active = False
self._ioflag = False
self.old_stdout = sys.stdout
@@ -791,7 +786,8 @@ def __enter__(self):
raise RuntimeError("Can't re-enter the same log_output!")
if self.logfile is None:
raise RuntimeError("file argument must be set by __init__ ")
raise RuntimeError(
"file argument must be set by __init__ ")
# Open both write and reading on logfile
if type(self.logfile) == StringIO:
@@ -800,8 +796,8 @@ def __enter__(self):
sys.stdout = self.logfile
sys.stderr = self.logfile
else:
self.writer = open(self.logfile, mode="wb+")
self.reader = open(self.logfile, mode="rb+")
self.writer = open(self.logfile, mode='wb+')
self.reader = open(self.logfile, mode='rb+')
# Dup stdout so we can still write to it after redirection
self.echo_writer = open(os.dup(sys.stdout.fileno()), "w")
@@ -813,29 +809,24 @@ def __enter__(self):
def background_reader(reader, echo_writer, _kill):
# for each line printed to logfile, read it
# if echo: write line to user
try:
while True:
is_killed = _kill.wait(0.1)
# Flush buffered build output to file
# stdout/err fds refer to log file
self.stderr.flush()
self.stdout.flush()
while True:
is_killed = _kill.wait(.1)
self.stderr.flush()
self.stdout.flush()
line = reader.readline()
while line:
if self.echo:
self.echo_writer.write('{0}'.format(line.decode()))
self.echo_writer.flush()
line = reader.readline()
if self.echo and line:
echo_writer.write("{0}".format(line.decode()))
echo_writer.flush()
if is_killed:
break
finally:
reader.close()
if is_killed:
break
self._active = True
with replace_environment(self.env):
self._thread = Thread(
target=background_reader, args=(self.reader, self.echo_writer, self._kill)
)
self._thread = Thread(target=background_reader,
args=(self.reader, self.echo_writer, self._kill))
self._thread.start()
return self
@@ -846,6 +837,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
self._ioflag = False
else:
self.writer.close()
self.reader.close()
self.echo_writer.flush()
self.stdout.flush()
self.stderr.flush()
@@ -859,19 +851,16 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def force_echo(self):
"""Context manager to force local echo, even if echo is off."""
if not self._active:
raise RuntimeError("Can't call force_echo() outside log_output region!")
yield
raise RuntimeError(
"Can't call force_echo() outside log_output region!")
try:
yield self
finally:
pass
def _writer_daemon(
stdin_multiprocess_fd,
read_multiprocess_fd,
write_fd,
echo,
log_file_wrapper,
control_pipe,
filter_fn,
):
def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
log_file_wrapper, control_pipe, filter_fn):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both
@@ -924,16 +913,16 @@ def _writer_daemon(
# write_fd to terminate the reading loop, so we close the file descriptor
# here. Forking is the process spawning method everywhere except Mac OS
# for Python >= 3.8 and on Windows
if sys.version_info < (3, 8) or sys.platform != "darwin":
if sys.version_info < (3, 8) or sys.platform != 'darwin':
os.close(write_fd)
# Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O.
if sys.version_info < (3,):
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1)
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
else:
# Python 3.x before 3.7 does not open with UTF-8 encoding by default
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1, encoding='utf-8')
if stdin_multiprocess_fd:
stdin = os.fdopen(stdin_multiprocess_fd.fd)
@@ -942,7 +931,7 @@ def _writer_daemon(
# list of streams to select from
istreams = [in_pipe, stdin] if stdin else [in_pipe]
force_echo = False # parent can force echo for certain output
force_echo = False # parent can force echo for certain output
log_file = log_file_wrapper.unwrap()
@@ -965,7 +954,7 @@ def _writer_daemon(
# check and the read, so we ignore SIGTTIN here.
with ignore_signal(signal.SIGTTIN):
try:
if stdin.read(1) == "v":
if stdin.read(1) == 'v':
echo = not echo
except IOError as e:
# If SIGTTIN is ignored, the system gives EIO
@@ -983,14 +972,14 @@ def _writer_daemon(
line = _retry(in_pipe.readline)()
except UnicodeDecodeError:
# installs like --test=root gpgme produce non-UTF8 logs
line = "<line lost: output was not encoded as UTF-8>\n"
line = '<line lost: output was not encoded as UTF-8>\n'
if not line:
return
line_count += 1
# find control characters and strip them.
clean_line, num_controls = control.subn("", line)
clean_line, num_controls = control.subn('', line)
# Echo to stdout if requested or forced.
if echo or force_echo:
@@ -1054,7 +1043,6 @@ def _retry(function):
relevant for this file.
"""
def wrapped(*args, **kwargs):
while True:
try:
@@ -1067,7 +1055,6 @@ def wrapped(*args, **kwargs):
if e.args[0] == errno.EINTR:
continue
raise
return wrapped


@@ -30,7 +30,6 @@
termios = None
try:
import termios as term_mod
termios = term_mod
except ImportError:
pass
@@ -43,8 +42,8 @@ class ProcessController(object):
minion) similar to the way a shell would, by sending signals and I/O.
"""
def __init__(self, pid, controller_fd, timeout=1, sleep_time=1e-1, debug=False):
def __init__(self, pid, controller_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
@@ -85,19 +84,18 @@ def get_canon_echo_attrs(self):
def horizontal_line(self, name):
"""Labled horizontal line for debugging."""
if self.debug:
sys.stderr.write("------------------------------------------- %s\n" % name)
sys.stderr.write(
"------------------------------------------- %s\n" % name
)
def status(self):
"""Print debug message with status info for the minion."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write(
"canon: %s, echo: %s\n"
% (
"on" if canon else "off",
"on" if echo else "off",
)
)
sys.stderr.write("canon: %s, echo: %s\n" % (
"on" if canon else "off",
"on" if echo else "off",
))
sys.stderr.write("input: %s\n" % self.input_on())
sys.stderr.write("bg: %s\n" % self.background())
sys.stderr.write("\n")
@@ -139,7 +137,7 @@ def write(self, byte_string):
def wait(self, condition):
start = time.time()
while ((time.time() - start) < self.timeout) and not condition():
while (((time.time() - start) < self.timeout) and not condition()):
time.sleep(1e-2)
assert condition()
@@ -221,15 +219,14 @@ def minion_function(**kwargs)
|_________________________________________________________|
"""
def __init__(self, controller_function, minion_function):
self.proc = None
self.controller_function = controller_function
self.minion_function = minion_function
# these can be optionally set to change defaults
self.controller_timeout = 3
self.sleep_time = 0.1
self.controller_timeout = 1
self.sleep_time = 0
def start(self, **kwargs):
"""Start the controller and minion processes.
@@ -245,12 +242,8 @@ def start(self, **kwargs):
"""
self.proc = multiprocessing.Process(
target=PseudoShell._set_up_and_run_controller_function,
args=(
self.controller_function,
self.minion_function,
self.controller_timeout,
self.sleep_time,
),
args=(self.controller_function, self.minion_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
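A sketch of wiring the two callables together (both functions are hypothetical):

from llnl.util.tty.pty import PseudoShell

def minion_fn(**kwargs):
    print(input())  # block on the pty until the controller writes

def controller_fn(minion, controller, **kwargs):
    controller.status()           # debug info about the minion's tty
    controller.write(b"hello\n")  # drive the minion as a shell would
    return 0

shell = PseudoShell(controller_fn, minion_fn)
shell.start(debug=False)
shell.join()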
@@ -262,8 +255,7 @@ def join(self):
@staticmethod
def _set_up_and_run_minion_function(
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs
):
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
"""Minion process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
@@ -281,7 +273,8 @@ def _set_up_and_run_minion_function(
os.close(stdin_fd)
if kwargs.get("debug"):
sys.stderr.write("minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
sys.stderr.write(
"minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
@@ -295,15 +288,15 @@ def _set_up_and_run_minion_function(
@staticmethod
def _set_up_and_run_controller_function(
controller_function, minion_function, controller_timeout, sleep_time, **kwargs
):
controller_function, minion_function, controller_timeout,
sleep_time, **kwargs):
"""Set up a pty, spawn a minion process, execute controller_function.
Handles the mechanics of setting up a PTY, then calls
``controller_function``.
"""
os.setsid() # new session; this process is the controller
os.setsid() # new session; this process is the controller
controller_fd, minion_fd = os.openpty()
pty_name = os.ttyname(minion_fd)
@@ -312,10 +305,11 @@ def _set_up_and_run_controller_function(
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value("i", False)
ready = multiprocessing.Value('i', False)
minion_process = multiprocessing.Process(
target=PseudoShell._set_up_and_run_minion_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(), ready, minion_function),
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
ready, minion_function),
kwargs=kwargs,
)
minion_process.start()
@@ -335,7 +329,8 @@ def _set_up_and_run_controller_function(
minion_pgid = os.getpgid(minion_process.pid)
sys.stderr.write("minion pid: %d\n" % minion_process.pid)
sys.stderr.write("minion pgid: %d\n" % minion_pgid)
sys.stderr.write("minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write(
"minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
# set up controller to ignore SIGTSTP, like a shell
@@ -344,8 +339,7 @@ def _set_up_and_run_controller_function(
# call the controller function once the minion is ready
try:
controller = ProcessController(
minion_process.pid, controller_fd, debug=kwargs.get("debug")
)
minion_process.pid, controller_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
error = controller_function(minion_process, controller, **kwargs)


@@ -4,10 +4,10 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: (major, minor, micro, dev release) tuple
spack_version_info = (0, 19, 0, "dev0")
spack_version_info = (0, 18, 0, 'dev0')
#: PEP440 canonical <major>.<minor>.<micro>.<devN> string
spack_version = ".".join(str(s) for s in spack_version_info)
spack_version = '.'.join(str(s) for s in spack_version_info)
__all__ = ["spack_version_info", "spack_version"]
__all__ = ['spack_version_info', 'spack_version']
__version__ = spack_version


@@ -15,25 +15,21 @@
class ABI(object):
"""This class provides methods to test ABI compatibility between specs.
The current implementation is rather rough and could be improved."""
The current implementation is rather rough and could be improved."""
def architecture_compatible(self, target, constraint):
"""Return true if architecture of target spec is ABI compatible
to the architecture of constraint spec. If either the target
or constraint specs have no architecture, target is also defined
as architecture ABI compatible to constraint."""
return (
not target.architecture
or not constraint.architecture
or target.architecture.satisfies(constraint.architecture)
)
to the architecture of constraint spec. If either the target
or constraint specs have no architecture, target is also defined
as architecture ABI compatible to constraint."""
return not target.architecture or not constraint.architecture or \
target.architecture.satisfies(constraint.architecture)
@memoized
def _gcc_get_libstdcxx_version(self, version):
"""Returns gcc ABI compatibility info by getting the library version of
a compiler's libstdc++ or libgcc_s"""
a compiler's libstdc++ or libgcc_s"""
from spack.build_environment import dso_suffix
spec = CompilerSpec("gcc", version)
compilers = spack.compilers.compilers_for_spec(spec)
if not compilers:
@@ -54,7 +50,7 @@ def _gcc_get_libstdcxx_version(self, version):
# Some gcc's are actually clang and don't respond properly to
# --print-file-name (they just print the filename, not the
# full path). Ignore these and expect them to be handled as clang.
if Clang.default_version(rungcc.exe[0]) != "unknown":
if Clang.default_version(rungcc.exe[0]) != 'unknown':
return None
output = rungcc("--print-file-name=%s" % libname, output=str)
@@ -70,7 +66,7 @@ def _gcc_get_libstdcxx_version(self, version):
@memoized
def _gcc_compiler_compare(self, pversion, cversion):
"""Returns true iff the gcc version pversion and cversion
are ABI compatible."""
are ABI compatible."""
plib = self._gcc_get_libstdcxx_version(pversion)
clib = self._gcc_get_libstdcxx_version(cversion)
if not plib or not clib:
@@ -79,10 +75,10 @@ def _gcc_compiler_compare(self, pversion, cversion):
def _intel_compiler_compare(self, pversion, cversion):
"""Returns true iff the intel version pversion and cversion
are ABI compatible"""
are ABI compatible"""
# Test major and minor versions. Ignore build version.
if len(pversion.version) < 2 or len(cversion.version) < 2:
if (len(pversion.version) < 2 or len(cversion.version) < 2):
return False
return pversion.version[:2] == cversion.version[:2]
@@ -95,7 +91,7 @@ def compiler_compatible(self, parent, child, **kwargs):
# Different compiler families are assumed ABI incompatible
return False
if kwargs.get("loose", False):
if kwargs.get('loose', False):
return True
# TODO: Can we move the specialized ABI matching stuff
@@ -106,19 +102,16 @@ def compiler_compatible(self, parent, child, **kwargs):
# Otherwise match on version match.
if pversion.satisfies(cversion):
return True
elif parent.compiler.name == "gcc" and self._gcc_compiler_compare(
pversion, cversion
):
elif (parent.compiler.name == "gcc" and
self._gcc_compiler_compare(pversion, cversion)):
return True
elif parent.compiler.name == "intel" and self._intel_compiler_compare(
pversion, cversion
):
elif (parent.compiler.name == "intel" and
self._intel_compiler_compare(pversion, cversion)):
return True
return False
def compatible(self, target, constraint, **kwargs):
"""Returns true if target spec is ABI compatible to constraint spec"""
loosematch = kwargs.get("loose", False)
return self.architecture_compatible(target, constraint) and self.compiler_compatible(
target, constraint, loose=loosematch
)
loosematch = kwargs.get('loose', False)
return self.architecture_compatible(target, constraint) and \
self.compiler_compatible(target, constraint, loose=loosematch)
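A usage sketch (the two specs are assumed to be concretized elsewhere):

from spack.abi import ABI

abi = ABI()
# loose=True accepts any compiler version within the same family.
if abi.compatible(target_spec, constraint_spec, loose=True):  # hypothetical specs
    pass  # architectures and compilers are considered ABI compatible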


@@ -0,0 +1,42 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This package contains code for creating analyzers to extract Application
Binary Interface (ABI) information, along with simple analyses that just load
existing metadata.
"""
from __future__ import absolute_import
import llnl.util.tty as tty
import spack.paths
import spack.util.classes
mod_path = spack.paths.analyzers_path
analyzers = spack.util.classes.list_classes("spack.analyzers", mod_path)
# The base analyzer has no name attribute, so this dict cannot be built
# with a comprehension over all discovered classes
analyzer_types = {}
for a in analyzers:
if not hasattr(a, "name"):
continue
analyzer_types[a.name] = a
def list_all():
"""A helper function to list all analyzers and their descriptions
"""
for name, analyzer in analyzer_types.items():
print("%-25s: %-35s" % (name, analyzer.description))
def get_analyzer(name):
"""Courtesy function to retrieve an analyzer, and exit on error if it
does not exist.
"""
if name in analyzer_types:
return analyzer_types[name]
tty.die("Analyzer %s does not exist" % name)
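Typical lookup flow (the analyzer name comes from the registry above; ``spec`` standing for an installed spec is an assumption):

import spack.analyzers

spack.analyzers.list_all()  # print "name: description" for each analyzer
analyzer_cls = spack.analyzers.get_analyzer("install_files")
result = analyzer_cls(spec).run()  # {"install_files": {...}}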


@@ -0,0 +1,116 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""An analyzer base provides basic functions to run the analysis, save results,
and (optionally) interact with a Spack Monitor
"""
import os
import llnl.util.tty as tty
import spack.config
import spack.hooks
import spack.monitor
import spack.util.path
def get_analyzer_dir(spec, analyzer_dir=None):
"""
Given a spec, return the directory to save analyzer results.
The directory is created later, when results are saved. We also check that the
spec has an associated package. An analyzer cannot be run if the spec isn't
associated with a package. If the user provides a custom analyzer_dir,
we use it over checking the config and the default at ~/.spack/analyzers
"""
# An analyzer cannot be run if the spec isn't associated with a package
if not hasattr(spec, "package") or not spec.package:
tty.die("A spec can only be analyzed with an associated package.")
# The top level directory is in the user home, or a custom location
if not analyzer_dir:
analyzer_dir = spack.util.path.canonicalize_path(
spack.config.get('config:analyzers_dir', '~/.spack/analyzers'))
# We follow the same convention as the spec install (this could be better)
package_prefix = os.sep.join(spec.package.prefix.split('/')[-3:])
meta_dir = os.path.join(analyzer_dir, package_prefix)
return meta_dir
class AnalyzerBase(object):
def __init__(self, spec, dirname=None):
"""
Verify that the analyzer has correct metadata.
An Analyzer is intended to run on one spec install, so the spec
with its associated package is required on init. The child analyzer
class should define an init function that calls this init via super(), and
also check that the analyzer has all dependencies that it
needs. If an analyzer subclass does not have dependencies, it does not
need to define an init. An Analyzer should not be allowed to proceed
if one or more dependencies are missing. The dirname, if defined,
is an optional directory name to save to (instead of the default meta
spack directory).
"""
self.spec = spec
self.dirname = dirname
self.meta_dir = os.path.dirname(spec.package.install_log_path)
for required in ["name", "outfile", "description"]:
if not hasattr(self, required):
tty.die("Please add a %s attribute on the analyzer." % required)
def run(self):
"""
Given a spec with an installed package, run the analyzer on it.
"""
raise NotImplementedError
@property
def output_dir(self):
"""
The full path to the output directory.
This includes the nested analyzer directory structure. This function
does not create anything.
"""
if not hasattr(self, "_output_dir"):
output_dir = get_analyzer_dir(self.spec, self.dirname)
self._output_dir = os.path.join(output_dir, self.name)
return self._output_dir
def save_result(self, result, overwrite=False):
"""
Save a result to the associated spack monitor, if defined.
This function is on the level of the analyzer because it might be
the case that the result is large (appropriate for a single request)
or that the data is organized differently (e.g., more than one
request per result). If an analyzer subclass needs to override
this function with a custom save, that is appropriate to do (see abi).
"""
# We maintain the structure in json with the analyzer as key so
# that in the future, we could upload to a monitor server
if result[self.name]:
outfile = os.path.join(self.output_dir, self.outfile)
# Only try to create the results directory if we have a result
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
# Don't overwrite an existing result if overwrite is False
if os.path.exists(outfile) and not overwrite:
tty.info("%s exists and overwrite is False, skipping." % outfile)
else:
tty.info("Writing result to %s" % outfile)
spack.monitor.write_json(result[self.name], outfile)
# This hook runs after a save result
spack.hooks.on_analyzer_save(self.spec.package, result)
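A minimal subclass sketch showing the attributes the base ``__init__`` checks for (the analyzer itself is hypothetical):

import os

from spack.analyzers.analyzer_base import AnalyzerBase

class LogSize(AnalyzerBase):
    name = "log_size"
    outfile = "spack-analyzer-log-size.json"
    description = "size in bytes of the build log"

    def run(self):
        path = self.spec.package.install_log_path
        return {self.name: {"bytes": os.path.getsize(path)}}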


@@ -0,0 +1,33 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""A configargs analyzer is a class of analyzer that typically just uploads
already existing metadata about config args from a package spec install
directory."""
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class ConfigArgs(AnalyzerBase):
name = "config_args"
outfile = "spack-analyzer-config-args.json"
description = "config args loaded from spack-configure-args.txt"
def run(self):
"""
Load spack-configure-args.txt and save it as json.
The run function will find the spack-configure-args.txt file in the
package install directory, and read it into a json structure that has
the name of the analyzer as the key.
"""
config_file = os.path.join(self.meta_dir, "spack-configure-args.txt")
return {self.name: spack.monitor.read_file(config_file)}


@@ -0,0 +1,54 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""An environment analyzer will read and parse the environment variables
file in the installed package directory, generating a json file that has
an index of key, value pairs for environment variables."""
import os
import llnl.util.tty as tty
from spack.util.environment import EnvironmentModifications
from .analyzer_base import AnalyzerBase
class EnvironmentVariables(AnalyzerBase):
name = "environment_variables"
outfile = "spack-analyzer-environment-variables.json"
description = "environment variables parsed from spack-build-env.txt"
def run(self):
"""
Load, parse, and save spack-build-env.txt to analyzers.
Read in the spack-build-env.txt file from the package install
directory and parse the environment variables into key value pairs.
The result should have the key for the analyzer, the name.
"""
env_file = os.path.join(self.meta_dir, "spack-build-env.txt")
return {self.name: self._read_environment_file(env_file)}
def _read_environment_file(self, filename):
"""
Read and parse the environment file.
Given an environment file, we want to read it, split by semicolons
and new lines, and then parse down to the subset of SPACK_* variables.
We assume that all spack prefix variables are not secrets, and unlike
the install_manifest.json, we don't (at least to start) parse the values
to remove path prefixes specific to user systems.
"""
if not os.path.exists(filename):
tty.warn("No environment file available")
return
mods = EnvironmentModifications.from_sourcing_file(filename)
env = {}
mods.apply_modifications(env)
return env
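Illustrative result shape (keys and values invented):

EnvironmentVariables(spec).run()
# => {"environment_variables": {"SPACK_CC": "/path/to/cc", ...}}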


@@ -0,0 +1,31 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The install files json file (install_manifest.json) already exists in
the package install folder, so this analyzer simply moves it to the user
analyzer folder for further processing."""
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class InstallFiles(AnalyzerBase):
name = "install_files"
outfile = "spack-analyzer-install-files.json"
description = "install file listing read from install_manifest.json"
def run(self):
"""
Load in the install_manifest.json and save to analyzers.
We write it out to the analyzers folder, with key as the analyzer name.
"""
manifest_file = os.path.join(self.meta_dir, "install_manifest.json")
return {self.name: spack.monitor.read_json(manifest_file)}


@@ -0,0 +1,114 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import llnl.util.tty as tty
import spack
import spack.binary_distribution
import spack.bootstrap
import spack.error
import spack.hooks
import spack.monitor
import spack.package
import spack.repo
import spack.util.executable
from .analyzer_base import AnalyzerBase
class Libabigail(AnalyzerBase):
name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"
def __init__(self, spec, dirname=None):
"""
init for an analyzer ensures we have all needed dependencies.
For the libabigail analyzer, this means Libabigail.
Since the output for libabigail is one file per object, we communicate
with the monitor multiple times.
"""
super(Libabigail, self).__init__(spec, dirname)
# This doesn't seem to work to import on the module level
tty.debug("Preparing to use Libabigail, will install if missing.")
with spack.bootstrap.ensure_bootstrap_configuration():
# libabigail won't install lib/bin/share without docs
spec = spack.spec.Spec("libabigail+docs")
spack.bootstrap.ensure_executables_in_path_or_raise(
["abidw"], abstract_spec=spec
)
self.abidw = spack.util.executable.which('abidw')
def run(self):
"""
Run libabigail, and save results to filename.
This run function differs in that we write as we generate and then
return a dict with the analyzer name as the key, and the value of a
dict of results, where the key is the object name, and the value is
the output file written to.
"""
manifest = spack.binary_distribution.get_buildfile_manifest(self.spec)
# This result will store a path to each file
result = {}
# Generate an output file for each binary or object
for obj in manifest.get("binary_to_relocate_fullpath", []):
# We want to preserve the path in the install directory in case
# a library has an equivalently named lib or executable, for example
outdir = os.path.dirname(obj.replace(self.spec.package.prefix,
'').strip(os.path.sep))
outfile = "spack-analyzer-libabigail-%s.xml" % os.path.basename(obj)
outfile = os.path.join(self.output_dir, outdir, outfile)
outdir = os.path.dirname(outfile)
# Create the output directory
if not os.path.exists(outdir):
os.makedirs(outdir)
# Sometimes libabigail segfaults and dumps
try:
self.abidw(obj, "--out-file", outfile)
result[obj] = outfile
tty.info("Writing result to %s" % outfile)
except spack.error.SpackError:
tty.warn("Issue running abidw for %s" % obj)
return {self.name: result}
def save_result(self, result, overwrite=False):
"""
Read saved ABI results and upload to monitor server.
ABI results are saved to individual files, so each one needs to be
read and uploaded. Result here should be the lookup generated in run(),
the key is the analyzer name, and each value is the result file.
We currently upload the entire xml as text because libabigail can't
easily read gzipped xml, but this will be updated when it can.
"""
if not spack.monitor.cli:
return
name = self.spec.package.name
for obj, filename in result.get(self.name, {}).items():
# Don't include the prefix
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
# We've already saved the results to file during run
content = spack.monitor.read_file(filename)
# A result needs an analyzer, value or binary_value, and name
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
spack.hooks.on_analyzer_save(self.spec.package, {"libabigail": [data]})


@@ -35,11 +35,8 @@ def _search_duplicate_compilers(error_cls):
the decorator object, that will forward the keyword arguments passed
as input.
"""
import ast
import collections
import inspect
import itertools
import pickle
import re
from six.moves.urllib.request import urlopen
@@ -51,7 +48,6 @@ def _search_duplicate_compilers(error_cls):
import spack.patch
import spack.repo
import spack.spec
import spack.util.crypto
import spack.variant
#: Map an audit tag to a list of callables implementing checks
@@ -63,13 +59,14 @@ def _search_duplicate_compilers(error_cls):
class Error(object):
"""Information on an error reported in a test."""
def __init__(self, summary, details):
self.summary = summary
self.details = tuple(details)
def __str__(self):
return self.summary + "\n" + "\n".join([" " + detail for detail in self.details])
return self.summary + '\n' + '\n'.join([
' ' + detail for detail in self.details
])
def __eq__(self, other):
if self.summary != other.summary or self.details != other.details:
@@ -121,11 +118,11 @@ def __len__(self):
def run(self, **kwargs):
msg = 'please pass "{0}" as keyword arguments'
msg = msg.format(", ".join(self.kwargs))
msg = msg.format(', '.join(self.kwargs))
assert set(self.kwargs) == set(kwargs), msg
errors = []
kwargs["error_cls"] = Error
kwargs['error_cls'] = Error
for fn in self.callbacks:
errors.extend(fn(**kwargs))
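A sketch of invoking the checks (tags come from the AuditClass declarations below; the package list is illustrative, and ``run_check`` returning the accumulated error list is inferred from its callers):

import spack.audit

for error in spack.audit.run_check("CFG-COMPILER"):
    print(error)  # summary plus indented details

spack.audit.run_check("PKG-DIRECTIVES", pkgs=["zlib"])  # checks taking kwargs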
@@ -167,16 +164,19 @@ def run_check(tag, **kwargs):
# TODO: https://github.com/spack/spack/pull/23053/files#r630265011
#: Generic checks relying on global state
generic = AuditClass(
group="generic",
tag="GENERIC",
description="Generic checks relying on global variables",
kwargs=(),
group='generic',
tag='GENERIC',
description='Generic checks relying on global variables',
kwargs=()
)
#: Sanity checks on compilers.yaml
config_compiler = AuditClass(
group="configs", tag="CFG-COMPILER", description="Sanity checks on compilers.yaml", kwargs=()
group='configs',
tag='CFG-COMPILER',
description='Sanity checks on compilers.yaml',
kwargs=()
)
@@ -185,25 +185,34 @@ def _search_duplicate_compilers(error_cls):
"""Report compilers with the same spec and two different definitions"""
errors = []
compilers = list(sorted(spack.config.get("compilers"), key=lambda x: x["compiler"]["spec"]))
for spec, group in itertools.groupby(compilers, key=lambda x: x["compiler"]["spec"]):
compilers = list(sorted(
spack.config.get('compilers'), key=lambda x: x['compiler']['spec']
))
for spec, group in itertools.groupby(
compilers, key=lambda x: x['compiler']['spec']
):
group = list(group)
if len(group) == 1:
continue
error_msg = "Compiler defined multiple times: {0}"
error_msg = 'Compiler defined multiple times: {0}'
try:
details = [str(x._start_mark).strip() for x in group]
except Exception:
details = []
errors.append(error_cls(summary=error_msg.format(spec), details=details))
errors.append(error_cls(
summary=error_msg.format(spec), details=details
))
return errors
#: Sanity checks on packages.yaml
config_packages = AuditClass(
group="configs", tag="CFG-PACKAGES", description="Sanity checks on packages.yaml", kwargs=()
group='configs',
tag='CFG-PACKAGES',
description='Sanity checks on packages.yaml',
kwargs=()
)
@@ -211,19 +220,19 @@ def _search_duplicate_compilers(error_cls):
def _search_duplicate_specs_in_externals(error_cls):
"""Search for duplicate specs declared as externals"""
errors, externals = [], collections.defaultdict(list)
packages_yaml = spack.config.get("packages")
packages_yaml = spack.config.get('packages')
for name, pkg_config in packages_yaml.items():
# No externals can be declared under all
if name == "all" or "externals" not in pkg_config:
if name == 'all' or 'externals' not in pkg_config:
continue
current_externals = pkg_config["externals"]
current_externals = pkg_config['externals']
for entry in current_externals:
# Ask for the string representation of the spec to normalize
# aspects of the spec that may be represented in multiple ways
# e.g. +foo or foo=true
key = str(spack.spec.Spec(entry["spec"]))
key = str(spack.spec.Spec(entry['spec']))
externals[key].append(entry)
for spec, entries in sorted(externals.items()):
@@ -232,14 +241,14 @@ def _search_duplicate_specs_in_externals(error_cls):
continue
# Otherwise we need to report an error
error_msg = "Multiple externals share the same spec: {0}".format(spec)
error_msg = 'Multiple externals share the same spec: {0}'.format(spec)
try:
lines = [str(x._start_mark).strip() for x in entries]
details = (
["Please remove all but one of the following entries:"]
+ lines
+ ["as they might result in non-deterministic hashes"]
)
details = [
'Please remove all but one of the following entries:'
] + lines + [
'as they might result in non-deterministic hashes'
]
except TypeError:
details = []
@@ -250,66 +259,34 @@ def _search_duplicate_specs_in_externals(error_cls):
#: Sanity checks on package directives
package_directives = AuditClass(
group="packages",
tag="PKG-DIRECTIVES",
description="Sanity checks on specs used in directives",
kwargs=("pkgs",),
)
package_attributes = AuditClass(
group="packages",
tag="PKG-ATTRIBUTES",
description="Sanity checks on reserved attributes of packages",
kwargs=("pkgs",),
)
package_properties = AuditClass(
group="packages",
tag="PKG-PROPERTIES",
description="Sanity checks on properties a package should maintain",
kwargs=("pkgs",),
group='packages',
tag='PKG-DIRECTIVES',
description='Sanity checks on specs used in directives',
kwargs=('pkgs',)
)
#: Sanity checks on linting
# This can take some time, so it's run separately from packages
package_https_directives = AuditClass(
group="packages-https",
tag="PKG-HTTPS-DIRECTIVES",
description="Sanity checks on https checks of package urls, etc.",
kwargs=("pkgs",),
group='packages-https',
tag='PKG-HTTPS-DIRECTIVES',
description='Sanity checks on https checks of package urls, etc.',
kwargs=('pkgs',)
)
@package_directives
def _check_build_test_callbacks(pkgs, error_cls):
"""Ensure stand-alone test method is not included in build-time callbacks"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
test_callbacks = pkg_cls.build_time_test_callbacks
if test_callbacks and "test" in test_callbacks:
msg = '{0} package contains "test" method in ' "build_time_test_callbacks"
instr = 'Remove "test" from: [{0}]'.format(", ".join(test_callbacks))
errors.append(error_cls(msg.format(pkg_name), [instr]))
return errors
@package_directives
def _check_patch_urls(pkgs, error_cls):
"""Ensure that patches fetched from GitHub have stable sha256 hashes."""
github_patch_url_re = (
r"^https?://(?:patch-diff\.)?github(?:usercontent)?\.com/"
".+/.+/(?:commit|pull)/[a-fA-F0-9]*.(?:patch|diff)"
r"^https?://github\.com/.+/.+/(?:commit|pull)/[a-fA-F0-9]*.(?:patch|diff)"
)
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for condition, patches in pkg_cls.patches.items():
pkg = spack.repo.get(pkg_name)
for condition, patches in pkg.patches.items():
for patch in patches:
if not isinstance(patch, spack.patch.UrlPatch):
continue
@@ -319,210 +296,37 @@ def _check_patch_urls(pkgs, error_cls):
full_index_arg = "?full_index=1"
if not patch.url.endswith(full_index_arg):
errors.append(
error_cls(
"patch URL in package {0} must end with {1}".format(
pkg_cls.name,
full_index_arg,
),
[patch.url],
)
)
return errors
@package_attributes
def _search_for_reserved_attributes_names_in_packages(pkgs, error_cls):
"""Ensure that packages don't override reserved names"""
RESERVED_NAMES = ("name",)
errors = []
for pkg_name in pkgs:
name_definitions = collections.defaultdict(list)
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for cls_item in inspect.getmro(pkg_cls):
for name in RESERVED_NAMES:
current_value = cls_item.__dict__.get(name)
if current_value is None:
continue
name_definitions[name].append((cls_item, current_value))
for name in RESERVED_NAMES:
if len(name_definitions[name]) == 1:
continue
error_msg = (
"Package '{}' overrides the '{}' attribute or method, "
"which is reserved for Spack internal use"
)
definitions = [
"defined in '{}'".format(x[0].__module__) for x in name_definitions[name]
]
errors.append(error_cls(error_msg.format(pkg_name, name), definitions))
return errors
@package_properties
def _ensure_all_package_names_are_lowercase(pkgs, error_cls):
"""Ensure package names are lowercase and consistent"""
badname_regex, errors = re.compile(r"[_A-Z]"), []
for pkg_name in pkgs:
if badname_regex.search(pkg_name):
error_msg = "Package name '{}' is either lowercase or conatine '_'".format(pkg_name)
errors.append(error_cls(error_msg, []))
return errors
@package_properties
def _ensure_packages_are_pickeleable(pkgs, error_cls):
"""Ensure that package objects are pickleable"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = pkg_cls(spack.spec.Spec(pkg_name))
try:
pickle.dumps(pkg)
except Exception as e:
error_msg = "Package '{}' failed to pickle".format(pkg_name)
details = ["{}".format(str(e))]
errors.append(error_cls(error_msg, details))
return errors
@package_properties
def _ensure_packages_are_unparseable(pkgs, error_cls):
"""Ensure that all packages can unparse and that unparsed code is valid Python"""
import spack.util.package_hash as ph
errors = []
for pkg_name in pkgs:
try:
source = ph.canonical_source(pkg_name, filter_multimethods=False)
except Exception as e:
error_msg = "Package '{}' failed to unparse".format(pkg_name)
details = ["{}".format(str(e))]
errors.append(error_cls(error_msg, details))
continue
try:
compile(source, "internal", "exec", ast.PyCF_ONLY_AST)
except Exception as e:
error_msg = "The unparsed package '{}' failed to compile".format(pkg_name)
details = ["{}".format(str(e))]
errors.append(error_cls(error_msg, details))
return errors
@package_properties
def _ensure_all_versions_can_produce_a_fetcher(pkgs, error_cls):
"""Ensure all versions in a package can produce a fetcher"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = pkg_cls(spack.spec.Spec(pkg_name))
try:
spack.fetch_strategy.check_pkg_attributes(pkg)
for version in pkg.versions:
assert spack.fetch_strategy.for_package_version(pkg, version)
except Exception as e:
error_msg = "The package '{}' cannot produce a fetcher for some of its versions"
details = ["{}".format(str(e))]
errors.append(error_cls(error_msg.format(pkg_name), details))
return errors
@package_properties
def _ensure_docstring_and_no_fixme(pkgs, error_cls):
"""Ensure the package has a docstring and no fixmes"""
errors = []
fixme_regexes = [
re.compile(r"remove this boilerplate"),
re.compile(r"FIXME: Put"),
re.compile(r"FIXME: Add"),
re.compile(r"example.com"),
]
for pkg_name in pkgs:
details = []
filename = spack.repo.path.filename_for_package_name(pkg_name)
with open(filename, "r") as package_file:
for i, line in enumerate(package_file):
pattern = next((r for r in fixme_regexes if r.search(line)), None)
if pattern:
details.append(
"%s:%d: boilerplate needs to be removed: %s" % (filename, i, line.strip())
)
if details:
error_msg = "Package '{}' contains boilerplate that need to be removed"
errors.append(error_cls(error_msg.format(pkg_name), details))
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
if not pkg_cls.__doc__:
error_msg = "Package '{}' miss a docstring"
errors.append(error_cls(error_msg.format(pkg_name), []))
return errors
@package_properties
def _ensure_all_packages_use_sha256_checksums(pkgs, error_cls):
"""Ensure no packages use md5 checksums"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
if pkg_cls.manual_download:
continue
pkg = pkg_cls(spack.spec.Spec(pkg_name))
def invalid_sha256_digest(fetcher):
if getattr(fetcher, "digest", None):
h = spack.util.crypto.hash_algo_for_digest(fetcher.digest)
if h != "sha256":
return h, True
return None, False
error_msg = "Package '{}' does not use sha256 checksum".format(pkg_name)
details = []
for v, args in pkg.versions.items():
fetcher = spack.fetch_strategy.for_package_version(pkg, v)
digest, is_bad = invalid_sha256_digest(fetcher)
if is_bad:
details.append("{}@{} uses {}".format(pkg_name, v, digest))
for _, resources in pkg.resources.items():
for resource in resources:
digest, is_bad = invalid_sha256_digest(resource.fetcher)
if is_bad:
details.append("Resource in '{}' uses {}".format(pkg_name, digest))
if details:
errors.append(error_cls(error_msg, details))
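What the checksum check accepts and rejects in a recipe (the digests are placeholders — the SHA-256 and MD5 of the empty string):

version("1.2.3", sha256="e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")  # passes
version("1.2.3", md5="d41d8cd98f00b204e9800998ecf8427e")  # flagged by this audit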
errors.append(error_cls(
"patch URL in package {0} must end with {1}".format(
pkg.name, full_index_arg,
),
[patch.url],
))
return errors
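Example of the required form in a recipe (URL, digest, and constraint are hypothetical placeholders):

patch(
    "https://github.com/owner/project/pull/1234.patch?full_index=1",
    sha256="0" * 64,  # placeholder digest
    when="@2.1",
)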
@package_https_directives
def _linting_package_file(pkgs, error_cls):
"""Check for correctness of links"""
"""Check for correctness of links
"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
# Does the homepage have http, and if so, does https work?
if pkg_cls.homepage.startswith("http://"):
https = re.sub("http", "https", pkg_cls.homepage, 1)
if pkg.homepage.startswith('http://'):
https = re.sub("http", "https", pkg.homepage, 1)
try:
response = urlopen(https)
except Exception as e:
msg = 'Error with attempting https for "{0}": '
errors.append(error_cls(msg.format(pkg_cls.name), [str(e)]))
errors.append(error_cls(msg.format(pkg.name), [str(e)]))
continue
if response.getcode() == 200:
msg = 'Package "{0}" uses http but has a valid https endpoint.'
errors.append(msg.format(pkg_cls.name))
errors.append(msg.format(pkg.name))
return llnl.util.lang.dedupe(errors)
@@ -532,10 +336,10 @@ def _unknown_variants_in_directives(pkgs, error_cls):
"""Report unknown or wrong variants in directives for this package"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
# Check "conflicts" directive
for conflict, triggers in pkg_cls.conflicts.items():
for conflict, triggers in pkg.conflicts.items():
for trigger, _ in triggers:
vrn = spack.spec.Spec(conflict)
try:
@@ -547,48 +351,36 @@ def _unknown_variants_in_directives(pkgs, error_cls):
# conflict and trigger separately in that case.
# When os and target constraints can be created independently of
# the platform, TODO change this back to add an error.
errors.extend(
_analyze_variants_in_directive(
pkg_cls,
spack.spec.Spec(trigger),
directive="conflicts",
error_cls=error_cls,
)
)
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="conflicts", error_cls=error_cls
)
)
errors.extend(_analyze_variants_in_directive(
pkg, spack.spec.Spec(trigger),
directive='conflicts', error_cls=error_cls
))
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='conflicts', error_cls=error_cls
))
# Check "depends_on" directive
for _, triggers in pkg_cls.dependencies.items():
for _, triggers in pkg.dependencies.items():
triggers = list(triggers)
for trigger in list(triggers):
vrn = spack.spec.Spec(trigger)
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="depends_on", error_cls=error_cls
)
)
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='depends_on', error_cls=error_cls
))
# Check "patch" directive
for _, triggers in pkg_cls.provided.items():
for _, triggers in pkg.provided.items():
triggers = [spack.spec.Spec(x) for x in triggers]
for vrn in triggers:
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="patch", error_cls=error_cls
)
)
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='patch', error_cls=error_cls
))
# Check "resource" directive
for vrn in pkg_cls.resources:
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="resource", error_cls=error_cls
)
)
for vrn in pkg.resources:
errors.extend(_analyze_variants_in_directive(
pkg, vrn, directive='resource', error_cls=error_cls
))
return llnl.util.lang.dedupe(errors)
@@ -598,22 +390,23 @@ def _unknown_variants_in_dependencies(pkgs, error_cls):
"""Report unknown dependencies and wrong variants for dependencies"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
for dependency_name, dependency_data in pkg_cls.dependencies.items():
for dependency_name, dependency_data in pkg.dependencies.items():
# No need to analyze virtual packages
if spack.repo.path.is_virtual(dependency_name):
continue
try:
dependency_pkg_cls = spack.repo.path.get_pkg_class(dependency_name)
dependency_pkg = spack.repo.get(dependency_name)
except spack.repo.UnknownPackageError:
# This dependency is completely missing, so report
# and continue the analysis
summary = pkg_name + ": unknown package '{0}' in " "'depends_on' directive".format(
dependency_name
)
details = [" in " + filename]
summary = (pkg_name + ": unknown package '{0}' in "
"'depends_on' directive".format(dependency_name))
details = [
" in " + filename
]
errors.append(error_cls(summary=summary, details=details))
continue
@@ -621,51 +414,20 @@ def _unknown_variants_in_dependencies(pkgs, error_cls):
dependency_variants = dependency_edge.spec.variants
for name, value in dependency_variants.items():
try:
v, _ = dependency_pkg_cls.variants[name]
v.validate_or_raise(value, pkg_cls=dependency_pkg_cls)
v, _ = dependency_pkg.variants[name]
v.validate_or_raise(value, pkg=dependency_pkg)
except Exception as e:
summary = (
pkg_name + ": wrong variant used for a "
"dependency in a 'depends_on' directive"
)
summary = (pkg_name + ": wrong variant used for a "
"dependency in a 'depends_on' directive")
error_msg = str(e).strip()
if isinstance(e, KeyError):
error_msg = "the variant {0} does not " "exist".format(error_msg)
error_msg = ('the variant {0} does not '
'exist'.format(error_msg))
error_msg += " in package '" + dependency_name + "'"
errors.append(
error_cls(summary=summary, details=[error_msg, "in " + filename])
)
return errors
@package_directives
def _ensure_variant_defaults_are_parsable(pkgs, error_cls):
"""Ensures that variant defaults are present and parsable from cli"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for variant_name, entry in pkg_cls.variants.items():
variant, _ = entry
default_is_parsable = (
# Permitting a default that is an instance on 'int' permits
# to have foo=false or foo=0. Other falsish values are
# not allowed, since they can't be parsed from cli ('foo=')
isinstance(variant.default, int)
or variant.default
)
if not default_is_parsable:
error_msg = "Variant '{}' of package '{}' has a bad default value"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), []))
continue
vspec = variant.make_default()
try:
variant.validate_or_raise(vspec, pkg_cls=pkg_cls)
except spack.variant.InvalidVariantValueError:
error_msg = "The variant '{}' default value in package '{}' cannot be validated"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), []))
errors.append(error_cls(
summary=summary, details=[error_msg, 'in ' + filename]
))
return errors
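What "parsable from the CLI" means for defaults (hypothetical variants in a recipe):

variant("shared", default=True, description="build shared libraries")  # +shared / ~shared
variant("opt", default=0, description="optimization level")  # "opt=0" is expressible
variant("flags", default="", description="extra flags")  # rejected: "flags=" cannot be parsed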
@@ -675,33 +437,34 @@ def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls
"""Report if version constraints used in directives are not satisfiable"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
dependencies_to_check = []
for dependency_name, dependency_data in pkg_cls.dependencies.items():
for dependency_name, dependency_data in pkg.dependencies.items():
# Skip virtual dependencies for the time being, check on
# their versions can be added later
if spack.repo.path.is_virtual(dependency_name):
continue
dependencies_to_check.extend([edge.spec for edge in dependency_data.values()])
dependencies_to_check.extend(
[edge.spec for edge in dependency_data.values()]
)
for s in dependencies_to_check:
dependency_pkg_cls = None
dependency_pkg = None
try:
dependency_pkg_cls = spack.repo.path.get_pkg_class(s.name)
assert any(v.satisfies(s.versions) for v in list(dependency_pkg_cls.versions))
dependency_pkg = spack.repo.get(s.name)
assert any(
v.satisfies(s.versions) for v in list(dependency_pkg.versions)
)
except Exception:
summary = (
"{0}: dependency on {1} cannot be satisfied " "by known versions of {1.name}"
).format(pkg_name, s)
details = ["happening in " + filename]
if dependency_pkg_cls is not None:
details.append(
"known versions of {0.name} are {1}".format(
s, ", ".join([str(x) for x in dependency_pkg_cls.versions])
)
)
summary = ("{0}: dependency on {1} cannot be satisfied "
"by known versions of {1.name}").format(pkg_name, s)
details = ['happening in ' + filename]
if dependency_pkg is not None:
details.append('known versions of {0.name} are {1}'.format(
s, ', '.join([str(x) for x in dependency_pkg.versions])
))
errors.append(error_cls(summary=summary, details=details))
return errors
@@ -712,13 +475,13 @@ def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
spack.variant.InconsistentValidationError,
spack.variant.MultipleValuesInExclusiveVariantError,
spack.variant.InvalidVariantValueError,
KeyError,
KeyError
)
errors = []
for name, v in constraint.variants.items():
try:
variant, _ = pkg.variants[name]
variant.validate_or_raise(v, pkg_cls=pkg)
variant.validate_or_raise(v, pkg=pkg)
except variant_exceptions as e:
summary = pkg.name + ': wrong variant in "{0}" directive'
summary = summary.format(directive)
@@ -726,9 +489,11 @@ def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
error_msg = str(e).strip()
if isinstance(e, KeyError):
error_msg = "the variant {0} does not exist".format(error_msg)
error_msg = 'the variant {0} does not exist'.format(error_msg)
err = error_cls(summary=summary, details=[error_msg, "in " + filename])
err = error_cls(summary=summary, details=[
error_msg, 'in ' + filename
])
errors.append(err)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -9,7 +9,7 @@
from spack.build_systems.autotools import AutotoolsPackage
from spack.directives import extends
from spack.package_base import ExtensionError
from spack.package import ExtensionError
from spack.util.executable import which
@@ -23,32 +23,29 @@
class AspellDictPackage(AutotoolsPackage):
"""Specialized class for building aspell dictionairies."""
extends("aspell")
extends('aspell')
def view_destination(self, view):
aspell_spec = self.spec["aspell"]
aspell_spec = self.spec['aspell']
if view.get_projection_for_spec(aspell_spec) != aspell_spec.prefix:
raise ExtensionError("aspell does not support non-global extensions")
raise ExtensionError(
'aspell does not support non-global extensions')
aspell = aspell_spec.command
return aspell("dump", "config", "dict-dir", output=str).strip()
return aspell('dump', 'config', 'dict-dir', output=str).strip()
def view_source(self):
return self.prefix.lib
def patch(self):
filter_file(r"^dictdir=.*$", "dictdir=/lib", "configure")
filter_file(r"^datadir=.*$", "datadir=/lib", "configure")
filter_file(r'^dictdir=.*$', 'dictdir=/lib', 'configure')
filter_file(r'^datadir=.*$', 'datadir=/lib', 'configure')
def configure(self, spec, prefix):
aspell = spec["aspell"].prefix.bin.aspell
prezip = spec["aspell"].prefix.bin.prezip
aspell = spec['aspell'].prefix.bin.aspell
prezip = spec['aspell'].prefix.bin.prezip
destdir = prefix
sh = which("sh")
sh(
"./configure",
"--vars",
"ASPELL={0}".format(aspell),
"PREZIP={0}".format(prezip),
"DESTDIR={0}".format(destdir),
)
sh = which('sh')
sh('./configure', '--vars', "ASPELL={0}".format(aspell),
"PREZIP={0}".format(prezip),
"DESTDIR={0}".format(destdir))


@@ -16,7 +16,7 @@
from spack.build_environment import InstallError
from spack.directives import conflicts, depends_on
from spack.operating_systems.mac_os import macos_version
from spack.package_base import PackageBase, run_after, run_before
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable
from spack.version import Version
@@ -52,12 +52,11 @@ class AutotoolsPackage(PackageBase):
+-----------------------------------------------+--------------------+
"""
#: Phases of a GNU Autotools package
phases = ["autoreconf", "configure", "build", "install"]
phases = ['autoreconf', 'configure', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "AutotoolsPackage"
build_system_class = 'AutotoolsPackage'
@property
def patch_config_files(self):
@@ -72,11 +71,9 @@ def patch_config_files(self):
the directory containing the system ``config.guess`` and ``config.sub``
files.
"""
return (
self.spec.satisfies("target=ppc64le:")
or self.spec.satisfies("target=aarch64:")
or self.spec.satisfies("target=riscv64:")
)
return (self.spec.satisfies('target=ppc64le:')
or self.spec.satisfies('target=aarch64:')
or self.spec.satisfies('target=riscv64:'))
#: Whether or not to update ``libtool``
#: (currently only for Arm/Clang/Fujitsu/NVHPC compilers)
@@ -87,13 +84,13 @@ def patch_config_files(self):
build_targets = [] # type: List[str]
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.install`
#: phase
install_targets = ["install"]
install_targets = ['install']
#: Callback names for build-time test
build_time_test_callbacks = ["check"]
build_time_test_callbacks = ['check']
#: Callback names for install-time test
install_time_test_callbacks = ["installcheck"]
install_time_test_callbacks = ['installcheck']
#: Set to true to force the autoreconf step even if configure is present
force_autoreconf = False
@@ -104,10 +101,10 @@ def patch_config_files(self):
#: after the installation. If True instead it installs them.
install_libtool_archives = False
depends_on("gnuconfig", type="build", when="target=ppc64le:")
depends_on("gnuconfig", type="build", when="target=aarch64:")
depends_on("gnuconfig", type="build", when="target=riscv64:")
conflicts("platform=windows")
depends_on('gnuconfig', type='build', when='target=ppc64le:')
depends_on('gnuconfig', type='build', when='target=aarch64:')
depends_on('gnuconfig', type='build', when='target=riscv64:')
conflicts('platform=windows')
@property
def _removed_la_files_log(self):
@@ -115,17 +112,17 @@ def _removed_la_files_log(self):
build_dir = self.build_directory
if not os.path.isabs(self.build_directory):
build_dir = os.path.join(self.stage.path, build_dir)
return os.path.join(build_dir, "removed_la_files.txt")
return os.path.join(build_dir, 'removed_la_files.txt')
@property
def archive_files(self):
"""Files to archive for packages based on autotools"""
files = [os.path.join(self.build_directory, "config.log")]
files = [os.path.join(self.build_directory, 'config.log')]
if not self.install_libtool_archives:
files.append(self._removed_la_files_log)
return files
@run_after("autoreconf")
@run_after('autoreconf')
def _do_patch_config_files(self):
"""Some packages ship with older config.guess/config.sub files and
need to have these updated when installed on a newer architecture.
@@ -139,18 +136,20 @@ def _do_patch_config_files(self):
# TODO: Expand this to select the 'config.sub'-compatible architecture
# for each platform (e.g. 'config.sub' doesn't accept 'power9le', but
# does accept 'ppc64le').
if self.spec.satisfies("target=ppc64le:"):
config_arch = "ppc64le"
elif self.spec.satisfies("target=aarch64:"):
config_arch = "aarch64"
elif self.spec.satisfies("target=riscv64:"):
config_arch = "riscv64"
if self.spec.satisfies('target=ppc64le:'):
config_arch = 'ppc64le'
elif self.spec.satisfies('target=aarch64:'):
config_arch = 'aarch64'
elif self.spec.satisfies('target=riscv64:'):
config_arch = 'riscv64'
else:
config_arch = "local"
config_arch = 'local'
def runs_ok(script_abs_path):
# Construct the list of arguments for the call
additional_args = {"config.sub": [config_arch]}
additional_args = {
'config.sub': [config_arch]
}
script_name = os.path.basename(script_abs_path)
args = [script_abs_path] + additional_args.get(script_name, [])
@@ -163,7 +162,7 @@ def runs_ok(script_abs_path):
return True
# Get the list of files that needs to be patched
to_be_patched = fs.find(self.stage.path, files=["config.sub", "config.guess"])
to_be_patched = fs.find(self.stage.path, files=['config.sub', 'config.guess'])
to_be_patched = [f for f in to_be_patched if not runs_ok(f)]
# If there are no files to be patched, return early
@@ -172,37 +171,33 @@ def runs_ok(script_abs_path):
# Otherwise, require `gnuconfig` to be a build dependency
self._require_build_deps(
pkgs=["gnuconfig"], spec=self.spec, err="Cannot patch config files"
)
pkgs=['gnuconfig'],
spec=self.spec,
err="Cannot patch config files")
# Get the config files we need to patch (config.sub / config.guess).
to_be_found = list(set(os.path.basename(f) for f in to_be_patched))
gnuconfig = self.spec["gnuconfig"]
gnuconfig = self.spec['gnuconfig']
gnuconfig_dir = gnuconfig.prefix
# An external gnuconfig may not have a prefix.
if gnuconfig_dir is None:
raise InstallError(
"Spack could not find substitutes for GNU config "
"files because no prefix is available for the "
"`gnuconfig` package. Make sure you set a prefix "
"path instead of modules for external `gnuconfig`."
)
raise InstallError("Spack could not find substitutes for GNU config "
"files because no prefix is available for the "
"`gnuconfig` package. Make sure you set a prefix "
"path instead of modules for external `gnuconfig`.")
candidates = fs.find(gnuconfig_dir, files=to_be_found, recursive=False)
# For external packages the user may have specified an incorrect prefix.
# Otherwise, the installation is just corrupt.
if not candidates:
msg = (
"Spack could not find `config.guess` and `config.sub` "
"files in the `gnuconfig` prefix `{0}`. This means the "
"`gnuconfig` package is broken"
).format(gnuconfig_dir)
msg = ("Spack could not find `config.guess` and `config.sub` "
"files in the `gnuconfig` prefix `{0}`. This means the "
"`gnuconfig` package is broken").format(gnuconfig_dir)
if gnuconfig.external:
msg += (
" or the `gnuconfig` package prefix is misconfigured as" " an external package"
)
msg += (" or the `gnuconfig` package prefix is misconfigured as"
" an external package")
raise InstallError(msg)
# Filter working substitutes
@@ -228,7 +223,7 @@ def runs_ok(script_abs_path):
and set the prefix to the directory containing the `config.guess` and
`config.sub` files.
"""
raise InstallError(msg.format(", ".join(to_be_found), self.name))
raise InstallError(msg.format(', '.join(to_be_found), self.name))
# Copy the good files over the bad ones
for abs_path in to_be_patched:
@@ -238,19 +233,7 @@ def runs_ok(script_abs_path):
fs.copy(substitutes[name], abs_path)
os.chmod(abs_path, mode)
@run_before("configure")
def _patch_usr_bin_file(self):
"""On NixOS file is not available in /usr/bin/file. Patch configure
scripts to use file from path."""
if self.spec.os.startswith("nixos"):
x = fs.FileFilter(
*filter(fs.is_exe, fs.find(self.build_directory, "configure", recursive=True))
)
with fs.keep_modification_time(*x.filenames):
x.filter(regex="/usr/bin/file", repl="file", string=True)
@run_before("configure")
@run_before('configure')
def _set_autotools_environment_variables(self):
"""Many autotools builds use a version of mknod.m4 that fails when
running as root unless FORCE_UNSAFE_CONFIGURE is set to 1.
@@ -265,184 +248,39 @@ def _set_autotools_environment_variables(self):
"""
os.environ["FORCE_UNSAFE_CONFIGURE"] = "1"
@run_before("configure")
def _do_patch_libtool_configure(self):
"""Patch bugs that propagate from libtool macros into "configure" and
further into "libtool". Note that patches that can be fixed by patching
"libtool" directly should be implemented in the _do_patch_libtool method
below."""
# Exit early if we are required not to patch libtool-related problems:
if not self.patch_libtool:
return
x = fs.FileFilter(
*filter(fs.is_exe, fs.find(self.build_directory, "configure", recursive=True))
)
# Some distributed, automatically generated files depend on the configure script
# and require additional tools for rebuilding.
# See https://github.com/spack/spack/pull/30768#issuecomment-1219329860
with fs.keep_modification_time(*x.filenames):
# Fix parsing of compiler output when collecting predeps and postdeps
# https://lists.gnu.org/archive/html/bug-libtool/2016-03/msg00003.html
x.filter(regex=r'^(\s*if test x-L = )("\$p" \|\|\s*)$', repl=r"\1x\2")
x.filter(
regex=r'^(\s*test x-R = )("\$p")(; then\s*)$', repl=r'\1x\2 || test x-l = x"$p"\3'
)
# Support Libtool 2.4.2 and older:
x.filter(regex=r'^(\s*test \$p = "-R")(; then\s*)$', repl=r'\1 || test x-l = x"$p"\2')
@run_after("configure")
@run_after('configure')
def _do_patch_libtool(self):
"""If configure generates a "libtool" script that does not correctly
detect the compiler (and patch_libtool is set), patch in the correct
values for libtool variables.
flags for the Arm, Clang/Flang, Fujitsu and NVHPC compilers."""
The generated libtool script supports mixed compilers through tags:
``libtool --tag=CC/CXX/FC/...``. For each tag there is a block with variables,
which defines what flags to pass to the compiler. The default variables (which
are used by the default tag CC) are set in a block enclosed by
``# ### {BEGIN,END} LIBTOOL CONFIG``. For non-default tags, there are
corresponding blocks ``# ### {BEGIN,END} LIBTOOL TAG CONFIG: {CXX,FC,F77}`` at
the end of the file (after the exit command). libtool evals these blocks.
Whenever we need to update variables that the configure script got wrong
(for example because it did not recognize the compiler), we should properly scope
those changes to these tags/blocks so they only apply to the compiler we care
about. Below, ``start_at`` and ``stop_at`` are used for that."""
# Exit early if we are required not to patch libtool:
# Exit early if we are required not to patch libtool
if not self.patch_libtool:
return
x = fs.FileFilter(
*filter(fs.is_exe, fs.find(self.build_directory, "libtool", recursive=True))
)
for libtool_path in fs.find(
self.build_directory, 'libtool', recursive=True):
self._patch_libtool(libtool_path)
# Exit early if there is nothing to patch:
if not x.filenames:
return
markers = {"cc": "LIBTOOL CONFIG"}
for tag in ["cxx", "fc", "f77"]:
markers[tag] = "LIBTOOL TAG CONFIG: {0}".format(tag.upper())
# Replace empty linker flag prefixes:
if self.compiler.name == "nag":
# Nag is mixed with gcc and g++, which are recognized correctly.
# Therefore, we change only Fortran values:
for tag in ["fc", "f77"]:
marker = markers[tag]
x.filter(
regex='^wl=""$',
repl='wl="{0}"'.format(self.compiler.linker_arg),
start_at="# ### BEGIN {0}".format(marker),
stop_at="# ### END {0}".format(marker),
)
else:
x.filter(regex='^wl=""$', repl='wl="{0}"'.format(self.compiler.linker_arg))
# Replace empty PIC flag values:
for cc, marker in markers.items():
x.filter(
regex='^pic_flag=""$',
repl='pic_flag="{0}"'.format(getattr(self.compiler, "{0}_pic_flag".format(cc))),
start_at="# ### BEGIN {0}".format(marker),
stop_at="# ### END {0}".format(marker),
)
# Other compiler-specific patches:
if self.compiler.name == "fj":
x.filter(regex="-nostdlib", repl="", string=True)
rehead = r"/\S*/"
for o in [
"fjhpctag.o",
"fjcrt0.o",
"fjlang08.o",
"fjomp.o",
"crti.o",
"crtbeginS.o",
"crtendS.o",
]:
x.filter(regex=(rehead + o), repl="", string=True)
elif self.compiler.name == "dpcpp":
# Hack to filter out spurious predep_objects when building with Intel dpcpp
# (see https://github.com/spack/spack/issues/32863):
x.filter(regex=r"^(predep_objects=.*)/tmp/conftest-[0-9A-Fa-f]+\.o", repl=r"\1")
x.filter(regex=r"^(predep_objects=.*)/tmp/a-[0-9A-Fa-f]+\.o", repl=r"\1")
elif self.compiler.name == "nag":
for tag in ["fc", "f77"]:
marker = markers[tag]
start_at = "# ### BEGIN {0}".format(marker)
stop_at = "# ### END {0}".format(marker)
# Libtool 2.4.2 does not know the shared flag:
x.filter(
regex=r"\$CC -shared",
repl=r"\$CC -Wl,-shared",
string=True,
start_at=start_at,
stop_at=stop_at,
)
# Libtool does not know how to inject whole archives
# (e.g. https://github.com/pmodels/mpich/issues/4358):
x.filter(
regex=r'^whole_archive_flag_spec="\\\$({?wl}?)--whole-archive'
r'\\\$convenience \\\$\1--no-whole-archive"$',
repl=r'whole_archive_flag_spec="\$\1--whole-archive'
r"\`for conv in \$convenience\\\\\"\\\\\"; do test -n \\\\\"\$conv\\\\\" && "
r"new_convenience=\\\\\"\$new_convenience,\$conv\\\\\"; done; "
r'func_echo_all \\\\\"\$new_convenience\\\\\"\` \$\1--no-whole-archive"',
start_at=start_at,
stop_at=stop_at,
)
# The compiler requires special treatment in certain cases:
x.filter(
regex=r"^(with_gcc=.*)$",
repl="\\1\n\n# Is the compiler the NAG compiler?\nwith_nag=yes",
start_at=start_at,
stop_at=stop_at,
)
# Disable the special treatment for gcc and g++:
for tag in ["cc", "cxx"]:
marker = markers[tag]
x.filter(
regex=r"^(with_gcc=.*)$",
repl="\\1\n\n# Is the compiler the NAG compiler?\nwith_nag=no",
start_at="# ### BEGIN {0}".format(marker),
stop_at="# ### END {0}".format(marker),
)
# The compiler does not support -pthread flag, which might come
# from the inherited linker flags. We prepend the flag with -Wl,
# before using it:
x.filter(
regex=r"^(\s*)(for tmp_inherited_linker_flag in \$tmp_inherited_linker_flags; "
r"do\s*)$",
repl='\\1if test "x$with_nag" = xyes; then\n'
"\\1 revert_nag_pthread=$tmp_inherited_linker_flags\n"
"\\1 tmp_inherited_linker_flags="
"`$ECHO \"$tmp_inherited_linker_flags\" | $SED 's% -pthread% -Wl,-pthread%g'`\n"
'\\1 test x"$revert_nag_pthread" = x"$tmp_inherited_linker_flags" && '
"revert_nag_pthread=no || revert_nag_pthread=yes\n"
"\\1fi\n\\1\\2",
start_at='if test -n "$inherited_linker_flags"; then',
stop_at='case " $new_inherited_linker_flags " in',
)
# And revert the modification to produce '*.la' files that can be
# used with gcc (normally, we do not install the files but they can
# still be used during the building):
start_at = '# Time to change all our "foo.ltframework" stuff back to "-framework foo"'
stop_at = "# installed libraries to the beginning of the library search list"
x.filter(
regex=r"(\s*)(# move library search paths that coincide with paths to not "
r"yet\s*)$",
repl='\\1test x"$with_nag$revert_nag_pthread" = xyesyes &&\n'
'\\1 new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | '
"$SED 's% -Wl,-pthread% -pthread%g'`\n\\1\\2",
start_at=start_at,
stop_at=stop_at,
)
def _patch_libtool(self, libtool_path):
if (
self.spec.satisfies('%arm') or
self.spec.satisfies('%clang') or
self.spec.satisfies('%fj') or
self.spec.satisfies('%nvhpc')
):
fs.filter_file('wl=""\n', 'wl="-Wl,"\n', libtool_path)
fs.filter_file('pic_flag=""\n',
'pic_flag="{0}"\n'
.format(self.compiler.cc_pic_flag),
libtool_path)
if self.spec.satisfies('%fj'):
fs.filter_file('-nostdlib', '', libtool_path)
rehead = r'/\S*/'
objfile = ['fjhpctag.o', 'fjcrt0.o', 'fjlang08.o', 'fjomp.o',
'crti.o', 'crtbeginS.o', 'crtendS.o']
for o in objfile:
fs.filter_file(rehead + o, '', libtool_path)
@property
def configure_directory(self):
@@ -455,7 +293,9 @@ def configure_directory(self):
@property
def configure_abs_path(self):
# Absolute path to configure
configure_abs_path = os.path.join(os.path.abspath(self.configure_directory), "configure")
configure_abs_path = os.path.join(
os.path.abspath(self.configure_directory), 'configure'
)
return configure_abs_path
@property
@@ -463,7 +303,7 @@ def build_directory(self):
"""Override to provide another place to build the package"""
return self.configure_directory
@run_before("autoreconf")
@run_before('autoreconf')
def delete_configure_to_force_update(self):
if self.force_autoreconf:
force_remove(self.configure_abs_path)
@@ -472,20 +312,20 @@ def _require_build_deps(self, pkgs, spec, err):
"""Require `pkgs` to be direct build dependencies of `spec`. Raises a
RuntimeError with a helpful error messages when any dep is missing."""
build_deps = [d.name for d in spec.dependencies(deptype="build")]
build_deps = [d.name for d in spec.dependencies(deptype='build')]
missing_deps = [x for x in pkgs if x not in build_deps]
if not missing_deps:
return
# Raise an exception on missing deps.
msg = (
"{0}: missing dependencies: {1}.\n\nPlease add "
"the following lines to the package:\n\n".format(err, ", ".join(missing_deps))
)
msg = ("{0}: missing dependencies: {1}.\n\nPlease add "
"the following lines to the package:\n\n"
.format(err, ", ".join(missing_deps)))
for dep in missing_deps:
msg += " depends_on('{0}', type='build', when='@{1}')\n".format(dep, spec.version)
msg += (" depends_on('{0}', type='build', when='@{1}')\n"
.format(dep, spec.version))
msg += "\nUpdate the version (when='@{0}') as needed.".format(spec.version)
raise RuntimeError(msg)
@@ -499,32 +339,34 @@ def autoreconf(self, spec, prefix):
# Else try to regenerate it, which requires a few build dependencies
self._require_build_deps(
pkgs=["autoconf", "automake", "libtool"], spec=spec, err="Cannot generate configure"
)
pkgs=['autoconf', 'automake', 'libtool'],
spec=spec,
err="Cannot generate configure")
tty.msg("Configure script not found: trying to generate it")
tty.warn("*********************************************************")
tty.warn("* If the default procedure fails, consider implementing *")
tty.warn("* a custom AUTORECONF phase in the package *")
tty.warn("*********************************************************")
tty.msg('Configure script not found: trying to generate it')
tty.warn('*********************************************************')
tty.warn('* If the default procedure fails, consider implementing *')
tty.warn('* a custom AUTORECONF phase in the package *')
tty.warn('*********************************************************')
with working_dir(self.configure_directory):
m = inspect.getmodule(self)
# This line is what is needed most of the time
# --install, --verbose, --force
autoreconf_args = ["-ivf"]
autoreconf_args = ['-ivf']
autoreconf_args += self.autoreconf_search_path_args
autoreconf_args += self.autoreconf_extra_args
m.autoreconf(*autoreconf_args)
@property
def autoreconf_search_path_args(self):
"""Search path includes for autoreconf. Add an -I flag for all `aclocal` dirs
of build deps, skips the default path of automake, move external include
flags to the back, since they might pull in unrelated m4 files shadowing
spack dependencies."""
return _autoreconf_search_path_args(self.spec)
"""Arguments to autoreconf to modify the search paths"""
search_path_args = []
for dep in self.spec.dependencies(deptype='build'):
if os.path.exists(dep.prefix.share.aclocal):
search_path_args.extend(['-I', dep.prefix.share.aclocal])
return search_path_args
@run_after("autoreconf")
@run_after('autoreconf')
def set_configure_or_die(self):
"""Checks the presence of a ``configure`` file after the
autoreconf phase. If it is found sets a module attribute
@@ -535,11 +377,13 @@ def set_configure_or_die(self):
"""
# Check if a configure script is there. If not raise a RuntimeError.
if not os.path.exists(self.configure_abs_path):
msg = "configure script not found in {0}"
msg = 'configure script not found in {0}'
raise RuntimeError(msg.format(self.configure_directory))
# Monkey-patch the configure script in the corresponding module
inspect.getmodule(self).configure = Executable(self.configure_abs_path)
inspect.getmodule(self).configure = Executable(
self.configure_abs_path
)
def configure_args(self):
"""Produces a list containing all the arguments that must be passed to
@@ -553,16 +397,16 @@ def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass specified
compiler flags to configure."""
# Has to be dynamic attribute due to caching.
setattr(self, "configure_flag_args", [])
setattr(self, 'configure_flag_args', [])
for flag, values in flags.items():
if values:
values_str = "{0}={1}".format(flag.upper(), " ".join(values))
values_str = '{0}={1}'.format(flag.upper(), ' '.join(values))
self.configure_flag_args.append(values_str)
# Spack's fflags are meant for both F77 and FC, therefore we
# additionally set FCFLAGS if required.
values = flags.get("fflags", None)
values = flags.get('fflags', None)
if values:
values_str = "FCFLAGS={0}".format(" ".join(values))
values_str = 'FCFLAGS={0}'.format(' '.join(values))
self.configure_flag_args.append(values_str)
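A sketch of the result, assuming a flags dict like {"cflags": ["-O2", "-g"], "fflags": ["-g"]}:

# configure_flag_args would end up as:
#   ["CFLAGS=-O2 -g", "FFLAGS=-g", "FCFLAGS=-g"]
# where FCFLAGS mirrors fflags so that both F77 and FC pick them up.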
def configure(self, spec, prefix):
@@ -570,25 +414,29 @@ def configure(self, spec, prefix):
:meth:`~spack.build_systems.autotools.AutotoolsPackage.configure_args`
and an appropriately set prefix.
"""
options = getattr(self, "configure_flag_args", [])
options += ["--prefix={0}".format(prefix)]
options = getattr(self, 'configure_flag_args', [])
options += ['--prefix={0}'.format(prefix)]
options += self.configure_args()
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).configure(*options)
if self.spec.satisfies('%emscripten'):
inspect.getmodule(self).emconfigure(*options)
else:
inspect.getmodule(self).configure(*options)
def setup_build_environment(self, env):
if self.spec.platform == "darwin" and macos_version() >= Version("11"):
if (self.spec.platform == 'darwin'
and macos_version() >= Version('11')):
# Many configure files rely on matching '10.*' for macOS version
# detection and fail to add flags if it shows as version 11.
env.set("MACOSX_DEPLOYMENT_TARGET", "10.16")
env.set('MACOSX_DEPLOYMENT_TARGET', '10.16')
def build(self, spec, prefix):
"""Makes the build targets specified by
:py:attr:``~.AutotoolsPackage.build_targets``
"""
# See https://autotools.io/automake/silent.html
params = ["V=1"]
params = ['V=1']
params += self.build_targets
with working_dir(self.build_directory):
inspect.getmodule(self).make(*params)
@@ -600,18 +448,23 @@ def install(self, spec, prefix):
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.install_targets)
run_after("build")(PackageBase._run_default_build_time_test_callbacks)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the Makefile for targets ``test`` and ``check``
and runs them if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute("test")
self._if_make_target_execute("check")
self._if_make_target_execute('test')
self._if_make_target_execute('check')
def _activate_or_not(
self, name, activation_word, deactivation_word, activation_value=None, variant=None
self,
name,
activation_word,
deactivation_word,
activation_value=None,
variant=None
):
"""This function contains the current implementation details of
:meth:`~spack.build_systems.autotools.AutotoolsPackage.with_or_without` and
@@ -674,7 +527,7 @@ def _activate_or_not(
spec = self.spec
args = []
if activation_value == "prefix":
if activation_value == 'prefix':
activation_value = lambda x: spec[x].prefix
variant = variant or name
@@ -695,41 +548,45 @@ def _activate_or_not(
# BoolValuedVariant carries information about a single option.
# Nonetheless, for uniformity of treatment we'll package them
# in an iterable of one element.
condition = "+{name}".format(name=variant)
condition = '+{name}'.format(name=variant)
options = [(name, condition in spec)]
else:
condition = "{variant}={value}"
condition = '{variant}={value}'
# "feature_values" is used to track values which correspond to
# features which can be enabled or disabled as understood by the
# package's build system. It excludes values which have special
# meanings and do not correspond to features (e.g. "none")
feature_values = (
getattr(variant_desc.values, "feature_values", None) or variant_desc.values
)
feature_values = getattr(
variant_desc.values, 'feature_values', None
) or variant_desc.values
options = [
(value, condition.format(variant=variant, value=value) in spec)
(value,
condition.format(variant=variant,
value=value) in spec)
for value in feature_values
]
# For each allowed value in the list of values
for option_value, activated in options:
# Search for an override in the package for this value
override_name = "{0}_or_{1}_{2}".format(
override_name = '{0}_or_{1}_{2}'.format(
activation_word, deactivation_word, option_value
)
line_generator = getattr(self, override_name, None)
# If not available use a sensible default
if line_generator is None:
def _default_generator(is_activated):
if is_activated:
line = "--{0}-{1}".format(activation_word, option_value)
if activation_value is not None and activation_value(option_value):
line += "={0}".format(activation_value(option_value))
line = '--{0}-{1}'.format(
activation_word, option_value
)
if activation_value is not None and activation_value(option_value): # NOQA=ignore=E501
line += '={0}'.format(
activation_value(option_value)
)
return line
return "--{0}-{1}".format(deactivation_word, option_value)
return '--{0}-{1}'.format(deactivation_word, option_value)
line_generator = _default_generator
args.append(line_generator(activated))
return args
@@ -760,7 +617,8 @@ def with_or_without(self, name, activation_value=None, variant=None):
Returns:
list of arguments to configure
"""
return self._activate_or_not(name, "with", "without", activation_value, variant)
return self._activate_or_not(name, 'with', 'without', activation_value,
variant)
def enable_or_disable(self, name, activation_value=None, variant=None):
"""Same as
@@ -779,21 +637,23 @@ def enable_or_disable(self, name, activation_value=None, variant=None):
Returns:
list of arguments to configure
"""
return self._activate_or_not(name, "enable", "disable", activation_value, variant)
return self._activate_or_not(
name, 'enable', 'disable', activation_value, variant
)
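A hedged sketch of the arguments these helpers generate, for a hypothetical package with a boolean "mpi" variant and a multi-valued "languages" variant:

# spec: +mpi languages=c,fortran  (allowed values: c, cxx, fortran)
self.with_or_without("mpi")        # ["--with-mpi"]
self.enable_or_disable("mpi")      # ["--enable-mpi"]
self.with_or_without("languages")  # ["--with-c", "--without-cxx", "--with-fortran"]
# With activation_value="prefix", an activated value is rendered as
# "--with-mpi={0}".format(spec["mpi"].prefix).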
run_after("install")(PackageBase._run_default_install_time_test_callbacks)
run_after('install')(PackageBase._run_default_install_time_test_callbacks)
def installcheck(self):
"""Searches the Makefile for an ``installcheck`` target
and runs it if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute("installcheck")
self._if_make_target_execute('installcheck')
# Check that self.prefix is there after installation
run_after("install")(PackageBase.sanity_check_prefix)
run_after('install')(PackageBase.sanity_check_prefix)
@run_after("install")
@run_after('install')
def remove_libtool_archives(self):
"""Remove all .la files in prefix sub-folders if the package sets
``install_libtool_archives`` to be False.
@@ -803,40 +663,11 @@ def remove_libtool_archives(self):
return
# Remove the files and create a log of what was removed
libtool_files = fs.find(str(self.prefix), "*.la", recursive=True)
libtool_files = fs.find(str(self.prefix), '*.la', recursive=True)
with fs.safe_remove(*libtool_files):
fs.mkdirp(os.path.dirname(self._removed_la_files_log))
with open(self._removed_la_files_log, mode="w") as f:
f.write("\n".join(libtool_files))
with open(self._removed_la_files_log, mode='w') as f:
f.write('\n'.join(libtool_files))
# On macOS, force rpaths for shared library IDs and remove duplicate rpaths
run_after("install")(PackageBase.apply_macos_rpath_fixups)
def _autoreconf_search_path_args(spec):
dirs_seen = set()
flags_spack, flags_external = [], []
# We don't want to add an include flag for automake's default search path.
for automake in spec.dependencies(name="automake", deptype="build"):
try:
s = os.stat(automake.prefix.share.aclocal)
if stat.S_ISDIR(s.st_mode):
dirs_seen.add((s.st_ino, s.st_dev))
except OSError:
pass
for dep in spec.dependencies(deptype="build"):
path = dep.prefix.share.aclocal
# Skip non-existing aclocal paths
try:
s = os.stat(path)
except OSError:
continue
# Skip things seen before, as well as non-dirs.
if (s.st_ino, s.st_dev) in dirs_seen or not stat.S_ISDIR(s.st_mode):
continue
dirs_seen.add((s.st_ino, s.st_dev))
flags = flags_external if dep.external else flags_spack
flags.extend(["-I", path])
return flags_spack + flags_external
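A sketch of the flags this helper returns, with hypothetical prefixes:

# With a Spack-built libtool and an external pkgconf as build deps, roughly:
#   ["-I", "/spack/opt/.../libtool-2.4.7/share/aclocal",   # spack deps first
#    "-I", "/usr/share/aclocal"]                           # external flags last
# automake's own share/aclocal is skipped, since it is already the default path.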
run_after('install')(PackageBase.apply_macos_rpath_fixups)


@@ -8,7 +8,7 @@
from llnl.util.filesystem import install, mkdirp
from spack.build_systems.cmake import CMakePackage
from spack.package_base import run_after
from spack.package import run_after
def cmake_cache_path(name, value, comment=""):
@@ -36,7 +36,7 @@ class CachedCMakePackage(CMakePackage):
sidestep certain parsing bugs in extremely long ``cmake`` commands, and to
avoid system limits on the length of the command line."""
phases = ["initconfig", "cmake", "build", "install"]
phases = ['initconfig', 'cmake', 'build', 'install']
@property
def cache_name(self):
@@ -52,7 +52,7 @@ def cache_path(self):
return os.path.join(self.stage.source_path, self.cache_name)
def flag_handler(self, name, flags):
if name in ("cflags", "cxxflags", "cppflags", "fflags"):
if name in ('cflags', 'cxxflags', 'cppflags', 'fflags'):
return (None, None, None) # handled in the cmake cache
return (flags, None, None)
@@ -64,8 +64,10 @@ def initconfig_compiler_entries(self):
# Fortran compiler is optional
if "FC" in os.environ:
spack_fc_entry = cmake_cache_path("CMAKE_Fortran_COMPILER", os.environ["FC"])
system_fc_entry = cmake_cache_path("CMAKE_Fortran_COMPILER", self.compiler.fc)
spack_fc_entry = cmake_cache_path(
"CMAKE_Fortran_COMPILER", os.environ['FC'])
system_fc_entry = cmake_cache_path(
"CMAKE_Fortran_COMPILER", self.compiler.fc)
else:
spack_fc_entry = "# No Fortran compiler defined in spec"
system_fc_entry = "# No Fortran compiler defined in spec"
@@ -76,59 +78,42 @@ def initconfig_compiler_entries(self):
"#------------------{0}".format("-" * 60),
"# Compiler Spec: {0}".format(spec.compiler),
"#------------------{0}".format("-" * 60),
"if(DEFINED ENV{SPACK_CC})\n",
" " + cmake_cache_path("CMAKE_C_COMPILER", os.environ["CC"]),
" " + cmake_cache_path("CMAKE_CXX_COMPILER", os.environ["CXX"]),
" " + spack_fc_entry,
"else()\n",
" " + cmake_cache_path("CMAKE_C_COMPILER", self.compiler.cc),
" " + cmake_cache_path("CMAKE_CXX_COMPILER", self.compiler.cxx),
" " + system_fc_entry,
"endif()\n",
'if(DEFINED ENV{SPACK_CC})\n',
' ' + cmake_cache_path(
"CMAKE_C_COMPILER", os.environ['CC']),
' ' + cmake_cache_path(
"CMAKE_CXX_COMPILER", os.environ['CXX']),
' ' + spack_fc_entry,
'else()\n',
' ' + cmake_cache_path(
"CMAKE_C_COMPILER", self.compiler.cc),
' ' + cmake_cache_path(
"CMAKE_CXX_COMPILER", self.compiler.cxx),
' ' + system_fc_entry,
'endif()\n'
]
flags = spec.compiler_flags
# use global spack compiler flags
cppflags = " ".join(flags["cppflags"])
cppflags = ' '.join(spec.compiler_flags['cppflags'])
if cppflags:
# avoid always ending up with " " with no flags defined
cppflags += " "
cflags = cppflags + " ".join(flags["cflags"])
# avoid always ending up with ' ' with no flags defined
cppflags += ' '
cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
if cflags:
entries.append(cmake_cache_string("CMAKE_C_FLAGS", cflags))
cxxflags = cppflags + " ".join(flags["cxxflags"])
cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
if cxxflags:
entries.append(cmake_cache_string("CMAKE_CXX_FLAGS", cxxflags))
fflags = " ".join(flags["fflags"])
fflags = ' '.join(spec.compiler_flags['fflags'])
if fflags:
entries.append(cmake_cache_string("CMAKE_Fortran_FLAGS", fflags))
# CMake has different linker arguments for different build types.
# We set them for each build type.
if flags["ldflags"]:
ld_flags = " ".join(flags["ldflags"])
ld_format_string = "CMAKE_{0}_LINKER_FLAGS"
# CMake has separate linker arguments for types of builds.
for ld_type in ["EXE", "MODULE", "SHARED", "STATIC"]:
ld_string = ld_format_string.format(ld_type)
entries.append(cmake_cache_string(ld_string, ld_flags))
# CMake has libs options separated by language. Apply ours to each.
if flags["ldlibs"]:
libs_flags = " ".join(flags["ldlibs"])
libs_format_string = "CMAKE_{0}_STANDARD_LIBRARIES"
langs = ["C", "CXX", "Fortran"]
for lang in langs:
libs_string = libs_format_string.format(lang)
entries.append(cmake_cache_string(libs_string, libs_flags))
return entries
def initconfig_mpi_entries(self):
spec = self.spec
if not spec.satisfies("^mpi"):
if not spec.satisfies('^mpi'):
return []
entries = [
@@ -137,27 +122,32 @@ def initconfig_mpi_entries(self):
"#------------------{0}\n".format("-" * 60),
]
entries.append(cmake_cache_path("MPI_C_COMPILER", spec["mpi"].mpicc))
entries.append(cmake_cache_path("MPI_CXX_COMPILER", spec["mpi"].mpicxx))
entries.append(cmake_cache_path("MPI_Fortran_COMPILER", spec["mpi"].mpifc))
entries.append(cmake_cache_path("MPI_C_COMPILER",
spec['mpi'].mpicc))
entries.append(cmake_cache_path("MPI_CXX_COMPILER",
spec['mpi'].mpicxx))
entries.append(cmake_cache_path("MPI_Fortran_COMPILER",
spec['mpi'].mpifc))
# Check for slurm
using_slurm = False
slurm_checks = ["+slurm", "schedulers=slurm", "process_managers=slurm"]
if any(spec["mpi"].satisfies(variant) for variant in slurm_checks):
slurm_checks = ['+slurm',
'schedulers=slurm',
'process_managers=slurm']
if any(spec['mpi'].satisfies(variant) for variant in slurm_checks):
using_slurm = True
# Determine MPIEXEC
if using_slurm:
if spec["mpi"].external:
if spec['mpi'].external:
# Heuristic until we have dependents on externals
mpiexec = "/usr/bin/srun"
mpiexec = '/usr/bin/srun'
else:
mpiexec = os.path.join(spec["slurm"].prefix.bin, "srun")
mpiexec = os.path.join(spec['slurm'].prefix.bin, 'srun')
else:
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpirun")
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpirun')
if not os.path.exists(mpiexec):
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpiexec")
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpiexec')
if not os.path.exists(mpiexec):
msg = "Unable to determine MPIEXEC, %s tests may fail" % self.name
@@ -166,8 +156,9 @@ def initconfig_mpi_entries(self):
else:
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC
if self.spec["cmake"].satisfies("@3.10:"):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec))
if self.spec["cmake"].satisfies('@3.10:'):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE",
mpiexec))
else:
entries.append(cmake_cache_path("MPIEXEC", mpiexec))
@@ -188,22 +179,24 @@ def initconfig_hardware_entries(self):
"#------------------{0}\n".format("-" * 60),
]
if spec.satisfies("^cuda"):
if spec.satisfies('^cuda'):
entries.append("#------------------{0}".format("-" * 30))
entries.append("# Cuda")
entries.append("#------------------{0}\n".format("-" * 30))
cudatoolkitdir = spec["cuda"].prefix
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir))
cudatoolkitdir = spec['cuda'].prefix
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR",
cudatoolkitdir))
cudacompiler = "${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc"
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER", cudacompiler))
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER",
cudacompiler))
if spec.satisfies("^mpi"):
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER", "${MPI_CXX_COMPILER}"))
if spec.satisfies('^mpi'):
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
"${MPI_CXX_COMPILER}"))
else:
entries.append(
cmake_cache_path("CMAKE_CUDA_HOST_COMPILER", "${CMAKE_CXX_COMPILER}")
)
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
"${CMAKE_CXX_COMPILER}"))
return entries
@@ -212,35 +205,30 @@ def std_initconfig_entries(self):
"#------------------{0}".format("-" * 60),
"# !!!! This is a generated file, edit at own risk !!!!",
"#------------------{0}".format("-" * 60),
"# CMake executable path: {0}".format(self.spec["cmake"].command.path),
"# CMake executable path: {0}".format(
self.spec['cmake'].command.path),
"#------------------{0}\n".format("-" * 60),
]
def initconfig_package_entries(self):
"""This method is to be overwritten by the package"""
return []
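A minimal sketch of a package overriding this hook, reusing the cache helpers defined above (the hdf5 dependency and entry values are hypothetical):

def initconfig_package_entries(self):
    # Appended to the generated cache file after the compiler,
    # MPI and hardware sections.
    return [
        cmake_cache_string("ENABLE_TESTS", "OFF"),
        cmake_cache_path("HDF5_ROOT", self.spec["hdf5"].prefix),
    ]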
def initconfig(self, spec, prefix):
cache_entries = (
self.std_initconfig_entries()
+ self.initconfig_compiler_entries()
+ self.initconfig_mpi_entries()
+ self.initconfig_hardware_entries()
+ self.initconfig_package_entries()
)
cache_entries = (self.std_initconfig_entries() +
self.initconfig_compiler_entries() +
self.initconfig_mpi_entries() +
self.initconfig_hardware_entries() +
self.initconfig_package_entries())
with open(self.cache_name, "w") as f:
with open(self.cache_name, 'w') as f:
for entry in cache_entries:
f.write("%s\n" % entry)
f.write("\n")
f.write('%s\n' % entry)
f.write('\n')
@property
def std_cmake_args(self):
args = super(CachedCMakePackage, self).std_cmake_args
args.extend(["-C", self.cache_path])
args.extend(['-C', self.cache_path])
return args
@run_after("install")
@run_after('install')
def install_cmake_cache(self):
mkdirp(self.spec.prefix.share.cmake)
install(self.cache_path, self.spec.prefix.share.cmake)


@@ -18,11 +18,12 @@
import spack.build_environment
from spack.directives import conflicts, depends_on, variant
from spack.package_base import InstallError, PackageBase, run_after
from spack.package import InstallError, PackageBase, run_after
from spack.util.path import convert_to_posix_path
# Regex to extract the primary generator from the CMake generator
# string.
_primary_generator_extractor = re.compile(r"(?:.* - )?(.*)")
_primary_generator_extractor = re.compile(r'(?:.* - )?(.*)')
def _extract_primary_generator(generator):
@@ -73,17 +74,16 @@ class CMakePackage(PackageBase):
if the generator string does not follow the prescribed format, or if
the primary generator is not supported.
"""
#: Phases of a CMake package
phases = ["cmake", "build", "install"]
phases = ['cmake', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "CMakePackage"
build_system_class = 'CMakePackage'
build_targets = [] # type: List[str]
install_targets = ["install"]
install_targets = ['install']
build_time_test_callbacks = ["check"]
build_time_test_callbacks = ['check']
#: The build system generator to use.
#:
@@ -94,31 +94,32 @@ class CMakePackage(PackageBase):
#: See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
#: for more information.
generator = "Unix Makefiles"
# generator = "Unix Makefiles"
generator = "Ninja"
depends_on('ninja', type='build')
if sys.platform == "win32":
if sys.platform == 'win32':
generator = "Ninja"
depends_on("ninja")
depends_on('ninja', type='build')
# https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
variant(
"build_type",
default="RelWithDebInfo",
description="CMake build type",
values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"),
)
variant('build_type', default='RelWithDebInfo',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
# https://cmake.org/cmake/help/latest/variable/CMAKE_INTERPROCEDURAL_OPTIMIZATION.html
variant("ipo", default=False, description="CMake interprocedural optimization")
variant('ipo', default=False,
description='CMake interprocedural optimization')
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
conflicts("+ipo", when="^cmake@:3.8", msg="+ipo is not supported by CMake < 3.9")
conflicts('+ipo', when='^cmake@:3.8',
msg='+ipo is not supported by CMake < 3.9')
depends_on("cmake", type="build")
depends_on('cmake', type='build')
@property
def archive_files(self):
"""Files to archive for packages based on CMake"""
return [os.path.join(self.build_directory, "CMakeCache.txt")]
return [os.path.join(self.build_directory, 'CMakeCache.txt')]
@property
def root_cmakelists_dir(self):
@@ -140,7 +141,7 @@ def std_cmake_args(self):
"""
# standard CMake arguments
std_cmake_args = CMakePackage._std_args(self)
std_cmake_args += getattr(self, "cmake_flag_args", [])
std_cmake_args += getattr(self, 'cmake_flag_args', [])
return std_cmake_args
@staticmethod
@@ -153,56 +154,53 @@ def _std_args(pkg):
generator = CMakePackage.generator
# Make sure a valid generator was chosen
valid_primary_generators = ["Unix Makefiles", "Ninja"]
valid_primary_generators = ['Unix Makefiles', 'Ninja']
primary_generator = _extract_primary_generator(generator)
if primary_generator not in valid_primary_generators:
msg = "Invalid CMake generator: '{0}'\n".format(generator)
msg = "Invalid CMake generator: '{0}'\n".format(generator)
msg += "CMakePackage currently supports the following "
msg += "primary generators: '{0}'".format("', '".join(valid_primary_generators))
msg += "primary generators: '{0}'".\
format("', '".join(valid_primary_generators))
raise InstallError(msg)
try:
build_type = pkg.spec.variants["build_type"].value
build_type = pkg.spec.variants['build_type'].value
except KeyError:
build_type = "RelWithDebInfo"
build_type = 'RelWithDebInfo'
try:
ipo = pkg.spec.variants["ipo"].value
ipo = pkg.spec.variants['ipo'].value
except KeyError:
ipo = False
define = CMakePackage.define
args = [
"-G",
generator,
define("CMAKE_INSTALL_PREFIX", pkg.prefix),
define("CMAKE_BUILD_TYPE", build_type),
define("BUILD_TESTING", pkg.run_tests),
'-G', generator,
define('CMAKE_INSTALL_PREFIX', convert_to_posix_path(pkg.prefix)),
define('CMAKE_BUILD_TYPE', build_type),
]
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
if pkg.spec.satisfies("^cmake@3.9:"):
args.append(define("CMAKE_INTERPROCEDURAL_OPTIMIZATION", ipo))
if pkg.spec.satisfies('^cmake@3.9:'):
args.append(define('CMAKE_INTERPROCEDURAL_OPTIMIZATION', ipo))
if primary_generator == "Unix Makefiles":
args.append(define("CMAKE_VERBOSE_MAKEFILE", True))
if primary_generator == 'Unix Makefiles':
args.append(define('CMAKE_VERBOSE_MAKEFILE', True))
if platform.mac_ver()[0]:
args.extend(
[
define("CMAKE_FIND_FRAMEWORK", "LAST"),
define("CMAKE_FIND_APPBUNDLE", "LAST"),
]
)
args.extend([
define('CMAKE_FIND_FRAMEWORK', "LAST"),
define('CMAKE_FIND_APPBUNDLE', "LAST"),
])
# Set up CMake rpath
args.extend(
[
define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
define("CMAKE_INSTALL_RPATH", spack.build_environment.get_rpaths(pkg)),
define("CMAKE_PREFIX_PATH", spack.build_environment.get_cmake_prefix_path(pkg)),
]
)
args.extend([
define('CMAKE_INSTALL_RPATH_USE_LINK_PATH', True),
define('CMAKE_INSTALL_RPATH',
spack.build_environment.get_rpaths(pkg)),
define('CMAKE_PREFIX_PATH',
spack.build_environment.get_cmake_prefix_path(pkg))
])
return args
@staticmethod
@@ -233,10 +231,10 @@ def define(cmake_var, value):
# Create a list of pairs. Each pair includes a configuration
# option and whether or not that option is activated
if isinstance(value, bool):
kind = "BOOL"
kind = 'BOOL'
value = "ON" if value else "OFF"
else:
kind = "STRING"
kind = 'STRING'
if isinstance(value, Sequence) and not isinstance(value, six.string_types):
value = ";".join(str(v) for v in value)
else:
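A rough sketch of what ``define`` renders, assuming Spack's usual -D{var}:{kind}={value} formatting:

define("BUILD_SHARED_LIBS", True)      # "-DBUILD_SHARED_LIBS:BOOL=ON"
define("CMAKE_BUILD_TYPE", "Release")  # "-DCMAKE_BUILD_TYPE:STRING=Release"
define("TPL_DIRS", ["/a", "/b"])       # "-DTPL_DIRS:STRING=/a;/b"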
@@ -292,10 +290,11 @@ def define_from_variant(self, cmake_var, variant=None):
variant = cmake_var.lower()
if variant not in self.variants:
raise KeyError('"{0}" is not a variant of "{1}"'.format(variant, self.name))
raise KeyError(
'"{0}" is not a variant of "{1}"'.format(variant, self.name))
if variant not in self.spec.variants:
return ""
return ''
value = self.spec.variants[variant].value
if isinstance(value, (tuple, list)):
@@ -310,34 +309,37 @@ def flags_to_build_system_args(self, flags):
so cppflags will be added to cflags, cxxflags, and fflags to mimic the
behavior in other tools."""
# Has to be dynamic attribute due to caching
setattr(self, "cmake_flag_args", [])
setattr(self, 'cmake_flag_args', [])
flag_string = "-DCMAKE_{0}_FLAGS={1}"
langs = {"C": "c", "CXX": "cxx", "Fortran": "f"}
flag_string = '-DCMAKE_{0}_FLAGS={1}'
langs = {'C': 'c', 'CXX': 'cxx', 'Fortran': 'f'}
# Handle language compiler flags
for lang, pre in langs.items():
flag = pre + "flags"
flag = pre + 'flags'
# cmake has no explicit cppflags support -> add it to all langs
lang_flags = " ".join(flags.get(flag, []) + flags.get("cppflags", []))
lang_flags = ' '.join(flags.get(flag, []) + flags.get('cppflags',
[]))
if lang_flags:
self.cmake_flag_args.append(flag_string.format(lang, lang_flags))
self.cmake_flag_args.append(flag_string.format(lang,
lang_flags))
# Cmake has different linker arguments for different build types.
# We specify for each of them.
if flags["ldflags"]:
ldflags = " ".join(flags["ldflags"])
ld_string = "-DCMAKE_{0}_LINKER_FLAGS={1}"
if flags['ldflags']:
ldflags = ' '.join(flags['ldflags'])
ld_string = '-DCMAKE_{0}_LINKER_FLAGS={1}'
# cmake has separate linker arguments for types of builds.
for type in ["EXE", "MODULE", "SHARED", "STATIC"]:
for type in ['EXE', 'MODULE', 'SHARED', 'STATIC']:
self.cmake_flag_args.append(ld_string.format(type, ldflags))
# CMake has libs options separated by language. Apply ours to each.
if flags["ldlibs"]:
libs_flags = " ".join(flags["ldlibs"])
libs_string = "-DCMAKE_{0}_STANDARD_LIBRARIES={1}"
if flags['ldlibs']:
libs_flags = ' '.join(flags['ldlibs'])
libs_string = '-DCMAKE_{0}_STANDARD_LIBRARIES={1}'
for lang in langs:
self.cmake_flag_args.append(libs_string.format(lang, libs_flags))
self.cmake_flag_args.append(libs_string.format(lang,
libs_flags))
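A sketch of the resulting arguments, assuming flags = {"cflags": ["-O2"], "cppflags": ["-DX"], "ldflags": ["-lm"]}:

# cmake_flag_args would contain roughly:
#   -DCMAKE_C_FLAGS=-O2 -DX
#   -DCMAKE_CXX_FLAGS=-DX          # cppflags are folded into every language
#   -DCMAKE_Fortran_FLAGS=-DX
#   -DCMAKE_EXE_LINKER_FLAGS=-lm   # likewise for MODULE/SHARED/STATIC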
@property
def build_dirname(self):
@@ -345,7 +347,7 @@ def build_dirname(self):
:return: name of the subdirectory for building the package
"""
return "spack-build-%s" % self.spec.dag_hash(7)
return 'spack-build-%s' % self.spec.dag_hash(7)
@property
def build_directory(self):
@@ -361,7 +363,6 @@ def cmake_args(self):
* CMAKE_INSTALL_PREFIX
* CMAKE_BUILD_TYPE
* BUILD_TESTING
which will be set automatically.
@@ -375,38 +376,43 @@ def cmake(self, spec, prefix):
options += self.cmake_args()
options.append(os.path.abspath(self.root_cmakelists_dir))
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).cmake(*options)
if self.spec.satisfies('%emscripten'):
inspect.getmodule(self).emcmake(*options)
else:
inspect.getmodule(self).cmake(*options)
def build(self, spec, prefix):
"""Make the build targets"""
with working_dir(self.build_directory):
if self.generator == "Unix Makefiles":
if self.generator == 'Unix Makefiles':
inspect.getmodule(self).make(*self.build_targets)
elif self.generator == "Ninja":
elif self.generator == 'Ninja':
self.build_targets.append("-v")
inspect.getmodule(self).ninja(*self.build_targets)
def install(self, spec, prefix):
"""Make the install targets"""
with working_dir(self.build_directory):
if self.generator == "Unix Makefiles":
if self.generator == 'Unix Makefiles':
inspect.getmodule(self).make(*self.install_targets)
elif self.generator == "Ninja":
elif self.generator == 'Ninja':
inspect.getmodule(self).ninja(*self.install_targets)
run_after("build")(PackageBase._run_default_build_time_test_callbacks)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the CMake-generated Makefile for the target ``test``
and runs it if found.
"""
with working_dir(self.build_directory):
if self.generator == "Unix Makefiles":
self._if_make_target_execute("test", jobs_env="CTEST_PARALLEL_LEVEL")
self._if_make_target_execute("check")
elif self.generator == "Ninja":
self._if_ninja_target_execute("test", jobs_env="CTEST_PARALLEL_LEVEL")
self._if_ninja_target_execute("check")
if self.generator == 'Unix Makefiles':
self._if_make_target_execute('test',
jobs_env='CTEST_PARALLEL_LEVEL')
self._if_make_target_execute('check')
elif self.generator == 'Ninja':
self._if_ninja_target_execute('test',
jobs_env='CTEST_PARALLEL_LEVEL')
self._if_ninja_target_execute('check')
# Check that self.prefix is there after installation
run_after("install")(PackageBase.sanity_check_prefix)
run_after('install')(PackageBase.sanity_check_prefix)


@@ -6,7 +6,7 @@
import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.multimethod import when
from spack.package_base import PackageBase
from spack.package import PackageBase
class CudaPackage(PackageBase):
@@ -20,93 +20,65 @@ class CudaPackage(PackageBase):
# https://developer.nvidia.com/cuda-gpus
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
cuda_arch_values = (
"10",
"11",
"12",
"13",
"20",
"21",
"30",
"32",
"35",
"37",
"50",
"52",
"53",
"60",
"61",
"62",
"70",
"72",
"75",
"80",
"86",
"87",
"89",
"90",
'10', '11', '12', '13',
'20', '21',
'30', '32', '35', '37',
'50', '52', '53',
'60', '61', '62',
'70', '72', '75',
'80', '86'
)
# FIXME: keep cuda and cuda_arch separate to make usage easier until
# Spack has depends_on(cuda, when='cuda_arch!=None') or the like
variant("cuda", default=False, description="Build with CUDA")
variant('cuda', default=False,
description='Build with CUDA')
variant(
"cuda_arch",
description="CUDA architecture",
values=spack.variant.any_combination_of(*cuda_arch_values),
sticky=True,
when="+cuda",
)
variant('cuda_arch',
description='CUDA architecture',
values=spack.variant.any_combination_of(*cuda_arch_values),
when='+cuda')
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#nvcc-examples
# https://llvm.org/docs/CompileCudaWithLLVM.html#compiling-cuda-code
@staticmethod
def cuda_flags(arch_list):
return [
(
"--generate-code arch=compute_{0},code=sm_{0} "
"--generate-code arch=compute_{0},code=compute_{0}"
).format(s)
for s in arch_list
]
return [('--generate-code arch=compute_{0},code=sm_{0} '
'--generate-code arch=compute_{0},code=compute_{0}').format(s)
for s in arch_list]
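For example, cuda_arch=70,80 yields:

# CudaPackage.cuda_flags(["70", "80"]) ==
# ["--generate-code arch=compute_70,code=sm_70 "
#  "--generate-code arch=compute_70,code=compute_70",
#  "--generate-code arch=compute_80,code=sm_80 "
#  "--generate-code arch=compute_80,code=compute_80"]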
depends_on("cuda", when="+cuda")
depends_on('cuda', when='+cuda')
# CUDA version vs Architecture
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
# https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
depends_on("cuda@:6.0", when="cuda_arch=10")
depends_on("cuda@:6.5", when="cuda_arch=11")
depends_on("cuda@2.1:6.5", when="cuda_arch=12")
depends_on("cuda@2.1:6.5", when="cuda_arch=13")
depends_on('cuda@:6.0', when='cuda_arch=10')
depends_on('cuda@:6.5', when='cuda_arch=11')
depends_on('cuda@2.1:6.5', when='cuda_arch=12')
depends_on('cuda@2.1:6.5', when='cuda_arch=13')
depends_on("cuda@3.0:8.0", when="cuda_arch=20")
depends_on("cuda@3.2:8.0", when="cuda_arch=21")
depends_on('cuda@3.0:8.0', when='cuda_arch=20')
depends_on('cuda@3.2:8.0', when='cuda_arch=21')
depends_on("cuda@5.0:10.2", when="cuda_arch=30")
depends_on("cuda@5.0:10.2", when="cuda_arch=32")
depends_on("cuda@5.0:", when="cuda_arch=35")
depends_on("cuda@6.5:", when="cuda_arch=37")
depends_on('cuda@5.0:10.2', when='cuda_arch=30')
depends_on('cuda@5.0:10.2', when='cuda_arch=32')
depends_on('cuda@5.0:', when='cuda_arch=35')
depends_on('cuda@6.5:', when='cuda_arch=37')
depends_on("cuda@6.0:", when="cuda_arch=50")
depends_on("cuda@6.5:", when="cuda_arch=52")
depends_on("cuda@6.5:", when="cuda_arch=53")
depends_on('cuda@6.0:', when='cuda_arch=50')
depends_on('cuda@6.5:', when='cuda_arch=52')
depends_on('cuda@6.5:', when='cuda_arch=53')
depends_on("cuda@8.0:", when="cuda_arch=60")
depends_on("cuda@8.0:", when="cuda_arch=61")
depends_on("cuda@8.0:", when="cuda_arch=62")
depends_on('cuda@8.0:', when='cuda_arch=60')
depends_on('cuda@8.0:', when='cuda_arch=61')
depends_on('cuda@8.0:', when='cuda_arch=62')
depends_on("cuda@9.0:", when="cuda_arch=70")
depends_on("cuda@9.0:", when="cuda_arch=72")
depends_on("cuda@10.0:", when="cuda_arch=75")
depends_on('cuda@9.0:', when='cuda_arch=70')
depends_on('cuda@9.0:', when='cuda_arch=72')
depends_on('cuda@10.0:', when='cuda_arch=75')
depends_on("cuda@11.0:", when="cuda_arch=80")
depends_on("cuda@11.1:", when="cuda_arch=86")
depends_on("cuda@11.4:", when="cuda_arch=87")
depends_on("cuda@11.8:", when="cuda_arch=89")
depends_on("cuda@11.8:", when="cuda_arch=90")
depends_on('cuda@11.0:', when='cuda_arch=80')
depends_on('cuda@11.1:', when='cuda_arch=86')
# From the NVIDIA install guide we know of conflicts for particular
# platforms (linux, darwin), architectures (x86, powerpc) and compilers
@@ -117,15 +89,15 @@ def cuda_flags(arch_list):
# Linux x86_64 compiler conflicts from here:
# https://gist.github.com/ax3l/9489132
with when("^cuda~allow-unsupported-compilers"):
with when('^cuda~allow-unsupported-compilers'):
# GCC
# According to
# https://github.com/spack/spack/pull/25054#issuecomment-886531664
# these conflicts are valid independently from the architecture
# minimum supported versions
conflicts("%gcc@:4", when="+cuda ^cuda@11.0:")
conflicts("%gcc@:5", when="+cuda ^cuda@11.4:")
conflicts('%gcc@:4', when='+cuda ^cuda@11.0:')
conflicts('%gcc@:5', when='+cuda ^cuda@11.4:')
# maximum supported version
# NOTE:
@@ -133,40 +105,39 @@ def cuda_flags(arch_list):
# it has been decided to use an upper bound for the latest version.
# This implies that the last one in the list has to be updated at
# each release of a new cuda minor version.
conflicts("%gcc@10:", when="+cuda ^cuda@:11.0")
conflicts("%gcc@11:", when="+cuda ^cuda@:11.4.0")
conflicts("%gcc@11.2:", when="+cuda ^cuda@:11.5")
conflicts("%gcc@12:", when="+cuda ^cuda@:11.8")
conflicts("%clang@12:", when="+cuda ^cuda@:11.4.0")
conflicts("%clang@13:", when="+cuda ^cuda@:11.5")
conflicts("%clang@14:", when="+cuda ^cuda@:11.7")
conflicts("%clang@15:", when="+cuda ^cuda@:11.8")
conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
conflicts('%gcc@11:', when='+cuda ^cuda@:11.4.0')
conflicts('%gcc@12:', when='+cuda ^cuda@:11.6')
conflicts('%clang@12:', when='+cuda ^cuda@:11.4.0')
conflicts('%clang@13:', when='+cuda ^cuda@:11.5')
conflicts('%clang@14:', when='+cuda ^cuda@:11.6')
# https://gist.github.com/ax3l/9489132#gistcomment-3860114
conflicts("%gcc@10", when="+cuda ^cuda@:11.4.0")
conflicts("%gcc@5:", when="+cuda ^cuda@:7.5 target=x86_64:")
conflicts("%gcc@6:", when="+cuda ^cuda@:8 target=x86_64:")
conflicts("%gcc@7:", when="+cuda ^cuda@:9.1 target=x86_64:")
conflicts("%gcc@8:", when="+cuda ^cuda@:10.0.130 target=x86_64:")
conflicts("%gcc@9:", when="+cuda ^cuda@:10.2.89 target=x86_64:")
conflicts("%pgi@:14.8", when="+cuda ^cuda@:7.0.27 target=x86_64:")
conflicts("%pgi@:15.3,15.5:", when="+cuda ^cuda@7.5 target=x86_64:")
conflicts("%pgi@:16.2,16.0:16.3", when="+cuda ^cuda@8 target=x86_64:")
conflicts("%pgi@:15,18:", when="+cuda ^cuda@9.0:9.1 target=x86_64:")
conflicts("%pgi@:16,19:", when="+cuda ^cuda@9.2.88:10 target=x86_64:")
conflicts("%pgi@:17,20:", when="+cuda ^cuda@10.1.105:10.2.89 target=x86_64:")
conflicts("%pgi@:17,21:", when="+cuda ^cuda@11.0.2:11.1.0 target=x86_64:")
conflicts("%clang@:3.4", when="+cuda ^cuda@:7.5 target=x86_64:")
conflicts("%clang@:3.7,4:", when="+cuda ^cuda@8.0:9.0 target=x86_64:")
conflicts("%clang@:3.7,4.1:", when="+cuda ^cuda@9.1 target=x86_64:")
conflicts("%clang@:3.7,5.1:", when="+cuda ^cuda@9.2 target=x86_64:")
conflicts("%clang@:3.7,6.1:", when="+cuda ^cuda@10.0.130 target=x86_64:")
conflicts("%clang@:3.7,7.1:", when="+cuda ^cuda@10.1.105 target=x86_64:")
conflicts("%clang@:3.7,8.1:", when="+cuda ^cuda@10.1.105:10.1.243 target=x86_64:")
conflicts("%clang@:3.2,9:", when="+cuda ^cuda@10.2.89 target=x86_64:")
conflicts("%clang@:5", when="+cuda ^cuda@11.0.2: target=x86_64:")
conflicts("%clang@10:", when="+cuda ^cuda@:11.0.3 target=x86_64:")
conflicts("%clang@11:", when="+cuda ^cuda@:11.1.0 target=x86_64:")
conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')
conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:')
conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:')
conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:')
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:')
conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:')
conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:')
conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:')
conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:')
conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:')
conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:')
conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:')
conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:')
conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 target=x86_64:')
conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:')
conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:')
conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:')
conflicts('%clang@:3.7,8.1:',
when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:')
conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:')
conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:')
conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')
# x86_64 and ppc64le differ according to the NVIDIA docs
# Linux ppc64le compiler conflicts, from the table in the docs below:
@@ -177,43 +148,43 @@ def cuda_flags(arch_list):
# https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html
# information prior to CUDA 9 is difficult to find
conflicts("%gcc@6:", when="+cuda ^cuda@:9 target=ppc64le:")
conflicts("%gcc@8:", when="+cuda ^cuda@:10.0.130 target=ppc64le:")
conflicts("%gcc@9:", when="+cuda ^cuda@:10.1.243 target=ppc64le:")
conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:')
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:')
conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:')
# officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
conflicts("%pgi", when="+cuda ^cuda@:8 target=ppc64le:")
conflicts("%pgi@:16", when="+cuda ^cuda@:9.1.185 target=ppc64le:")
conflicts("%pgi@:17", when="+cuda ^cuda@:10 target=ppc64le:")
conflicts("%clang@4:", when="+cuda ^cuda@:9.0.176 target=ppc64le:")
conflicts("%clang@5:", when="+cuda ^cuda@:9.1 target=ppc64le:")
conflicts("%clang@6:", when="+cuda ^cuda@:9.2 target=ppc64le:")
conflicts("%clang@7:", when="+cuda ^cuda@10.0.130 target=ppc64le:")
conflicts("%clang@7.1:", when="+cuda ^cuda@:10.1.105 target=ppc64le:")
conflicts("%clang@8.1:", when="+cuda ^cuda@:10.2.89 target=ppc64le:")
conflicts("%clang@:5", when="+cuda ^cuda@11.0.2: target=ppc64le:")
conflicts("%clang@10:", when="+cuda ^cuda@:11.0.2 target=ppc64le:")
conflicts("%clang@11:", when="+cuda ^cuda@:11.1.0 target=ppc64le:")
conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:')
conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:')
conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:')
conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:')
conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:')
conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:')
conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:')
conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:')
conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:')
conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:')
conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:')
conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')
# Intel is mostly relevant for x86_64 Linux, even though it also
# exists for Mac OS X. No information is available prior to CUDA 3.2 or Intel 11.1
conflicts("%intel@:11.0", when="+cuda ^cuda@:3.1")
conflicts("%intel@:12.0", when="+cuda ^cuda@5.5:")
conflicts("%intel@:13.0", when="+cuda ^cuda@6.0:")
conflicts("%intel@:13.2", when="+cuda ^cuda@6.5:")
conflicts("%intel@:14.9", when="+cuda ^cuda@7:")
conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1')
conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:')
conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:')
conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:')
conflicts('%intel@:14.9', when='+cuda ^cuda@7:')
# Intel 15.x is compatible with CUDA 7 through the current CUDA
conflicts("%intel@16.0:", when="+cuda ^cuda@:8.0.43")
conflicts("%intel@17.0:", when="+cuda ^cuda@:8.0.60")
conflicts("%intel@18.0:", when="+cuda ^cuda@:9.9")
conflicts("%intel@19.0:", when="+cuda ^cuda@:10.0")
conflicts("%intel@19.1:", when="+cuda ^cuda@:10.1")
conflicts("%intel@19.2:", when="+cuda ^cuda@:11.1.0")
conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0')
# XL is mostly relevant for ppc64le Linux
conflicts("%xl@:12,14:", when="+cuda ^cuda@:9.1")
conflicts("%xl@:12,14:15,17:", when="+cuda ^cuda@9.2")
conflicts("%xl@:12,17:", when="+cuda ^cuda@:11.1.0")
conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0')
# Darwin.
# TODO: add missing conflicts for %apple-clang cuda@:10
conflicts("platform=darwin", when="+cuda ^cuda@11.0.2: ")
conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ')
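For orientation, here is a minimal sketch of how the directives in this file compose. This is illustrative only, not the real CudaPackage mixin that ships in Spack's build_systems: `with when(...)` scopes a group of `conflicts()` directives to specs that do not set the allow-unsupported-compilers escape hatch, and each `conflicts(spec, when=...)` rules out a (compiler, CUDA) pair at concretization time. The class name is invented; the imports are the real ones used elsewhere in this diff.

from spack.directives import conflicts, variant
from spack.multimethod import when
from spack.package_base import PackageBase  # `spack.package` before the module rename

class MiniCudaMixin(PackageBase):
    """Illustrative sketch: rule out known-bad (compiler, CUDA) pairs."""

    variant("cuda", default=False, description="Build with CUDA")

    with when("^cuda~allow-unsupported-compilers"):
        # `%gcc@:4` reads "any GCC through 4.x"; the conflict fires only
        # when the enclosing spec also matches the `when=` constraint.
        conflicts("%gcc@:4", when="+cuda ^cuda@11.0:")
        # Upper bounds like this one must be bumped at each CUDA minor
        # release, per the NOTE above.
        conflicts("%gcc@10:", when="+cuda ^cuda@:11.0")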


@@ -3,25 +3,22 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from typing import Optional
import spack.package_base
import spack.package
import spack.util.url
class GNUMirrorPackage(spack.package_base.PackageBase):
class GNUMirrorPackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for GNU packages."""
#: Path of the package in a GNU mirror
gnu_mirror_path = None # type: Optional[str]
gnu_mirror_path = None
#: List of GNU mirrors used by Spack
base_mirrors = [
"https://ftpmirror.gnu.org/",
"https://ftp.gnu.org/gnu/",
'https://ftpmirror.gnu.org/',
'https://ftp.gnu.org/gnu/',
# Fall back to http if https didn't work (for instance because
# Spack is bootstrapping curl)
"http://ftpmirror.gnu.org/",
'http://ftpmirror.gnu.org/'
]
@property
@@ -35,5 +32,6 @@ def urls(self):
def _ensure_gnu_mirror_path_is_set_or_raise(self):
if self.gnu_mirror_path is None:
cls_name = type(self).__name__
msg = "{0} must define a `gnu_mirror_path` attribute" " [none defined]"
msg = ('{0} must define a `gnu_mirror_path` attribute'
' [none defined]')
raise AttributeError(msg.format(cls_name))
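The hunk above elides the body of the `urls` property. A plausible reconstruction (an assumption, not the verbatim source) is that it splices every mirror in `base_mirrors` with the package's `gnu_mirror_path`, using the `spack.util.url` helper imported at the top of the file:

import spack.package_base
import spack.util.url

class GNUMirrorSketch(spack.package_base.PackageBase):
    """Sketch of the mixin above; only the reconstructed property is shown."""

    gnu_mirror_path = None
    base_mirrors = ["https://ftpmirror.gnu.org/", "https://ftp.gnu.org/gnu/"]

    @property
    def urls(self):
        # Raises AttributeError, as above, if a subclass forgot to set the path.
        self._ensure_gnu_mirror_path_is_set_or_raise()
        return [spack.util.url.join(mirror, self.gnu_mirror_path)
                for mirror in self.base_mirrors]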

File diff suppressed because it is too large.


@@ -1,100 +0,0 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from llnl.util.filesystem import find
from spack.directives import depends_on, extends
from spack.multimethod import when
from spack.package_base import PackageBase
from spack.util.executable import Executable
class LuaPackage(PackageBase):
"""Specialized class for lua packages"""
phases = ["unpack", "generate_luarocks_config", "preprocess", "install"]
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "LuaPackage"
list_depth = 1 # LuaRocks requires at least one level of spidering to find versions
depends_on("lua-lang")
extends("lua", when="^lua")
with when("^lua-luajit"):
extends("lua-luajit")
depends_on("luajit")
depends_on("lua-luajit+lualinks")
with when("^lua-luajit-openresty"):
extends("lua-luajit-openresty")
depends_on("luajit")
depends_on("lua-luajit-openresty+lualinks")
def unpack(self, spec, prefix):
if os.path.splitext(self.stage.archive_file)[1] == ".rock":
directory = self.luarocks("unpack", self.stage.archive_file, output=str)
dirlines = directory.split("\n")
# TODO: figure out how to scope this better
os.chdir(dirlines[2])
def _generate_tree_line(self, name, prefix):
return """{{ name = "{name}", root = "{prefix}" }};""".format(
name=name,
prefix=prefix,
)
def _luarocks_config_path(self):
return os.path.join(self.stage.source_path, "spack_luarocks.lua")
def generate_luarocks_config(self, spec, prefix):
spec = self.spec
table_entries = []
for d in spec.traverse(deptypes=("build", "run"), deptype_query="run"):
if d.package.extends(self.extendee_spec):
table_entries.append(self._generate_tree_line(d.name, d.prefix))
path = self._luarocks_config_path()
with open(path, "w") as config:
config.write(
"""
deps_mode="all"
rocks_trees={{
{}
}}
""".format(
"\n".join(table_entries)
)
)
return path
def setup_build_environment(self, env):
env.set("LUAROCKS_CONFIG", self._luarocks_config_path())
def preprocess(self, spec, prefix):
"""Override this to preprocess source before building with luarocks"""
pass
@property
def lua(self):
return Executable(self.spec["lua-lang"].prefix.bin.lua)
@property
def luarocks(self):
lr = Executable(self.spec["lua-lang"].prefix.bin.luarocks)
return lr
def luarocks_args(self):
return []
def install(self, spec, prefix):
rock = "."
specs = find(".", "*.rockspec", recursive=False)
if specs:
rock = specs[0]
rocks_args = self.luarocks_args()
rocks_args.append(rock)
self.luarocks("--tree=" + prefix, "make", *rocks_args)


@@ -11,7 +11,7 @@
from llnl.util.filesystem import working_dir
from spack.directives import conflicts
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class MakefilePackage(PackageBase):
@@ -43,26 +43,25 @@ class MakefilePackage(PackageBase):
| | Makefile is located|
+-----------------------------------------------+--------------------+
"""
#: Phases of a package that is built with a hand-written Makefile
phases = ["edit", "build", "install"]
phases = ['edit', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "MakefilePackage"
build_system_class = 'MakefilePackage'
#: Targets for ``make`` during the :py:meth:`~.MakefilePackage.build`
#: phase
build_targets = [] # type: List[str]
#: Targets for ``make`` during the :py:meth:`~.MakefilePackage.install`
#: phase
install_targets = ["install"]
install_targets = ['install']
conflicts("platform=windows")
conflicts('platform=windows')
#: Callback names for build-time test
build_time_test_callbacks = ["check"]
build_time_test_callbacks = ['check']
#: Callback names for install-time test
install_time_test_callbacks = ["installcheck"]
install_time_test_callbacks = ['installcheck']
@property
def build_directory(self):
@@ -76,43 +75,49 @@ def edit(self, spec, prefix):
"""Edits the Makefile before calling make. This phase cannot
be defaulted.
"""
tty.msg("Using default implementation: skipping edit phase.")
tty.msg('Using default implementation: skipping edit phase.')
def build(self, spec, prefix):
"""Calls make, passing :py:attr:`~.MakefilePackage.build_targets`
as targets.
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.build_targets)
if self.spec.satisfies('%emscripten'):
inspect.getmodule(self).emmake(*self.build_targets)
else:
inspect.getmodule(self).make(*self.build_targets)
def install(self, spec, prefix):
"""Calls make, passing :py:attr:`~.MakefilePackage.install_targets`
as targets.
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.install_targets)
if self.spec.satisfies('%emscripten'):
inspect.getmodule(self).emmake(*self.install_targets)
else:
inspect.getmodule(self).make(*self.install_targets)
run_after("build")(PackageBase._run_default_build_time_test_callbacks)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the Makefile for targets ``test`` and ``check``
and runs them if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute("test")
self._if_make_target_execute("check")
self._if_make_target_execute('test')
self._if_make_target_execute('check')
run_after("install")(PackageBase._run_default_install_time_test_callbacks)
run_after('install')(PackageBase._run_default_install_time_test_callbacks)
def installcheck(self):
"""Searches the Makefile for an ``installcheck`` target
and runs it if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute("installcheck")
self._if_make_target_execute('installcheck')
# Check that self.prefix is there after installation
run_after("install")(PackageBase.sanity_check_prefix)
run_after('install')(PackageBase.sanity_check_prefix)
# On macOS, force rpaths for shared library IDs and remove duplicate rpaths
run_after("install")(PackageBase.apply_macos_rpath_fixups)
run_after('install')(PackageBase.apply_macos_rpath_fixups)
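The substantive change in this file is the dispatch on `%emscripten`: when a package is compiled with the emscripten toolchain, the inherited build and install phases call `emmake` (which wraps `make` with the cross-compilation environment) instead of plain `make`. For orientation, a hypothetical subclass showing where the mandatory `edit` phase fits; `FileFilter` and the `spack_cc` build-time global are real Spack helpers, but the package and the Makefile variables are invented:

from llnl.util.filesystem import FileFilter
from spack.build_systems.makefile import MakefilePackage

class ExamplePackage(MakefilePackage):
    """Invented package: patch the Makefile, then inherit build/install."""

    def edit(self, spec, prefix):
        makefile = FileFilter("Makefile")
        # spack_cc is injected into the package module at build time.
        # Point the hard-coded compiler at Spack's wrapper; under
        # %emscripten the inherited phases then run `emmake make`.
        makefile.filter(r"^CC\s*=.*", "CC = {0}".format(spack_cc))
        makefile.filter(r"^PREFIX\s*=.*", "PREFIX = {0}".format(prefix))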


@@ -7,7 +7,7 @@
from llnl.util.filesystem import install_tree, working_dir
from spack.directives import depends_on
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
from spack.util.executable import which
@@ -21,16 +21,15 @@ class MavenPackage(PackageBase):
* build
* install
"""
# Default phases
phases = ["build", "install"]
phases = ['build', 'install']
# To be used in UI queries that require to know which
# build-system class we are using
build_system_class = "MavenPackage"
build_system_class = 'MavenPackage'
depends_on("java", type=("build", "run"))
depends_on("maven", type="build")
depends_on('java', type=('build', 'run'))
depends_on('maven', type='build')
@property
def build_directory(self):
@@ -45,17 +44,17 @@ def build(self, spec, prefix):
"""Compile code and package into a JAR file."""
with working_dir(self.build_directory):
mvn = which("mvn")
mvn = which('mvn')
if self.run_tests:
mvn("verify", *self.build_args())
mvn('verify', *self.build_args())
else:
mvn("package", "-DskipTests", *self.build_args())
mvn('package', '-DskipTests', *self.build_args())
def install(self, spec, prefix):
"""Copy to installation prefix."""
with working_dir(self.build_directory):
install_tree(".", prefix)
install_tree('.', prefix)
# Check that self.prefix is there after installation
run_after("install")(PackageBase.sanity_check_prefix)
run_after('install')(PackageBase.sanity_check_prefix)
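The `*self.build_args()` splat above is this build system's extension hook: subclasses return extra flags for `mvn`. A hypothetical example (the package and flags are invented; `-DskipTests` is already supplied by the base class when tests are off):

from spack.build_systems.maven import MavenPackage

class ExampleMavenPackage(MavenPackage):
    """Invented package: select a profile and skip javadoc generation."""

    def build_args(self):
        # Appended to `mvn package -DskipTests` by the inherited build phase.
        return ["-Pdist", "-Dmaven.javadoc.skip=true"]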

Some files were not shown because too many files have changed in this diff.