Compare commits


2 Commits

Author          SHA1        Message                               Date
Gregory Becker  d82b537339  more thread safety                    2022-07-11 14:00:20 -04:00
Gregory Becker  3d8d1fe924  update binary index before threading  2022-07-11 13:19:45 -04:00
7836 changed files with 180099 additions and 236332 deletions

.flake8

@@ -1,25 +1,43 @@
# -*- conf -*- # -*- conf -*-
# flake8 settings for Spack. # flake8 settings for Spack core files.
# #
# These exceptions are for Spack core files. We're slightly more lenient # These exceptions are for Spack core files. We're slightly more lenient
# with packages. See .flake8_packages for that. # with packages. See .flake8_packages for that.
# #
# This is the only flake8 rule Spack violates somewhat flagrantly # E1: Indentation
# - E129: visually indented line with same indent as next logical line
#
# E2: Whitespace
# - E221: multiple spaces before operator
# - E241: multiple spaces after ','
# - E272: multiple spaces before keyword
#
# E7: Statement
# - E731: do not assign a lambda expression, use a def # - E731: do not assign a lambda expression, use a def
# #
# This is the only flake8 exception needed when using Black. # W5: Line break warning
# - E203: white space around slice operators can be required, ignore : warn # - W503: line break before binary operator
# - W504: line break after binary operator
# #
# We still allow these in packages (Would like to get rid of them or rely on mypy # These are required to get the package.py files to test clean:
# in the future) # - F999: syntax error in doctest
# - F403: from/import * used; unable to detect undefined names #
# N8: PEP8-naming
# - N801: class names should use CapWords convention
# - N813: camelcase imported as lowercase
# - N814: camelcase imported as constant
#
# F4: pyflakes import checks, these are now checked by mypy more precisely
# - F403: from module import *
# - F405: undefined name or from * # - F405: undefined name or from *
# - F821: undefined name (needed with from/import *) #
# Black ignores, these are incompatible with black style and do not follow PEP-8
# - E203: white space around slice operators can be required, ignore : warn
# - W503: see above, already ignored for line-breaks
# #
[flake8] [flake8]
#ignore = E129,,W503,W504,F999,N801,N813,N814,F403,F405,E203 ignore = E129,E221,E241,E272,E731,W503,W504,F999,N801,N813,N814,F403,F405
extend-ignore = E731,E203 max-line-length = 88
max-line-length = 99
# F4: Import # F4: Import
# - F405: `name` may be undefined, or undefined from star imports: `module` # - F405: `name` may be undefined, or undefined from star imports: `module`
@@ -28,8 +46,7 @@ max-line-length = 99
# - F821: undefined name `name` # - F821: undefined name `name`
# #
per-file-ignores = per-file-ignores =
var/spack/repos/*/package.py:F403,F405,F821 var/spack/repos/*/package.py:F405,F821
*-ci-package.py:F403,F405,F821
# exclude things we usually do not want linting for. # exclude things we usually do not want linting for.
# These still get linted when passed explicitly, as when spack flake8 passes # These still get linted when passed explicitly, as when spack flake8 passes
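Side note on how this file is consumed: flake8 discovers a `.flake8` at the project root automatically, so a CI step needs no explicit `--config` flag. A minimal sketch of such a step, assuming it runs from the repository root (the step name and lint targets are illustrative, not taken from the workflows in this diff):

```yaml
# Illustrative step: lint Spack core with the .flake8 settings above.
- name: flake8 (core)
  run: |
    pip install flake8
    flake8 lib/spack/spack bin/spack   # picks up ./.flake8 from the cwd
```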

.git-blame-ignore-revs

@@ -1,3 +0,0 @@
# .git-blame-ignore-revs
# Formatted entire codebase with black
f52f6e99dbf1131886a80112b8c79dfc414afb7c
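For context, `.git-blame-ignore-revs` is the conventional input to git's ignore-revs machinery: when wired up, `git blame` skips the listed reformat commit during line attribution, so deleting the file here re-exposes that commit in blame output. A sketch of the standard wiring (a stock git feature, not something defined in this diff):

```yaml
# Illustrative step: make git blame skip the commits listed in the file.
- name: Configure git blame
  run: git config blame.ignoreRevsFile .git-blame-ignore-revs
```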


@@ -1,62 +0,0 @@
name: "\U0001F4A5 Tests error"
description: Some package in Spack had stand-alone tests that didn't pass
title: "Testing issue: "
labels: [test-error]
body:
- type: textarea
id: reproduce
attributes:
label: Steps to reproduce the failure(s) or link(s) to test output(s)
description: |
Fill in the test output from the exact spec that is having stand-alone test failures. Links to test outputs (e.g., CDash) can also be provided.
value: |
```console
$ spack spec -I <spec>
...
```
- type: textarea
id: error
attributes:
label: Error message
description: |
Please post the error message from spack inside the `<details>` tag below:
value: |
<details><summary>Error message</summary><pre>
...
</pre></details>
validations:
required: true
- type: textarea
id: information
attributes:
label: Information on your system or the test runner
description: Please include the output of `spack debug report` for your system.
validations:
required: true
- type: markdown
attributes:
value: |
If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well.
- type: textarea
id: additional_information
attributes:
label: Additional information
description: |
Please upload test logs or any additional information about the problem.
- type: markdown
attributes:
value: |
Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and **@mention** them here if they exist.
- type: checkboxes
id: checks
attributes:
label: General information
options:
- label: I have reported the version of Spack/Python/Platform/Runner
required: true
- label: I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers
required: true
- label: I have uploaded any available logs
required: true
- label: I have searched the issues of this repo and believe this is not a duplicate
required: true

.github/workflows/audit.yaml

@@ -1,44 +0,0 @@
name: audit
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
python_version:
required: true
type: string
concurrency:
group: audit-${{inputs.python_version}}-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Run audits on all the packages in the built-in repository
package-audits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml]
- name: Package audits (with coverage)
if: ${{ inputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ inputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits
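Since this workflow declares only `workflow_call` triggers, it never runs on its own; it is invoked from another workflow, as `ci.yaml` does further down in this diff. A minimal caller sketch (the input values shown are illustrative):

```yaml
jobs:
  audit:
    uses: ./.github/workflows/audit.yaml
    with:
      with_coverage: 'true'    # forwarded to the with_coverage input
      python_version: '3.10'   # any interpreter the runner can provide
```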

.github/workflows/bootstrap-test.sh

@@ -1,7 +0,0 @@
#!/bin/bash
set -ex
source share/spack/setup-env.sh
$PYTHON bin/spack bootstrap disable spack-install
$PYTHON bin/spack -d solve zlib
tree $BOOTSTRAP/store
exit 0
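This helper assumes its caller exports `$PYTHON` (the interpreter under test) and that `spack-tmpconfig -b` provides the temporary `$BOOTSTRAP` store the final `tree` inspects; both assumptions match the matrix jobs further down, from which this sketch of the invocation is adapted:

```yaml
- name: Bootstrap clingo against one tool-cache Python
  run: |
    export PYTHON="$ver_dir/python"   # interpreter selected by the matrix loop
    ./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
```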

.github/workflows/bootstrap.yml

@@ -3,19 +3,33 @@ name: Bootstrapping
on: on:
# This Workflow can be triggered manually # This Workflow can be triggered manually
workflow_dispatch: workflow_dispatch:
workflow_call: pull_request:
branches:
- develop
- releases/**
paths-ignore:
# Don't run if we only modified packages in the
# built-in repository or documentation
- 'var/spack/repos/builtin/**'
- '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
- '!var/spack/repos/builtin/packages/clingo/**'
- '!var/spack/repos/builtin/packages/python/**'
- '!var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/docs/**'
schedule: schedule:
# nightly at 2:16 AM # nightly at 2:16 AM
- cron: '16 2 * * *' - cron: '16 2 * * *'
concurrency: concurrency:
group: bootstrap-${{github.ref}}-${{github.event.pull_request.number || github.run_number}} group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
fedora-clingo-sources: fedora-clingo-sources:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: "fedora:latest" container: "fedora:latest"
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
run: | run: |
@@ -24,9 +38,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \ make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static cmake bison bison-devel libstdc++-static
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
run: | run: |
# See [1] below # See [1] below
@@ -37,13 +49,13 @@ jobs:
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
git --version git --version
git fetch --unshallow
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Bootstrap clingo - name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4 spack bootstrap untrust github-actions-v0.2
spack bootstrap disable github-actions-v0.3
spack external find cmake bison spack external find cmake bison
spack -d solve zlib spack -d solve zlib
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
@@ -51,6 +63,7 @@ jobs:
ubuntu-clingo-sources: ubuntu-clingo-sources:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: "ubuntu:latest" container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
env: env:
@@ -62,9 +75,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \ make patch unzip xz-utils python3 python3-dev tree \
cmake bison cmake bison
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
run: | run: |
# See [1] below # See [1] below
@@ -75,13 +86,13 @@ jobs:
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
git --version git --version
git fetch --unshallow
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Bootstrap clingo - name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4 spack bootstrap untrust github-actions-v0.2
spack bootstrap disable github-actions-v0.3
spack external find cmake bison spack external find cmake bison
spack -d solve zlib spack -d solve zlib
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
@@ -89,6 +100,7 @@ jobs:
ubuntu-clingo-binaries-and-patchelf: ubuntu-clingo-binaries-and-patchelf:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: "ubuntu:latest" container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
env: env:
@@ -99,9 +111,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \ bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree make patch unzip xz-utils python3 python3-dev tree
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
run: | run: |
# See [1] below # See [1] below
@@ -112,6 +122,7 @@ jobs:
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
git --version git --version
git fetch --unshallow
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Bootstrap clingo - name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
@@ -123,6 +134,7 @@ jobs:
opensuse-clingo-sources: opensuse-clingo-sources:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: "opensuse/leap:latest" container: "opensuse/leap:latest"
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
run: | run: |
@@ -133,38 +145,36 @@ jobs:
make patch unzip which xz python3 python3-devel tree \ make patch unzip which xz python3 python3-devel tree \
cmake bison cmake bison
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- name: Setup repo - name: Setup repo
run: | run: |
# See [1] below # See [1] below
git config --global --add safe.directory /__w/spack/spack git config --global --add safe.directory /__w/spack/spack
git --version git --version
git fetch --unshallow
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Bootstrap clingo - name: Bootstrap clingo
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4 spack bootstrap untrust github-actions-v0.2
spack bootstrap disable github-actions-v0.3
spack external find cmake bison spack external find cmake bison
spack -d solve zlib spack -d solve zlib
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
macos-clingo-sources: macos-clingo-sources:
runs-on: macos-latest runs-on: macos-latest
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
run: | run: |
brew install cmake bison@2.7 tree brew install cmake bison@2.7 tree
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Bootstrap clingo - name: Bootstrap clingo
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
export PATH=/usr/local/opt/bison@2.7/bin:$PATH export PATH=/usr/local/opt/bison@2.7/bin:$PATH
spack bootstrap disable github-actions-v0.4 spack bootstrap untrust github-actions-v0.2
spack bootstrap disable github-actions-v0.3
spack external find --not-buildable cmake bison spack external find --not-buildable cmake bison
spack -d solve zlib spack -d solve zlib
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
@@ -173,70 +183,53 @@ jobs:
runs-on: ${{ matrix.macos-version }} runs-on: ${{ matrix.macos-version }}
strategy: strategy:
matrix: matrix:
macos-version: ['macos-11', 'macos-12'] python-version: ['3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
macos-version: ['macos-10.15', 'macos-11', 'macos-12']
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
run: | run: |
brew install tree brew install tree
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
with:
python-version: ${{ matrix.python-version }}
- name: Bootstrap clingo - name: Bootstrap clingo
run: | run: |
set -ex source share/spack/setup-env.sh
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do spack bootstrap untrust spack-install
not_found=1 spack -d solve zlib
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)" tree ~/.spack/bootstrap/store/
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
# NOTE: test all pythons that exist, not all do on 12
done
ubuntu-clingo-binaries: ubuntu-clingo-binaries:
runs-on: ubuntu-20.04 runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
if: github.repository == 'spack/spack'
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
with: with:
fetch-depth: 0 python-version: ${{ matrix.python-version }}
- name: Setup repo - name: Setup repo
run: | run: |
git --version git --version
git fetch --unshallow
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Bootstrap clingo - name: Bootstrap clingo
run: | run: |
set -ex source share/spack/setup-env.sh
for ver in '2.7' '3.6' '3.7' '3.8' '3.9' '3.10' ; do spack bootstrap untrust spack-install
not_found=1 spack -d solve zlib
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)" tree ~/.spack/bootstrap/store/
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
if (($not_found)) ; then
echo Required python version $ver not found in runner!
exit 1
fi
done
ubuntu-gnupg-binaries: ubuntu-gnupg-binaries:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: "ubuntu:latest" container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
env: env:
@@ -247,9 +240,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \ bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree make patch unzip xz-utils python3 python3-dev tree
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
run: | run: |
# See [1] below # See [1] below
@@ -260,18 +251,20 @@ jobs:
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
git --version git --version
git fetch --unshallow
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Bootstrap GnuPG - name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
spack bootstrap disable spack-install spack bootstrap untrust spack-install
spack -d gpg list spack -d gpg list
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
ubuntu-gnupg-sources: ubuntu-gnupg-sources:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: "ubuntu:latest" container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
env: env:
@@ -283,9 +276,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \ make patch unzip xz-utils python3 python3-dev tree \
gawk gawk
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
run: | run: |
# See [1] below # See [1] below
@@ -296,19 +287,20 @@ jobs:
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
git --version git --version
git fetch --unshallow
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Bootstrap GnuPG - name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0} shell: runuser -u spack-test -- bash {0}
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
spack solve zlib spack solve zlib
spack bootstrap disable github-actions-v0.4 spack bootstrap untrust github-actions-v0.2
spack bootstrap disable github-actions-v0.3
spack -d gpg list spack -d gpg list
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
macos-gnupg-binaries: macos-gnupg-binaries:
runs-on: macos-latest runs-on: macos-latest
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
run: | run: |
@@ -316,16 +308,17 @@ jobs:
# Remove GnuPG since we want to bootstrap it # Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg sudo rm -rf /usr/local/bin/gpg
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Bootstrap GnuPG - name: Bootstrap GnuPG
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
spack bootstrap disable spack-install spack bootstrap untrust spack-install
spack -d gpg list spack -d gpg list
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
macos-gnupg-sources: macos-gnupg-sources:
runs-on: macos-latest runs-on: macos-latest
if: github.repository == 'spack/spack'
steps: steps:
- name: Install dependencies - name: Install dependencies
run: | run: |
@@ -333,13 +326,12 @@ jobs:
# Remove GnuPG since we want to bootstrap it # Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg sudo rm -rf /usr/local/bin/gpg
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Bootstrap GnuPG - name: Bootstrap GnuPG
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
spack solve zlib spack solve zlib
spack bootstrap disable github-actions-v0.4 spack bootstrap untrust github-actions-v0.2
spack bootstrap disable github-actions-v0.3
spack -d gpg list spack -d gpg list
tree ~/.spack/bootstrap/store/ tree ~/.spack/bootstrap/store/
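One detail worth noting in the trigger rework above: the concurrency group keys on the PR number when one exists and falls back to the run number otherwise, so successive pushes to a PR cancel each other's stale runs while scheduled and branch builds never collide. Reduced to its essentials:

```yaml
concurrency:
  # PR events share a group per PR, so a new push cancels the stale run;
  # other events get a unique group per run and are never cancelled.
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
  cancel-in-progress: true
```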

.github/workflows/build-containers.yml

@@ -13,14 +13,14 @@ on:
paths: paths:
- '.github/workflows/build-containers.yml' - '.github/workflows/build-containers.yml'
- 'share/spack/docker/*' - 'share/spack/docker/*'
- 'share/spack/templates/container/*' - 'share/templates/container/*'
- 'lib/spack/spack/container/*' - 'lib/spack/spack/container/*'
# Let's also build & tag Spack containers on releases. # Let's also build & tag Spack containers on releases.
release: release:
types: [published] types: [published]
concurrency: concurrency:
group: build_containers-${{github.ref}}-${{github.event.pull_request.number || github.run_number}} group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@@ -50,7 +50,7 @@ jobs:
if: github.repository == 'spack/spack' if: github.repository == 'spack/spack'
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2 uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- name: Set Container Tag Normal (Nightly) - name: Set Container Tag Normal (Nightly)
run: | run: |
@@ -80,19 +80,19 @@ jobs:
fi fi
- name: Upload Dockerfile - name: Upload Dockerfile
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with: with:
name: dockerfiles name: dockerfiles
path: dockerfiles path: dockerfiles
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # @v1 uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # @v1
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # @v1 uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # @v1
- name: Log in to GitHub Container Registry - name: Log in to GitHub Container Registry
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1 uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.actor }} username: ${{ github.actor }}
@@ -100,13 +100,13 @@ jobs:
- name: Log in to DockerHub - name: Log in to DockerHub
if: github.event_name != 'pull_request' if: github.event_name != 'pull_request'
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1 uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
with: with:
username: ${{ secrets.DOCKERHUB_USERNAME }} username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }} - name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # @v2 uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # @v2
with: with:
context: dockerfiles/${{ matrix.dockerfile[0] }} context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }} platforms: ${{ matrix.dockerfile[1] }}
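A pattern that recurs throughout these workflows: third-party actions are pinned to a full commit SHA, with the release tag kept only as a trailing comment. The SHA is what actually executes; the `# @v2` trailer is documentation, not a constraint. In isolation:

```yaml
# The 40-character SHA pins the exact action code; the comment records
# the tag the SHA was taken from, purely for human readers.
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8  # @v2
```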

.github/workflows/ci.yaml

@@ -1,92 +0,0 @@
name: ci
on:
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency:
group: ci-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
prechecks:
needs: [ changes ]
uses: ./.github/workflows/valid-style.yml
with:
with_coverage: ${{ needs.changes.outputs.core }}
audit-ancient-python:
uses: ./.github/workflows/audit.yaml
needs: [ changes ]
with:
with_coverage: ${{ needs.changes.outputs.core }}
python_version: 2.7
all-prechecks:
needs: [ prechecks ]
runs-on: ubuntu-latest
steps:
- name: Success
run: "true"
# Check which files have been updated by the PR
changes:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
bootstrap: ${{ steps.filter.outputs.bootstrap }}
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
# Don't run if we only modified packages in the
# built-in repository or documentation
filters: |
bootstrap:
- 'var/spack/repos/builtin/packages/clingo-bootstrap/**'
- 'var/spack/repos/builtin/packages/clingo/**'
- 'var/spack/repos/builtin/packages/python/**'
- 'var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/**'
- 'share/spack/**'
- '.github/workflows/bootstrap.yml'
- '.github/workflows/ci.yaml'
core:
- './!(var/**)/**'
packages:
- 'var/**'
# Some links for easier reference:
#
# "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
# job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
# setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
#
bootstrap:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/bootstrap.yml
unit-tests:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/unit_tests.yaml
windows:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks ]
uses: ./.github/workflows/windows_python.yml
all:
needs: [ windows, unit-tests, bootstrap, audit-ancient-python ]
runs-on: ubuntu-latest
steps:
- name: Success
run: "true"

.github/workflows/macos_python.yml

@@ -0,0 +1,71 @@
# These are nightly package tests for macOS
# focus areas:
# - initial user experience
# - scientific python stack
name: macOS builds nightly
on:
schedule:
# nightly at 1 AM
- cron: '0 1 * * *'
pull_request:
branches:
- develop
paths:
# Run if we modify this yaml file
- '.github/workflows/macos_python.yml'
# TODO: run if we touch any of the recipes involved in this
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
jobs:
install_gcc:
name: gcc with clang
if: github.repository == 'spack/spack'
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
# 9.2.0 is the latest version on which we apply homebrew patch
spack install -v --fail-fast gcc@11.2.0 %apple-clang
install_jupyter_clang:
name: jupyter
if: github.repository == 'spack/spack'
runs-on: macos-latest
timeout-minutes: 700
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-jupyterlab %apple-clang
install_scipy_clang:
name: scipy, mpl, pd
if: github.repository == 'spack/spack'
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-scipy %apple-clang
spack install -v --fail-fast py-matplotlib %apple-clang
spack install -v --fail-fast py-pandas %apple-clang

.github/workflows/setup_git.ps1

@@ -6,10 +6,6 @@ git config --global user.email "spack@example.com"
git config --global user.name "Test User" git config --global user.name "Test User"
git config --global core.longpaths true git config --global core.longpaths true
# See https://github.com/git/git/security/advisories/GHSA-3wp6-j8xr-qw85 (CVE-2022-39253)
# This is needed to let some fixture in our unit-test suite run
git config --global protocol.file.allow always
if ($(git branch --show-current) -ne "develop") if ($(git branch --show-current) -ne "develop")
{ {
git branch develop origin/develop git branch develop origin/develop

.github/workflows/setup_git.sh

@@ -2,10 +2,6 @@
git config --global user.email "spack@example.com" git config --global user.email "spack@example.com"
git config --global user.name "Test User" git config --global user.name "Test User"
# See https://github.com/git/git/security/advisories/GHSA-3wp6-j8xr-qw85 (CVE-2022-39253)
# This is needed to let some fixture in our unit-test suite run
git config --global protocol.file.allow always
# create a local pr base branch # create a local pr base branch
if [[ -n $GITHUB_BASE_REF ]]; then if [[ -n $GITHUB_BASE_REF ]]; then
git fetch origin "${GITHUB_BASE_REF}:${GITHUB_BASE_REF}" git fetch origin "${GITHUB_BASE_REF}:${GITHUB_BASE_REF}"

.github/workflows/unit_tests.yaml

@@ -1,49 +1,120 @@
name: unit tests name: linux tests
on: on:
workflow_dispatch: push:
workflow_call: branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency: concurrency:
group: unit_tests-${{github.ref}}-${{github.event.pull_request.number || github.run_number}} group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
# Validate that the code can be run on all the Python versions
# supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with:
python-version: '3.10'
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.5- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.5- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with:
python-version: '3.10'
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools types-six
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run style tests
run: |
share/spack/qa/run-style-tests
# Check which files have been updated by the PR
changes:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@b2feaf19c27470162a626bd6fa8438ae5b263721
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
filters: |
core:
- './!(var/**)/**'
packages:
- 'var/**'
# Some links for easier reference:
#
# "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
# job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
# setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
#
- id: coverage
# Run the subsequent jobs with coverage if core has been modified,
# regardless of whether this is a pull request or a push to a branch
run: |
echo Core changes: ${{ steps.filter.outputs.core }}
echo Event name: ${{ github.event_name }}
if [ "${{ steps.filter.outputs.core }}" == "true" ]
then
echo "::set-output name=with_coverage::true"
else
echo "::set-output name=with_coverage::false"
fi
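The `::set-output` workflow command used in this step was GitHub's original mechanism for publishing step outputs and has since been deprecated in favor of writing to the `$GITHUB_OUTPUT` file; an equivalent of the step above in the newer form would be (an aside, not part of this diff):

```yaml
- id: coverage
  run: |
    # Newer equivalent of ::set-output: append key=value to $GITHUB_OUTPUT.
    # The filter outputs are already 'true'/'false' strings, so the
    # if/else above collapses to a direct assignment.
    echo "with_coverage=${{ steps.filter.outputs.core }}" >> "$GITHUB_OUTPUT"
```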
# Run unit tests with different configurations on linux # Run unit tests with different configurations on linux
ubuntu: unittests:
runs-on: ubuntu-20.04 needs: [ validate, style, changes ]
runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
python-version: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10', '3.11'] python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
concretizer: ['clingo'] concretizer: ['clingo']
on_develop:
- ${{ github.ref == 'refs/heads/develop' }}
include: include:
- python-version: 2.7 - python-version: 2.7
concretizer: original concretizer: original
on_develop: ${{ github.ref == 'refs/heads/develop' }} - python-version: 3.6
- python-version: '3.11' concretizer: original
- python-version: 3.9
concretizer: original concretizer: original
on_develop: ${{ github.ref == 'refs/heads/develop' }}
exclude:
- python-version: '3.7'
concretizer: 'clingo'
on_develop: false
- python-version: '3.8'
concretizer: 'clingo'
on_develop: false
- python-version: '3.9'
concretizer: 'clingo'
on_develop: false
- python-version: '3.10'
concretizer: 'clingo'
on_develop: false
steps: steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2 - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with: with:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
- name: Install System packages - name: Install System packages
@@ -55,21 +126,16 @@ jobs:
patchelf cmake bison libbison-dev kcov patchelf cmake bison libbison-dev kcov
- name: Install Python packages - name: Install Python packages
run: | run: |
pip install --upgrade pip six setuptools pytest codecov[toml] pytest-xdist pip install --upgrade pip six setuptools pytest codecov "coverage[toml]<=6.2"
# Install pytest-cov only on recent Python, to avoid stalling on Python 2.7 due
# to bugs on an unmaintained version of the package when used with xdist.
if [[ ${{ matrix.python-version }} != "2.7" ]]; then
pip install --upgrade pytest-cov
fi
# ensure style checks are not skipped in unit tests for python >= 3.6 # ensure style checks are not skipped in unit tests for python >= 3.6
# note that true/false (i.e., 1/0) are opposite in conditions in python and bash # note that true/false (i.e., 1/0) are opposite in conditions in python and bash
if python -c 'import sys; sys.exit(not sys.version_info >= (3, 6))'; then if python -c 'import sys; sys.exit(not sys.version_info >= (3, 6))'; then
pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click==8.0.4" "black<=21.12b0" pip install --upgrade flake8 isort>=4.3.5 mypy>=0.900 black
fi fi
- name: Pin pathlib for Python 2.7 - name: Pin pathlib for Python 2.7
if: ${{ matrix.python-version == 2.7 }} if: ${{ matrix.python-version == 2.7 }}
run: | run: |
pip install -U pathlib2==2.3.6 toml pip install -U pathlib2==2.3.6
- name: Setup git configuration - name: Setup git configuration
run: | run: |
# Need this for the git tests to succeed. # Need this for the git tests to succeed.
@@ -81,30 +147,41 @@ jobs:
SPACK_PYTHON: python SPACK_PYTHON: python
run: | run: |
. share/spack/setup-env.sh . share/spack/setup-env.sh
spack bootstrap disable spack-install spack bootstrap untrust spack-install
spack -v solve zlib spack -v solve zlib
- name: Run unit tests - name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env: env:
SPACK_PYTHON: python SPACK_PYTHON: python
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
SPACK_TEST_PARALLEL: 2
COVERAGE: true COVERAGE: true
UNIT_TEST_COVERAGE: ${{ (matrix.python-version == '3.11') }} SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: | run: |
share/spack/qa/run-unit-tests share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 coverage combine
coverage xml
- name: Run unit tests (reduced suite without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
SPACK_PYTHON: python
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with: with:
flags: unittests,linux,${{ matrix.concretizer }} flags: unittests,linux,${{ matrix.concretizer }}
# Test shell integration # Test shell integration
shell: shell:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2 - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with: with:
python-version: '3.11' python-version: '3.10'
- name: Install System packages - name: Install System packages
run: | run: |
sudo apt-get -y update sudo apt-get -y update
@@ -112,25 +189,33 @@ jobs:
sudo apt-get install -y coreutils kcov csh zsh tcsh fish dash bash sudo apt-get install -y coreutils kcov csh zsh tcsh fish dash bash
- name: Install Python packages - name: Install Python packages
run: | run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml] pytest-xdist pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2
- name: Setup git configuration - name: Setup git configuration
run: | run: |
# Need this for the git tests to succeed. # Need this for the git tests to succeed.
git --version git --version
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Run shell tests - name: Run shell tests (without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
share/spack/qa/run-shell-tests
- name: Run shell tests (with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env: env:
COVERAGE: true COVERAGE: true
run: | run: |
share/spack/qa/run-shell-tests share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 - uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with: with:
flags: shelltests,linux flags: shelltests,linux
# Test RHEL8 UBI with platform Python. This job is run # Test RHEL8 UBI with platform Python. This job is run
# only on PRs modifying core Spack # only on PRs modifying core Spack
rhel8-platform-python: rhel8-platform-python:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
container: registry.access.redhat.com/ubi8/ubi container: registry.access.redhat.com/ubi8/ubi
steps: steps:
- name: Install dependencies - name: Install dependencies
@@ -138,7 +223,7 @@ jobs:
dnf install -y \ dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \ bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz make patch tcl unzip which xz
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- name: Setup repo and non-root user - name: Setup repo and non-root user
run: | run: |
git --version git --version
@@ -154,14 +239,15 @@ jobs:
spack unit-test -k 'not cvs and not svn and not hg' -x --verbose spack unit-test -k 'not cvs and not svn and not hg' -x --verbose
# Test for the clingo based solver (using clingo-cffi) # Test for the clingo based solver (using clingo-cffi)
clingo-cffi: clingo-cffi:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2 - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with: with:
python-version: '3.11' python-version: '3.10'
- name: Install System packages - name: Install System packages
run: | run: |
sudo apt-get -y update sudo apt-get -y update
@@ -171,53 +257,105 @@ jobs:
patchelf kcov patchelf kcov
- name: Install Python packages - name: Install Python packages
run: | run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml] pytest-cov clingo pytest-xdist pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2 clingo
- name: Setup git configuration - name: Setup git configuration
run: | run: |
# Need this for the git tests to succeed. # Need this for the git tests to succeed.
git --version git --version
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
- name: Run unit tests (full suite with coverage) - name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env: env:
COVERAGE: true COVERAGE: true
SPACK_TEST_SOLVER: clingo SPACK_TEST_SOLVER: clingo
run: | run: |
share/spack/qa/run-unit-tests share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # @v2.1.0 coverage combine
coverage xml
- name: Run unit tests (reduced suite without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with: with:
flags: unittests,linux,clingo flags: unittests,linux,clingo
# Run unit tests on MacOS # Run unit tests on MacOS
macos: build:
needs: [ validate, style, changes ]
runs-on: macos-latest runs-on: macos-latest
strategy: strategy:
matrix: matrix:
python-version: ["3.10"] python-version: [3.8]
steps: steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2 - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with: with:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
- name: Install Python packages - name: Install Python packages
run: | run: |
pip install --upgrade pip six setuptools pip install --upgrade pip six setuptools
pip install --upgrade pytest codecov coverage[toml] pytest-xdist pytest-cov pip install --upgrade pytest codecov coverage[toml]==6.2
- name: Setup Homebrew packages - name: Setup Homebrew packages
run: | run: |
brew install dash fish gcc gnupg2 kcov brew install dash fish gcc gnupg2 kcov
- name: Run unit tests - name: Run unit tests
env: env:
SPACK_TEST_SOLVER: clingo SPACK_TEST_SOLVER: clingo
SPACK_TEST_PARALLEL: 4
run: | run: |
git --version git --version
. .github/workflows/setup_git.sh . .github/workflows/setup_git.sh
. share/spack/setup-env.sh . share/spack/setup-env.sh
$(which spack) bootstrap disable spack-install $(which spack) bootstrap untrust spack-install
$(which spack) solve zlib $(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x) if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
$(which spack) unit-test --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}" then
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 coverage run $(which spack) unit-test -x
coverage combine
coverage xml
# Delete the symlink going from ./lib/spack/docs/_spack_root back to
# the initial directory, since it causes ELOOP errors with codecov/actions@2
rm lib/spack/docs/_spack_root
else
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
fi
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with: with:
files: ./coverage.xml
flags: unittests,macos flags: unittests,macos
# Run audits on all the packages in the built-in repository
package-audits:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb # @v2
with:
python-version: '3.10'
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2
- name: Package audits (with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits
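The subtlest change in this file is the matrix reshaping: `on_develop` is a synthetic one-element matrix dimension holding a single boolean expression, and the `exclude` entries drop most clingo/Python combinations whenever that boolean is false, i.e. on anything other than pushes to `develop`. The mechanism in isolation (versions shortened for illustration):

```yaml
strategy:
  matrix:
    python-version: ['3.7', '3.11']
    on_develop:
      - ${{ github.ref == 'refs/heads/develop' }}   # single-value dimension
    exclude:
      - python-version: '3.7'
        on_develop: false   # pruned from PR runs, still tested on develop
```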

.github/workflows/valid-style.yml

@@ -1,60 +0,0 @@
name: style
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
concurrency:
group: style-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Validate that the code can be run on all the Python versions
# supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python packages
run: |
python3 -m pip install --upgrade pip six setuptools types-six click==8.0.2 'black==21.12b0' mypy isort clingo flake8
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run style tests
run: |
share/spack/qa/run-style-tests
audit:
uses: ./.github/workflows/audit.yaml
with:
with_coverage: ${{ inputs.with_coverage }}
python_version: '3.11'
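The vermin invocations above are what `validate` enforces: each `-t=<version>-` flag sets a minimum interpreter the code must stay compatible with, `--violations` turns incompatibilities into failures, and the `--backport` flags tell vermin that argparse and typing are available as backports on older interpreters. A local reproduction sketch (paths as in the workflow; the pip install added only for self-containment):

```yaml
- name: vermin (local sketch)
  run: |
    pip install vermin
    vermin --backport argparse --backport typing --violations \
      -t=2.7- -t=3.6- lib/spack/spack/ lib/spack/llnl/ bin/
```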

.github/workflows/windows_python.yml

@@ -1,10 +1,17 @@
name: windows name: windows tests
on: on:
workflow_call: push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency: concurrency:
group: windows-${{github.ref}}-${{github.event.pull_request.number || github.run_number}} group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true cancel-in-progress: true
defaults: defaults:
@@ -12,64 +19,91 @@ defaults:
shell: shell:
powershell Invoke-Expression -Command ".\share\spack\qa\windows_test_setup.ps1"; {0} powershell Invoke-Expression -Command ".\share\spack\qa\windows_test_setup.ps1"; {0}
jobs: jobs:
unit-tests: validate:
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
with:
python-version: 3.9
- name: Install Python Packages
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.5-' -v spack/lib/spack/spack/ spack/lib/spack/llnl/ spack/bin/
- name: vermin (Repositories)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.5-' -v spack/var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
with: with:
python-version: 3.9 python-version: 3.9
- name: Install Python packages - name: Install Python packages
run: | run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov pytest-cov clingo python -m pip install --upgrade pip six setuptools flake8 isort>=4.3.5 mypy>=0.800 black pywin32 types-python-dateutil
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Run style tests
run: |
spack style
- name: Verify license headers
run: |
python spack\bin\spack license verify
unittest:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Create local develop - name: Create local develop
run: | run: |
.\spack\.github\workflows\setup_git.ps1 .\spack\.github\workflows\setup_git.ps1
- name: Unit Test - name: Unit Test
run: | run: |
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
cd spack spack unit-test --verbose --ignore=lib/spack/spack/test/cmd
dir unittest-cmd:
spack unit-test -x --verbose --cov --cov-config=pyproject.toml --ignore=lib/spack/spack/test/cmd needs: [ validate, style ]
coverage combine -a
coverage xml
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
with:
flags: unittests,windows
unit-tests-cmd:
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
with: with:
python-version: 3.9 python-version: 3.9
- name: Install Python packages - name: Install Python packages
run: | run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage pytest-cov clingo python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
      - name: Create local develop
        run: |
          .\spack\.github\workflows\setup_git.ps1
      - name: Command Unit Test
        run: |
          echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
-         cd spack
-         spack unit-test -x --verbose --cov --cov-config=pyproject.toml lib/spack/spack/test/cmd
-         coverage combine -a
-         coverage xml
-     - uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
-       with:
-         flags: unittests,windows
-  build-abseil:
+         spack unit-test lib/spack/spack/test/cmd --verbose
+  buildtest:
+    needs: [ validate, style ]
    runs-on: windows-latest
    steps:
-   - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
+   - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
      with:
        fetch-depth: 0
-   - uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
+   - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
      with:
        python-version: 3.9
    - name: Install Python packages
@@ -81,8 +115,9 @@ jobs:
        echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
        spack external find cmake
        spack external find ninja
-       spack -d install abseil-cpp
+       spack install abseil-cpp
-  make-installer:
+  generate-installer-test:
    runs-on: windows-latest
    steps:
    - name: Disable Windows Symlinks
@@ -90,15 +125,15 @@ jobs:
      git config --global core.symlinks false
      shell:
        powershell
-   - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
+   - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
      with:
        fetch-depth: 0
-   - uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
+   - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
      with:
        python-version: 3.9
    - name: Install Python packages
      run: |
-       python -m pip install --upgrade pip six pywin32 setuptools
+       python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
    - name: Add Light and Candle to Path
      run: |
        $env:WIX >> $GITHUB_PATH
@@ -109,27 +144,27 @@ jobs:
        echo "installer_root=$((pwd).Path)" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
      env:
        ProgressPreference: SilentlyContinue
-   - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb
+   - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
      with:
        name: Windows Spack Installer Bundle
        path: ${{ env.installer_root }}\pkg\Spack.exe
-   - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb
+   - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
      with:
        name: Windows Spack Installer
        path: ${{ env.installer_root}}\pkg\Spack.msi
  execute-installer:
-   needs: make-installer
+   needs: generate-installer-test
    runs-on: windows-latest
    defaults:
      run:
        shell: pwsh
    steps:
-   - uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
+   - uses: actions/setup-python@d09bd5e6005b175076f227b13d9730d56e9dcfcb
      with:
        python-version: 3.9
    - name: Install Python packages
      run: |
-       python -m pip install --upgrade pip six pywin32 setuptools
+       python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
    - name: Setup installer directory
      run: |
        mkdir -p spack_installer
@@ -155,4 +190,4 @@ jobs:
      $proc = Start-Process ${{ env.spack_installer }}\spack.msi "/quiet" -Passthru
      $handle = $proc.Handle # cache proc.Handle
      $proc.WaitForExit();
      $LASTEXITCODE
@@ -1,335 +1,3 @@
# v0.19.2 (2023-04-04)
### Spack Bugfixes
* Ignore global variant requirement for packages that do not define it (#35037)
* Compiler wrapper: improved parsing of linker arguments (#35929, #35912)
* Do not detect apple-clang as cce on macOS (#35974)
* Views: fix support for optional Python extensions (#35489)
* Views: fix issue where Python executable gets symlinked instead of copied (#34661)
* Fix a bug where tests were not added when concretizing together (#35290)
* Compiler flags: fix clang/apple-clang c/c++ standard flags (#35062)
* Increase db timeout from 3s to 60s to improve stability of parallel installs (#35517)
* Buildcache: improve error handling in downloads (#35568)
* Module files for packages installed from buildcache have long placeholder paths abbreviated in configure args section (#36611)
* Reduce verbosity of error messages regarding non-existing module files (#35502)
* Ensure file with build environment variables is truncated when writing to it (#35673)
* `spack config update` now works on active environments (#36542)
* Fix an issue where spack.yaml got reformatted incorrectly (#36698)
* Packages UPC++ and GASNet-EX were updated (#36629)
# v0.19.1 (2023-02-07)
### Spack Bugfixes
* `buildcache create`: make "file exists" less verbose (#35019)
* `spack mirror create`: don't change paths to urls (#34992)
* Improve error message for requirements (#33988)
* uninstall: fix accidental cubic complexity (#34005)
* scons: fix signature for `install_args` (#34481)
* Fix `combine_phase_logs` text encoding issues (#34657)
* Use a module-like object to propagate changes in the MRO, when setting build env (#34059)
* PackageBase should not define builder legacy attributes (#33942)
* Forward lookup of the "run_tests" attribute (#34531)
* Bugfix for timers (#33917, #33900)
* Fix path handling in prefix inspections (#35318)
* Fix libtool filter for Fujitsu compilers (#34916)
* Bug fix for duplicate rpath errors on macOS when creating build caches (#34375)
* FileCache: delete the new cache file on exception (#34623)
* Propagate exceptions from Spack python console (#34547)
* Tests: Fix a bug/typo in a `config_values.py` fixture (#33886)
* Various CI fixes (#33953, #34560, #34828)
* Docs: remove monitors and analyzers, typos (#34358, #33926)
* Bump release version for tutorial command (#33859)
# v0.19.0 (2022-11-11)
`v0.19.0` is a major feature release.
## Major features in this release
1. **Package requirements**
Spack's traditional [package preferences](
https://spack.readthedocs.io/en/latest/build_settings.html#package-preferences)
are soft, but we've added hard requirements to `packages.yaml` and `spack.yaml`
(#32528, #32369). Package requirements use the same syntax as specs:
```yaml
packages:
  libfabric:
    require: "@1.13.2"
  mpich:
    require:
    - one_of: ["+cuda", "+rocm"]
```
More details in [the docs](
https://spack.readthedocs.io/en/latest/build_settings.html#package-requirements).
2. **Environment UI Improvements**
* Fewer surprising modifications to `spack.yaml` (#33711):
* `spack install` in an environment will no longer add to the `specs:` list; you'll
need to either use `spack add <spec>` or `spack install --add <spec>`.
* Similarly, `spack uninstall` will not remove from your environment's `specs:`
list; you'll need to use `spack remove` or `spack uninstall --remove` (see the
console sketch at the end of this item).
This will make it easier to manage an environment, as there is clear separation
between the stack to be installed (`spack.yaml`/`spack.lock`) and which parts of
it should be installed (`spack install` / `spack uninstall`).
* `concretizer:unify:true` is now the default mode for new environments (#31787)
We see more users creating `unify:true` environments now. Users who need
`unify:false` can add it to their environment to get the old behavior. This will
concretize every spec in the environment independently.
* Include environment configuration from URLs (#29026, [docs](
https://spack.readthedocs.io/en/latest/environments.html#included-configurations))
You can now include configuration in your environment directly from a URL:
```yaml
spack:
  include:
  - https://github.com/path/to/raw/config/compilers.yaml
```
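As a hedged sketch of the add/remove workflow described in the first bullet above (the package name is illustrative; only `spack add`, `spack remove`, `--add`, and `--remove` come from the notes themselves):
```console
$ spack install hdf5             # builds hdf5 but no longer edits the specs: list
$ spack install --add hdf5       # installs and records hdf5 in spack.yaml
$ spack remove hdf5              # drops hdf5 from the specs: list again
$ spack uninstall --remove hdf5  # uninstalls and removes it in one step
```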
3. **Multiple Build Systems**
An increasing number of packages in the ecosystem need the ability to support
multiple build systems (#30738, [docs](
https://spack.readthedocs.io/en/latest/packaging_guide.html#multiple-build-systems)),
either across versions, across platforms, or within the same version of the software.
This has been hard to support through multiple inheritance, as methods from different
build system superclasses would conflict. `package.py` files can now define separate
builder classes with installation logic for different build systems, e.g.:
```python
class ArpackNg(CMakePackage, AutotoolsPackage):
    build_system(
        conditional("cmake", when="@0.64:"),
        conditional("autotools", when="@:0.63"),
        default="cmake",
    )

class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
    def cmake_args(self):
        pass

class AutotoolsBuilder(spack.build_systems.autotools.AutotoolsBuilder):
    def configure_args(self):
        pass
```
4. **Compiler and variant propagation**
Currently, compiler flags and variants are inconsistent: compiler flags set for a
package are inherited by its dependencies, while variants are not. `v0.19` makes
the two consistent: propagation can now be enabled or disabled for both
variants and compiler flags.
Example syntax:
- `package ++variant`:
enabled variant that will be propagated to dependencies
- `package +variant`:
enabled variant that will NOT be propagated to dependencies
- `package ~~variant`:
disabled variant that will be propagated to dependencies
- `package ~variant`:
disabled variant that will NOT be propagated to dependencies
- `package cflags==-g`:
`cflags` will be propagated to dependencies
- `package cflags=-g`:
`cflags` will NOT be propagated to dependencies
Syntax for non-boolean variants is similar to compiler flags. More in the docs for
[variants](
https://spack.readthedocs.io/en/latest/basic_usage.html#variants) and [compiler flags](
https://spack.readthedocs.io/en/latest/basic_usage.html#compiler-flags).
5. **Enhancements to git version specifiers**
* `v0.18.0` added the ability to use git commits as versions. You can now use the
`git.` prefix to specify git tags or branches as versions. All of these are valid git
versions in `v0.19` (#31200):
```console
foo@abcdef1234abcdef1234abcdef1234abcdef1234 # raw commit
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234 # commit with git prefix
foo@git.develop # the develop branch
foo@git.0.19 # use the 0.19 tag
```
* `v0.19` also gives you more control over how Spack interprets git versions, in case
Spack cannot detect the version from the git repository. You can suffix a git
version with `=<version>` to force Spack to concretize it as a particular version
(#30998, #31914, #32257):
```console
# use mybranch, but treat it as version 3.2 for version comparison
foo@git.mybranch=3.2
# use the given commit, but treat it as develop for version comparison
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234=develop
```
More in [the docs](
https://spack.readthedocs.io/en/latest/basic_usage.html#version-specifier)
6. **Changes to Cray EX Support**
Cray machines have historically had their own "platform" within Spack, because we
needed to go through the module system to leverage compilers and MPI installations on
these machines. The Cray EX programming environment now provides standalone `craycc`
executables and proper `mpicc` wrappers, so Spack can treat EX machines like Linux
with extra packages (#29392).
We expect this to greatly reduce bugs, as external packages and compilers can now be
used by prefix instead of through modules. We will also no longer be subject to
reproducibility issues when modules change from Cray PE release to release and from
site to site. This also simplifies dealing with the underlying Linux OS on Cray
systems, as Spack will properly model the machine's OS as either SuSE or RHEL.
7. **Improvements to tests and testing in CI**
* `spack ci generate --tests` will generate a `.gitlab-ci.yml` file that not only does
builds but also runs tests for built packages (#27877). Public GitHub pipelines now
also run tests in CI.
* `spack test run --explicit` will only run tests for packages that are explicitly
installed, instead of all packages.
8. **Experimental binding link model**
You can add a new option to `config.yaml` to make Spack embed absolute paths to
needed shared libraries in ELF executables and shared libraries on Linux (#31948, [docs](
https://spack.readthedocs.io/en/latest/config_yaml.html#shared-linking-bind)):
```yaml
config:
  shared_linking:
    type: rpath
    bind: true
```
This can improve launch time at scale for parallel applications, and it can make
installations less susceptible to environment variables like `LD_LIBRARY_PATH`,
especially when dealing with external libraries that use `RUNPATH`. You can think of
this as a faster, even higher-precedence version of `RPATH`.
## Other new features of note
* `spack spec` prints dependencies more legibly. Dependencies in the output now appear
at the *earliest* level of indentation possible (#33406)
* You can override `package.py` attributes like `url`, directly in `packages.yaml`
(#33275, [docs](
https://spack.readthedocs.io/en/latest/build_settings.html#assigning-package-attributes)); see the sketch after this list.
* There are a number of new architecture-related format strings you can use in Spack
configuration files to specify paths (#29810, [docs](
https://spack.readthedocs.io/en/latest/configuration.html#config-file-variables))
* Spack now supports bootstrapping Clingo on Windows (#33400)
* There is now support for an `RPATH`-like library model on Windows (#31930)
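As a rough sketch of the `packages.yaml` attribute override mentioned above (the package name and URL are illustrative; the `package_attributes:` key is the one described in the linked docs):
```yaml
packages:
  mpich:
    # Override attributes normally defined in the package.py recipe,
    # e.g. to fetch tarballs from a site-local mirror (illustrative URL).
    package_attributes:
      url: https://mirror.example.com/pub/mpich
```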
## Performance Improvements
* Major performance improvements for installation from binary caches (#27610, #33628,
#33636, #33608, #33590, #33496)
* Test suite can now be parallelized using `xdist` (used in GitHub Actions) (#32361)
* Reduce lock contention for parallel builds in environments (#31643)
## New binary caches and stacks
* We now build nearly all of E4S with `oneapi` in our buildcache (#31781, #31804,
#31803, #31840, #31991, #32117, #32107, #32239)
* Added 3 new machine learning-centric stacks to binary cache: `x86_64_v3`, CUDA, ROCm
(#31592, #33463)
## Removals and Deprecations
* Support for Python 3.5 is dropped (#31908). Only Python 2.7 and 3.6+ are officially
supported.
* This is the last Spack release that will support Python 2 (#32615). Spack `v0.19`
will emit a deprecation warning if you run it with Python 2, and Python 2 support will
soon be removed from the `develop` branch.
* `LD_LIBRARY_PATH` is no longer set by default by `spack load` or module loads.
Setting `LD_LIBRARY_PATH` in Spack environments/modules can cause binaries from
outside of Spack to crash, and Spack's own builds use `RPATH` and do not need
`LD_LIBRARY_PATH` set in order to run. If you still want the old behavior, you
can run these commands to configure Spack to set `LD_LIBRARY_PATH`:
```console
spack config add modules:prefix_inspections:lib64:[LD_LIBRARY_PATH]
spack config add modules:prefix_inspections:lib:[LD_LIBRARY_PATH]
```
* The `spack:concretization:[together|separately]` option has been removed after being
deprecated in `v0.18`. Use `concretizer:unify:[true|false]` instead.
* `config:module_roots` is no longer supported after being deprecated in `v0.18`. Use
configuration in module sets instead (#28659, [docs](
https://spack.readthedocs.io/en/latest/module_file_support.html)).
* `spack activate` and `spack deactivate` are no longer supported, having been
deprecated in `v0.18`. Use an environment with a view instead of
activating/deactivating ([docs](
https://spack.readthedocs.io/en/latest/environments.html#configuration-in-spack-yaml)).
* The old YAML format for buildcaches is now deprecated (#33707). If you are using an
old buildcache with YAML metadata you will need to regenerate it with JSON metadata.
* `spack bootstrap trust` and `spack bootstrap untrust` are deprecated in favor of
`spack bootstrap enable` and `spack bootstrap disable` and will be removed in `v0.20`.
(#33600)
* The `graviton2` architecture has been renamed to `neoverse_n1`, and `graviton3`
is now `neoverse_v1`. Buildcaches using the old architecture names will need to be rebuilt.
* The terms `blacklist` and `whitelist` have been replaced with `include` and `exclude`
in all configuration files (#31569). You can use `spack config update` to
automatically fix your configuration files.
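To make the last few migrations concrete, here is a minimal, hedged sketch; the keys come from the deprecation notes above, while the surrounding layout is illustrative:
```yaml
# spack.yaml -- replaces the removed spack:concretization:[together|separately]
spack:
  concretizer:
    unify: true
```
`spack config update` can rewrite deprecated names such as `blacklist`/`whitelist` in place, one configuration section at a time:
```console
$ spack config update modules
```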
## Notable Bugfixes
* Permission setting on installation now handles effective uid properly (#19980)
* `buildable:true` for an MPI implementation now overrides `buildable:false` for `mpi` (#18269)
* Improved error messages when attempting to use an unconfigured compiler (#32084)
* Do not punish explicitly requested compiler mismatches in the solver (#30074)
* `spack stage`: add missing --fresh and --reuse (#31626)
* Fixes for adding build system executables like `cmake` to package scope (#31739)
* Bugfix for binary relocation with aliased strings produced by newer `binutils` (#32253)
## Spack community stats
* 6,751 total packages, 335 new since `v0.18.0`
* 141 new Python packages
* 89 new R packages
* 303 people contributed to this release
* 287 committers to packages
* 57 committers to core
# v0.18.1 (2022-07-19)
### Spack Bugfixes
* Fix several bugs related to bootstrapping (#30834,#31042,#31180)
* Fix a regression that was causing spec hashes to differ between
Python 2 and Python 3 (#31092)
* Fixed compiler flags for oneAPI and DPC++ (#30856)
* Fixed several issues related to concretization (#31142,#31153,#31170,#31226)
* Improved support for Cray manifest file and `spack external find` (#31144,#31201,#31173,#31186)
* Assign a version to openSUSE Tumbleweed according to the GLIBC version
in the system (#19895)
* Improved Dockerfile generation for `spack containerize` (#29741,#31321)
* Fixed a few bugs related to concurrent execution of commands (#31509,#31493,#31477)
### Package updates
* WarpX: add v22.06, fixed libs property (#30866,#31102)
* openPMD: add v0.14.5, update recipe for @develop (#29484,#31023)
# v0.18.0 (2022-05-28)

`v0.18.0` is a major feature release.

@@ -531,15 +199,6 @@
* 337 committers to packages
* 85 committers to core
# v0.17.3 (2022-07-14)
### Spack bugfixes
* Fix missing chgrp on symlinks in package installations (#30743)
* Allow having non-existing upstreams (#30744, #30746)
* Fix `spack stage` with custom paths (#30448)
* Fix failing call for `spack buildcache save-specfile` (#30637)
* Fix globbing in compiler wrapper (#30699)
# v0.17.2 (2022-04-13)
@@ -2,10 +2,10 @@
 [![Unit Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
 [![Bootstrapping](https://github.com/spack/spack/actions/workflows/bootstrap.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/bootstrap.yml)
+[![macOS Builds (nightly)](https://github.com/spack/spack/workflows/macOS%20builds%20nightly/badge.svg?branch=develop)](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
 [![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
 [![Containers](https://github.com/spack/spack/actions/workflows/build-containers.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/build-containers.yml)
 [![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
-[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
 [![Slack](https://slack.spack.io/badge.svg)](https://slack.spack.io)

 Spack is a multi-platform package manager that builds and installs
@@ -62,7 +62,6 @@ Resources:
 * **Slack workspace**: [spackpm.slack.com](https://spackpm.slack.com).
   To get an invitation, visit [slack.spack.io](https://slack.spack.io).
-* [**Github Discussions**](https://github.com/spack/spack/discussions): not just for discussions, also Q&A.
 * **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
 * **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
   `@mention` us!
@@ -10,8 +10,8 @@ For more on Spack's release structure, see
 | Version | Supported          |
 | ------- | ------------------ |
 | develop | :white_check_mark: |
-| 0.19.x  | :white_check_mark: |
-| 0.18.x  | :white_check_mark: |
+| 0.17.x  | :white_check_mark: |
+| 0.16.x  | :white_check_mark: |

 ## Reporting a Vulnerability
@@ -8,11 +8,13 @@
 def getpywin():
     try:
-        import win32con  # noqa: F401
+        import win32con  # noqa
     except ImportError:
-        subprocess.check_call([sys.executable, "-m", "pip", "-q", "install", "--upgrade", "pip"])
-        subprocess.check_call([sys.executable, "-m", "pip", "-q", "install", "pywin32"])
+        subprocess.check_call(
+            [sys.executable, "-m", "pip", "-q", "install", "--upgrade", "pip"])
+        subprocess.check_call(
+            [sys.executable, "-m", "pip", "-q", "install", "pywin32"])

-if __name__ == "__main__":
+if __name__ == '__main__':
     getpywin()
@@ -49,8 +49,50 @@ spack_prefix = os.path.dirname(os.path.dirname(spack_file))
 spack_lib_path = os.path.join(spack_prefix, "lib", "spack")
 sys.path.insert(0, spack_lib_path)

-from spack_installable.main import main  # noqa: E402
+# Add external libs
+spack_external_libs = os.path.join(spack_lib_path, "external")
+if sys.version_info[:2] <= (2, 7):
+    sys.path.insert(0, os.path.join(spack_external_libs, "py2"))
+sys.path.insert(0, spack_external_libs)
+
+# Here we delete ruamel.yaml in case it has been already imported from site
+# (see #9206 for a broader description of the issue).
+#
+# Briefly: ruamel.yaml produces a .pth file when installed with pip that
+# makes the site installed package the preferred one, even though sys.path
+# is modified to point to another version of ruamel.yaml.
+if "ruamel.yaml" in sys.modules:
+    del sys.modules["ruamel.yaml"]
+if "ruamel" in sys.modules:
+    del sys.modules["ruamel"]
+
+# The following code is here to avoid failures when updating
+# the develop version, due to spurious argparse.pyc files remaining
+# in the libs/spack/external directory, see:
+# https://github.com/spack/spack/pull/25376
+# TODO: Remove in v0.18.0 or later
+try:
+    import argparse
+except ImportError:
+    argparse_pyc = os.path.join(spack_external_libs, 'argparse.pyc')
+    if not os.path.exists(argparse_pyc):
+        raise
+    try:
+        os.remove(argparse_pyc)
+        import argparse  # noqa
+    except Exception:
+        msg = ('The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. '
+               'Either delete it manually or ask some administrator to '
+               'delete it for you.')
+        print(msg.format(argparse_pyc))
+        sys.exit(1)
+
+import spack.main  # noqa

 # Once we've set up the system path, run the spack main method
 if __name__ == "__main__":
-    sys.exit(main())
+    sys.exit(spack.main.main())
@@ -1,95 +0,0 @@
#!/bin/bash
set -euo pipefail
[[ -n "${TMPCONFIG_DEBUG:=}" ]] && set -x
DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
mkdir -p "${XDG_RUNTIME_DIR:=/tmp}/spack-tests"
export TMPDIR="${XDG_RUNTIME_DIR}"
export TMP_DIR="$(mktemp -d -t spack-test-XXXXX)"
clean_up() {
[[ -n "$TMPCONFIG_DEBUG" ]] && printf "cleaning up: $TMP_DIR\n"
rm -rf "$TMP_DIR"
}
trap clean_up EXIT
trap clean_up ERR
[[ -n "$TMPCONFIG_DEBUG" ]] && printf "Redirecting TMP_DIR and spack directories to $TMP_DIR\n"
export BOOTSTRAP="${SPACK_USER_CACHE_PATH:=$HOME/.spack}/bootstrap"
export SPACK_USER_CACHE_PATH="$TMP_DIR/user_cache"
mkdir -p "$SPACK_USER_CACHE_PATH"
private_bootstrap="$SPACK_USER_CACHE_PATH/bootstrap"
use_spack=''
use_bwrap=''
# argument handling
while (($# >= 1)) ; do
case "$1" in
-b) # privatize bootstrap too, useful for CI but not always cheap
shift
export BOOTSTRAP="$private_bootstrap"
;;
-B) # use specified bootstrap dir
export BOOTSTRAP="$2"
shift 2
;;
-s) # run spack directly with remaining args
shift
use_spack=1
;;
--contain=bwrap)
if bwrap --help 2>&1 > /dev/null ; then
use_bwrap=1
else
echo Bubblewrap containment requested, but no bwrap command found
exit 1
fi
shift
;;
--)
shift
break
;;
*)
break
;;
esac
done
typeset -a CMD
if [[ -n "$use_spack" ]] ; then
CMD=("$DIR/spack" "$@")
else
CMD=("$@")
fi
mkdir -p "$BOOTSTRAP"
export SPACK_SYSTEM_CONFIG_PATH="$TMP_DIR/sys_conf"
export SPACK_USER_CONFIG_PATH="$TMP_DIR/user_conf"
mkdir -p "$SPACK_USER_CONFIG_PATH"
cat >"$SPACK_USER_CONFIG_PATH/config.yaml" <<EOF
config:
install_tree:
root: $TMP_DIR/install
misc_cache: $$user_cache_path/cache
source_cache: $$user_cache_path/source
EOF
cat >"$SPACK_USER_CONFIG_PATH/bootstrap.yaml" <<EOF
bootstrap:
root: $BOOTSTRAP
EOF
if [[ -n "$use_bwrap" ]] ; then
CMD=(
bwrap
--dev-bind / /
--ro-bind "$DIR/.." "$DIR/.." # do not touch spack root
--ro-bind $HOME/.spack $HOME/.spack # do not touch user config/cache dir
--bind "$TMP_DIR" "$TMP_DIR"
--bind "$BOOTSTRAP" "$BOOTSTRAP"
--die-with-parent
"${CMD[@]}"
)
fi
(( ${TMPCONFIG_DEBUG:=0} > 1)) && echo "Running: ${CMD[@]}"
"${CMD[@]}"
@@ -9,15 +9,14 @@ bootstrap:
   # may not be able to bootstrap all the software that Spack needs,
   # depending on its type.
   sources:
-  - name: 'github-actions-v0.4'
-    metadata: $spack/share/spack/bootstrap/github-actions-v0.4
-  - name: 'github-actions-v0.3'
-    metadata: $spack/share/spack/bootstrap/github-actions-v0.3
+  - name: 'github-actions-v0.2'
+    metadata: $spack/share/spack/bootstrap/github-actions-v0.2
+  - name: 'github-actions-v0.1'
+    metadata: $spack/share/spack/bootstrap/github-actions-v0.1
   - name: 'spack-install'
     metadata: $spack/share/spack/bootstrap/spack-install
   trusted:
     # By default we trust bootstrapping from sources and from binaries
     # produced on Github via the workflow
-    github-actions-v0.4: true
-    github-actions-v0.3: true
+    github-actions-v0.2: true
     spack-install: true
@@ -33,4 +33,4 @@ concretizer:
   # environments can always be activated. When "false" perform concretization separately
   # on each root spec, allowing different versions and variants of the same package in
   # an environment.
-  unify: true
+  unify: false
@@ -176,7 +176,7 @@ config:
   # when Spack needs to manage its own package metadata and all operations are
   # expected to complete within the default time limit. The timeout should
   # therefore generally be left untouched.
-  db_lock_timeout: 60
+  db_lock_timeout: 3

   # How long to wait when attempting to modify a package (e.g. to install it).
@@ -187,20 +187,10 @@ config:
   package_lock_timeout: null

-  # Control how shared libraries are located at runtime on Linux. See the
-  # the Spack documentation for details.
-  shared_linking:
-    # Spack automatically embeds runtime search paths in ELF binaries for their
-    # dependencies. Their type can either be "rpath" or "runpath". For glibc, rpath is
-    # inherited and has precedence over LD_LIBRARY_PATH; runpath is not inherited
-    # and of lower precedence. DO NOT MIX these within the same install tree.
-    type: rpath
-    # (Experimental) Embed absolute paths of dependent libraries directly in ELF
-    # binaries to avoid runtime search. This can improve startup time of
-    # executables with many dependencies, in particular on slow filesystems.
-    bind: false
+  # Control whether Spack embeds RPATH or RUNPATH attributes in ELF binaries.
+  # Has no effect on macOS. DO NOT MIX these within the same install tree.
+  # See the Spack documentation for details.
+  shared_linking: 'rpath'

   # Set to 'false' to allow installation on filesystems that doesn't allow setgid bit
@@ -211,7 +201,3 @@ config:
   # building and installing packages. This gives information about Spack's
   # current progress as well as the current and total number of packages.
   terminal_title: false
-
-  # Number of seconds a buildcache's index.json is cached locally before probing
-  # for updates, within a single Spack invocation. Defaults to 10 minutes.
-  binary_index_ttl: 600
@@ -13,4 +13,9 @@
 # Per-user settings (overrides default and site settings):
 #   ~/.spack/modules.yaml
 # -------------------------------------------------------------------------
-modules: {}
+modules:
+  prefix_inspections:
+    lib:
+    - LD_LIBRARY_PATH
+    lib64:
+    - LD_LIBRARY_PATH
@@ -15,7 +15,7 @@
 # -------------------------------------------------------------------------
 modules:
   prefix_inspections:
-    ./lib:
+    lib:
     - DYLD_FALLBACK_LIBRARY_PATH
-    ./lib64:
+    lib64:
     - DYLD_FALLBACK_LIBRARY_PATH
@@ -13,4 +13,9 @@
 # Per-user settings (overrides default and site settings):
 #   ~/.spack/modules.yaml
 # -------------------------------------------------------------------------
-modules: {}
+modules:
+  prefix_inspections:
+    lib:
+    - LD_LIBRARY_PATH
+    lib64:
+    - LD_LIBRARY_PATH
@@ -14,24 +14,23 @@
 #   ~/.spack/modules.yaml
 # -------------------------------------------------------------------------
 modules:
-  # This maps paths in the package install prefix to environment variables
-  # they should be added to. For example, <prefix>/bin should be in PATH.
+  # Paths to check when creating modules for all module sets
   prefix_inspections:
-    ./bin:
+    bin:
     - PATH
-    ./man:
+    man:
     - MANPATH
-    ./share/man:
+    share/man:
     - MANPATH
-    ./share/aclocal:
+    share/aclocal:
     - ACLOCAL_PATH
-    ./lib/pkgconfig:
+    lib/pkgconfig:
     - PKG_CONFIG_PATH
-    ./lib64/pkgconfig:
+    lib64/pkgconfig:
     - PKG_CONFIG_PATH
-    ./share/pkgconfig:
+    share/pkgconfig:
     - PKG_CONFIG_PATH
-    ./:
+    '':
     - CMAKE_PREFIX_PATH

 # These are configurations for the module set named "default"
@@ -25,18 +25,16 @@ packages:
   fftw-api: [fftw, amdfftw]
   flame: [libflame, amdlibflame]
   fuse: [libfuse]
-  gl: [glx, osmesa]
+  gl: [mesa+opengl, mesa18, opengl]
   glu: [mesa-glu, openglu]
-  golang: [go, gcc]
-  go-external-or-gccgo-bootstrap: [go-bootstrap, gcc]
+  glx: [mesa+glx, mesa18+glx, opengl]
+  golang: [gcc]
   iconv: [libiconv]
   ipp: [intel-ipp]
   java: [openjdk, jdk, ibm-java]
   jpeg: [libjpeg-turbo, libjpeg]
   lapack: [openblas, amdlibflame]
-  libglx: [mesa+glx, mesa18+glx]
-  libllvm: [llvm]
-  libosmesa: [mesa+osmesa, mesa18+osmesa]
+  libllvm: [llvm, llvm-amdgpu]
   lua-lang: [lua, lua-luajit-openresty, lua-luajit]
   luajit: [lua-luajit-openresty, lua-luajit]
   mariadb-client: [mariadb-c-client, mariadb]
@@ -46,6 +44,7 @@ packages:
   mysql-client: [mysql, mariadb-c-client]
   opencl: [pocl]
   onedal: [intel-oneapi-dal]
+  osmesa: [mesa+osmesa, mesa18+osmesa]
   pbs: [openpbs, torque]
   pil: [py-pillow]
   pkgconfig: [pkgconf, pkg-config]
@@ -1,5 +1,5 @@
 config:
   locks: false
-  concretizer: clingo
+  concretizer: original
   build_stage::
   - '$spack/.staging'
lib/spack/docs/_spack_root (symbolic link, 1 line)
@@ -0,0 +1 @@
+../../..

lib/spack/docs/analyze.rst (162 lines)
@@ -0,0 +1,162 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _analyze:
=======
Analyze
=======
The analyze command is a front-end to various tools that let us analyze
package installations. Each analyzer is a module for a different kind
of analysis that can be done on a package installation, including (but not
limited to) binary, log, or text analysis. Thus, the analyze command group
allows you to take an existing package install, choose an analyzer,
and extract some output for the package using it.
-----------------
Analyzer Metadata
-----------------
For all analyzers, we write to an ``analyzers`` folder in ``~/.spack``, or the
value that you specify in your spack config at ``config:analyzers_dir``.
For example, here we see the results of running an analysis on zlib:
.. code-block:: console
$ tree ~/.spack/analyzers/
└── linux-ubuntu20.04-skylake
└── gcc-9.3.0
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
├── environment_variables
│   └── spack-analyzer-environment-variables.json
├── install_files
│   └── spack-analyzer-install-files.json
└── libabigail
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
This means that you can always find analyzer output in this folder, and it
is organized with the same logic as the package install it was run for.
If you want to customize this top level folder, simply provide the ``--path``
argument to ``spack analyze run``. The nested organization will be maintained
within your custom root.
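For instance, a sketch of the ``--path`` override described above (the directory and the ``zlib`` spec are illustrative):

.. code-block:: console

   $ spack analyze run --path /tmp/my-analyzers zlib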
-----------------
Listing Analyzers
-----------------
If you aren't familiar with Spack's analyzers, you can quickly list those that
are available:
.. code-block:: console
$ spack analyze list-analyzers
install_files : install file listing read from install_manifest.json
environment_variables : environment variables parsed from spack-build-env.txt
config_args : config args loaded from spack-configure-args.txt
libabigail : Application Binary Interface (ABI) features for objects
In the above, the first three are fairly simple - parsing metadata files from
a package install directory and saving them as JSON.
-------------------
Analyzing a Package
-------------------
The analyze command, akin to install, will accept a package spec to perform
an analysis for. The package must be installed. Let's walk through an example
with zlib. We first ask to analyze it. However, since we have more than one
install, we are asked to disambiguate:
.. code-block:: console
$ spack analyze run zlib
==> Error: zlib matches multiple packages.
Matching packages:
fz2bs56 zlib@1.2.11%gcc@7.5.0 arch=linux-ubuntu18.04-skylake
sl7m27m zlib@1.2.11%gcc@9.3.0 arch=linux-ubuntu20.04-skylake
Use a more specific spec.
We can then specify the spec version that we want to analyze:
.. code-block:: console
$ spack analyze run zlib/fz2bs56
If you don't provide any specific analyzer names, by default all analyzers
(shown in the ``list-analyzers`` subcommand list) will be run. If an analyzer does not
have any result, it will be skipped. For example, here is a result running for
zlib:
.. code-block:: console
$ ls ~/.spack/analyzers/linux-ubuntu20.04-skylake/gcc-9.3.0/zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2/
spack-analyzer-environment-variables.json
spack-analyzer-install-files.json
spack-analyzer-libabigail-libz.so.1.2.11.xml
If you want to run a specific analyzer, ask for it with ``--analyzer``. Here we run
``spack analyze`` on libabigail (already installed), *using* the libabigail analyzer:
.. code-block:: console
$ spack analyze run --analyzer abigail libabigail
.. _analyze_monitoring:
----------------------
Monitoring An Analysis
----------------------
For any kind of analysis, you can
use a `spack monitor <https://github.com/spack/spack-monitor>`_ "Spackmon"
as a server to upload the same run metadata to. You can
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
to first create a server along with a username and token for yourself.
You can then use this guide to interact with the server.
You should first export your spack monitor token and username to the environment:
.. code-block:: console
$ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
$ export SPACKMON_USER=spacky
By default, the host for your server is expected to be at ``http://127.0.0.1``
with a prefix of ``ms1``, and if this is the case, you can simply add the
``--monitor`` flag to the install command:
.. code-block:: console
$ spack analyze run --monitor wget
If you need to customize the host or the prefix, you can do that as well:
.. code-block:: console
$ spack analyze run --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io wget
If your server doesn't have authentication, you can skip it:
.. code-block:: console
$ spack analyze run --monitor --monitor-disable-auth wget
Regardless of your choice, when you run analyze on an installed package (whether
it was installed with ``--monitor`` or not), you'll see the results generating as they did
before, and a message that the monitor server was pinged:
.. code-block:: console
$ spack analyze run --monitor wget
...
==> Sending result for wget bin/wget to monitor.
@@ -85,7 +85,7 @@ All packages whose names or descriptions contain documentation:
 To get more information on a particular package from `spack list`, use
 `spack info`. Just supply the name of a package:

-.. command-output:: spack info --all mpich
+.. command-output:: spack info mpich

 Most of the information is self-explanatory. The *safe versions* are
 versions that Spack knows the checksum for, and it will use the
@@ -896,8 +896,8 @@ your path:
    $ which mpicc
    ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc

-These commands will add appropriate directories to your ``PATH``
-and ``MANPATH`` according to the
+These commands will add appropriate directories to your ``PATH``,
+``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH`` according to the
 :ref:`prefix inspections <customize-env-modifications>` defined in your
 modules configuration.
When you no longer want to use a package, you can type unload or When you no longer want to use a package, you can type unload or
@@ -998,15 +998,11 @@ More formally, a spec consists of the following pieces:
 * ``%`` Optional compiler specifier, with an optional compiler version
   (``gcc`` or ``gcc@4.7.3``)
 * ``+`` or ``-`` or ``~`` Optional variant specifiers (``+debug``,
-  ``-qt``, or ``~qt``) for boolean variants. Use ``++`` or ``--`` or
-  ``~~`` to propagate variants through the dependencies (``++debug``,
-  ``--qt``, or ``~~qt``).
+  ``-qt``, or ``~qt``) for boolean variants
 * ``name=<value>`` Optional variant specifiers that are not restricted to
-  boolean variants. Use ``name==<value>`` to propagate variant through the
-  dependencies.
+  boolean variants
 * ``name=<value>`` Optional compiler flag specifiers. Valid flag names are
   ``cflags``, ``cxxflags``, ``fflags``, ``cppflags``, ``ldflags``, and ``ldlibs``.
-  Use ``name==<value>`` to propagate compiler flags through the dependencies.
 * ``target=<value> os=<value>`` Optional architecture specifier
   (``target=haswell os=CNL10``)
 * ``^`` Dependency specs (``^callpath@1.1``)
@@ -1097,8 +1093,6 @@ could depend on ``mpich@1.2:`` if it can only build with version
 Below are more details about the specifiers that you can add to specs.

-.. _version-specifier:
-
 ^^^^^^^^^^^^^^^^^
 Version specifier
 ^^^^^^^^^^^^^^^^^
@@ -1114,37 +1108,6 @@ set of arbitrary versions, such as ``@1.0,1.5,1.7`` (``1.0``, ``1.5``,
 or ``1.7``). When you supply such a specifier to ``spack install``,
 it constrains the set of versions that Spack will install.

-For packages with a ``git`` attribute, ``git`` references
-may be specified instead of a numerical version i.e. branches, tags
-and commits. Spack will stage and build based off the ``git``
-reference provided. Acceptable syntaxes for this are:
-
-.. code-block:: sh
-
-   # branches and tags
-   foo@git.develop  # use the develop branch
-   foo@git.0.19     # use the 0.19 tag
-
-   # commit hashes
-   foo@abcdef1234abcdef1234abcdef1234abcdef1234  # 40 character hashes are automatically treated as git commits
-   foo@git.abcdef1234abcdef1234abcdef1234abcdef1234
-
-Spack versions from git reference either have an associated version supplied by the user,
-or infer a relationship to known versions from the structure of the git repository. If an
-associated version is supplied by the user, Spack treats the git version as equivalent to that
-version for all version comparisons in the package logic (e.g. ``depends_on('foo', when='@1.5')``).
-
-The associated version can be assigned with ``[git ref]=[version]`` syntax, with the caveat
-that the specified version is known to Spack from either the package definition, or in the
-configuration preferences (i.e. ``packages.yaml``).
-
-.. code-block:: sh
-
-   foo@git.my_ref=3.2  # use the my_ref tag or branch, but treat it as version 3.2 for version comparisons
-   foo@git.abcdef1234abcdef1234abcdef1234abcdef1234=develop  # use the given commit, but treat it as develop for version comparisons
-
-If an associated version is not supplied then the tags in the git repo are used to determine
-the most recent previous version known to Spack. Details about how versions are compared
-and how Spack determines if one version is less than another are discussed in the developer guide.
-
 If the version spec is not provided, then Spack will choose one
 according to policies set for the particular spack installation. If
 the spec is ambiguous, i.e. it could match multiple versions, Spack
@@ -1230,23 +1193,6 @@ variants using the backwards compatibility syntax and uses only ``~``
 for disabled boolean variants. The ``-`` and spaces on the command
 line are provided for convenience and legibility.

-Spack allows variants to propagate their value to the package's
-dependency by using ``++``, ``--``, and ``~~`` for boolean variants.
-For example, for a ``debug`` variant:
-
-.. code-block:: sh
-
-   mpileaks ++debug   # enabled debug will be propagated to dependencies
-   mpileaks +debug    # only mpileaks will have debug enabled
-
-To propagate the value of non-boolean variants Spack uses ``name==value``.
-For example, for the ``stackstart`` variant:
-
-.. code-block:: sh
-
-   mpileaks stackstart==4   # variant will be propagated to dependencies
-   mpileaks stackstart=4    # only mpileaks will have this variant value
-
 ^^^^^^^^^^^^^^
 Compiler Flags
 ^^^^^^^^^^^^^^
@@ -1254,15 +1200,10 @@ Compiler Flags
 Compiler flags are specified using the same syntax as non-boolean variants,
 but fulfill a different purpose. While the function of a variant is set by
 the package, compiler flags are used by the compiler wrappers to inject
-flags into the compile line of the build. Additionally, compiler flags can
-be inherited by dependencies by using ``==``.
-``spack install libdwarf cppflags=="-g"`` will install both libdwarf and
-libelf with the ``-g`` flag injected into their compile line.
-
-.. note::
-
-   versions of spack prior to 0.19.0 will propagate compiler flags using
-   the ``=`` syntax.
+flags into the compile line of the build. Additionally, compiler flags are
+inherited by dependencies. ``spack install libdwarf cppflags="-g"`` will
+install both libdwarf and libelf with the ``-g`` flag injected into their
+compile line.

 Notice that the value of the compiler flags must be quoted if it
 contains any spaces. Any of ``cppflags=-O3``, ``cppflags="-O3"``,
@@ -1464,7 +1405,7 @@ built.
 You can see what virtual packages a particular package provides by
 getting info on it:

-.. command-output:: spack info --virtuals mpich
+.. command-output:: spack info mpich

 Spack is unique in that its virtual packages can be versioned, just
 like regular packages. A particular version of a package may provide
@@ -1672,13 +1613,9 @@ own install prefix. However, certain packages are typically installed
 `Python <https://www.python.org>`_ packages are typically installed in the
 ``$prefix/lib/python-2.7/site-packages`` directory.

-In Spack, installation prefixes are immutable, so this type of installation
-is not directly supported. However, it is possible to create views that
-allow you to merge install prefixes of multiple packages into a single new prefix.
-Views are a convenient way to get a more traditional filesystem structure.
-Using *extensions*, you can ensure that Python packages always share the
-same prefix in the view as Python itself. Suppose you have
-Python installed like so:
+Spack has support for this type of installation as well. In Spack,
+a package that can live inside the prefix of another package is called
+an *extension*. Suppose you have Python installed like so:

 .. code-block:: console

@@ -1716,6 +1653,8 @@ You can find extensions for your Python installation like this:
    py-ipython@2.3.1     py-pygments@2.0.1    py-setuptools@11.3.1
    py-matplotlib@1.4.2  py-pyparsing@2.0.3   py-six@1.9.0

+==> None activated.
+
 The extensions are a subset of what's returned by ``spack list``, and
 they are packages like any other. They are installed into their own
 prefixes, and you can see this with ``spack find --paths``:
@@ -1743,72 +1682,32 @@ directly when you run ``python``:
    ImportError: No module named numpy
    >>>

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Using Extensions in Environments
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The recommended way of working with extensions such as ``py-numpy``
-above is through :ref:`Environments <environments>`. For example,
-the following creates an environment in the current working directory
-with a filesystem view in the ``./view`` directory:
-
-.. code-block:: console
-
-   $ spack env create --with-view view --dir .
-   $ spack -e . add py-numpy
-   $ spack -e . concretize
-   $ spack -e . install
-
-We recommend environments for two reasons. Firstly, environments
-can be activated (requires :ref:`shell-support`):
-
-.. code-block:: console
-
-   $ spack env activate .
-
-which sets all the right environment variables such as ``PATH`` and
-``PYTHONPATH``. This ensures that
-
-.. code-block:: console
-
-   $ python
-   >>> import numpy
-
-works. Secondly, even without shell support, the view ensures
-that Python can locate its extensions:
-
-.. code-block:: console
-
-   $ ./view/bin/python
-   >>> import numpy
-
-See :ref:`environments` for a more in-depth description of Spack
-environments and customizations to views.
-
-^^^^^^^^^^^^^^^^^^^^
-Using ``spack load``
-^^^^^^^^^^^^^^^^^^^^
-
-A more traditional way of using Spack and extensions is ``spack load``
-(requires :ref:`shell-support`). This will add the extension to ``PYTHONPATH``
-in your current shell, and Python itself will be available in the ``PATH``:
+^^^^^^^^^^^^^^^^
+Using Extensions
+^^^^^^^^^^^^^^^^
+
+There are four ways to get ``numpy`` working in Python. The first is
+to use :ref:`shell-support`. You can simply ``load`` the extension,
+and it will be added to the ``PYTHONPATH`` in your current shell:

 .. code-block:: console

-   $ spack load python
    $ spack load py-numpy
-   $ python
-   >>> import numpy
-
-Now ``import numpy`` will succeed for as long as you keep your current
-session open.

 The loaded packages can be checked using ``spack find --loaded``

 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Loading Extensions via Modules
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Apart from ``spack env activate`` and ``spack load``, you can load numpy
-through your environment modules (using ``environment-modules`` or
-``lmod``). This will also add the extension to the ``PYTHONPATH`` in
-your current shell.
+Instead of using Spack's environment modification capabilities through
+the ``spack load`` command, you can load numpy through your
+environment modules (using ``environment-modules`` or ``lmod``). This
+will also add the extension to the ``PYTHONPATH`` in your current
+shell.

 .. code-block:: console
@@ -1818,6 +1717,130 @@ If you do not know the name of the specific numpy module you wish to
 load, you can use the ``spack module tcl|lmod loads`` command to get
 the name of the module from the Spack spec.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Activating Extensions in a View
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Another way to use extensions is to create a view, which merges the
python installation along with the extensions into a single prefix.
See :ref:`configuring_environment_views` for a more in-depth description
of views.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Activating Extensions Globally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
As an alternative to creating a merged prefix with Python and its extensions,
and prior to support for views, Spack has provided a means to install the
extension into the Spack installation prefix for the extendee. This has
typically been useful since extendable packages typically search their own
installation path for addons by default.
Global activations are performed with the ``spack activate`` command:
.. _cmd-spack-activate:
^^^^^^^^^^^^^^^^^^
``spack activate``
^^^^^^^^^^^^^^^^^^
.. code-block:: console
$ spack activate py-numpy
==> Activated extension py-setuptools@11.3.1%gcc@4.4.7 arch=linux-debian7-x86_64-3c74eb69 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-nose@1.3.4%gcc@4.4.7 arch=linux-debian7-x86_64-5f70f816 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-numpy@1.9.1%gcc@4.4.7 arch=linux-debian7-x86_64-66733244 for python@2.7.8%gcc@4.4.7.
Several things have happened here. The user requested that
``py-numpy`` be activated in the ``python`` installation it was built
with. Spack knows that ``py-numpy`` depends on ``py-nose`` and
``py-setuptools``, so it activated those packages first. Finally,
once all dependencies were activated in the ``python`` installation,
``py-numpy`` was activated as well.
If we run ``spack extensions`` again, we now see the three new
packages listed as activated:
.. code-block:: console
$ spack extensions python
==> python@2.7.8%gcc@4.4.7 arch=linux-debian7-x86_64-703c7a96
==> 36 extensions:
geos py-ipython py-pexpect py-pyside py-sip
py-basemap py-libxml2 py-pil py-pytz py-six
py-biopython py-mako py-pmw py-rpy2 py-sympy
py-cython py-matplotlib py-pychecker py-scientificpython py-virtualenv
py-dateutil py-mpi4py py-pygments py-scikit-learn
py-epydoc py-mx py-pylint py-scipy
py-gnuplot py-nose py-pyparsing py-setuptools
py-h5py py-numpy py-pyqt py-shiboken
==> 12 installed:
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-dateutil@2.4.0 py-nose@1.3.4 py-pyside@1.2.2
py-dateutil@2.4.0 py-numpy@1.9.1 py-pytz@2014.10
py-ipython@2.3.1 py-pygments@2.0.1 py-setuptools@11.3.1
py-matplotlib@1.4.2 py-pyparsing@2.0.3 py-six@1.9.0
==> 3 currently activated:
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-nose@1.3.4 py-numpy@1.9.1 py-setuptools@11.3.1
Now, when a user runs python, ``numpy`` will be available for import
*without* the user having to explicitly load it. ``python@2.7.8`` now
acts like a system Python installation with ``numpy`` installed inside
of it.
Spack accomplishes this by symbolically linking the *entire* prefix of
the ``py-numpy`` package into the prefix of the ``python`` package. To the
python interpreter, it looks like ``numpy`` is installed in the
``site-packages`` directory.
The only limitation of global activation is that you can only have a *single*
version of an extension activated at a time. This is because multiple
versions of the same extension would conflict if symbolically linked
into the same prefix. Users who want a different version of a package
can still get it by using environment modules or views, but they will have to
explicitly load their preferred version.
^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack activate --force``
^^^^^^^^^^^^^^^^^^^^^^^^^^
If, for some reason, you want to activate a package *without* its
dependencies, you can use ``spack activate --force``:
.. code-block:: console
$ spack activate --force py-numpy
==> Activated extension py-numpy@1.9.1%gcc@4.4.7 arch=linux-debian7-x86_64-66733244 for python@2.7.8%gcc@4.4.7.
.. _cmd-spack-deactivate:
^^^^^^^^^^^^^^^^^^^^
``spack deactivate``
^^^^^^^^^^^^^^^^^^^^
We've seen how activating an extension can be used to set up a default
version of a Python module. Obviously, you may want to change that at
some point. ``spack deactivate`` is the command for this. There are
several variants:
* ``spack deactivate <extension>`` will deactivate a single
extension. If another activated extension depends on this one,
Spack will warn you and exit with an error.
* ``spack deactivate --force <extension>`` deactivates an extension
regardless of packages that depend on it.
* ``spack deactivate --all <extension>`` deactivates an extension and
all of its dependencies. Use ``--force`` to disregard dependents.
* ``spack deactivate --all <extendee>`` deactivates *all* activated
extensions of a package. For example, to deactivate *all* python
extensions, use:
.. code-block:: console
$ spack deactivate --all python
-----------------------
Filesystem requirements
-----------------------
@@ -15,13 +15,15 @@ is an entire command dedicated to the management of every aspect of bootstrapping:

 .. command-output:: spack bootstrap --help

-Spack is configured to bootstrap its dependencies lazily by default; i.e. the first time they are needed and
-can't be found. You can readily check if any prerequisite for using Spack is missing by running:
+The first thing to know to understand bootstrapping in Spack is that each of
+Spack's dependencies is bootstrapped lazily; i.e. the first time it is needed and
+can't be found. You can readily check if any prerequisite for using Spack
+is missing by running:

 .. code-block:: console

    % spack bootstrap status
-   Spack v0.19.0 - python@3.8
+   Spack v0.17.1 - python@3.8

    [FAIL] Core Functionalities
      [B] MISSING "clingo": required to concretize specs
@@ -46,21 +48,6 @@ they can be bootstrapped. Running a command that concretize a spec, like:

 triggers the bootstrapping of clingo from pre-built binaries as expected.

-Users can also bootstrap all the dependencies needed by Spack in a single command, which
-might be useful to setup containers or other similar environments:
-
-.. code-block:: console
-
-   $ spack bootstrap now
-   ==> Bootstrapping clingo from pre-built binaries
-   ==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-shqedxgvjnhiwdcdrvjhbd73jaevv7wt.spec.json
-   ==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64/gcc-10.2.1/clingo-bootstrap-spack/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-shqedxgvjnhiwdcdrvjhbd73jaevv7wt.spack
-   ==> Installing "clingo-bootstrap@spack%gcc@10.2.1~docs~ipo+python+static_libstdcpp build_type=Release arch=linux-centos7-x86_64" from a buildcache
-   ==> Bootstrapping patchelf from pre-built binaries
-   ==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.15.0-htk62k7efo2z22kh6kmhaselru7bfkuc.spec.json
-   ==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64/gcc-10.2.1/patchelf-0.15.0/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.15.0-htk62k7efo2z22kh6kmhaselru7bfkuc.spack
-   ==> Installing "patchelf@0.15.0%gcc@10.2.1 ldflags="-static-libstdc++ -static-libgcc" arch=linux-centos7-x86_64" from a buildcache
-
 -----------------------
 The Bootstrapping store
 -----------------------
@@ -120,19 +107,19 @@ If need be, you can disable bootstrapping altogether by running:

 in which case it's your responsibility to ensure Spack runs in an
 environment where all its prerequisites are installed. You can
-also configure Spack to skip certain bootstrapping methods by disabling
-them specifically:
+also configure Spack to skip certain bootstrapping methods by *untrusting*
+them. For instance:

 .. code-block:: console

-   % spack bootstrap disable github-actions
-   ==> "github-actions" is now disabled and will not be used for bootstrapping
+   % spack bootstrap untrust github-actions
+   ==> "github-actions" is now untrusted and will not be used for bootstrapping

 tells Spack to skip trying to bootstrap from binaries. To add the "github-actions" method back you can:

 .. code-block:: console

-   % spack bootstrap enable github-actions
+   % spack bootstrap trust github-actions

 There is also an option to reset the bootstrapping configuration to Spack's defaults:
View File
@@ -49,8 +49,9 @@ packages rather than building its own packages. This may be desirable
if machines ship with system packages, such as a customized MPI
that should be used instead of Spack building its own MPI.

-External packages are configured through the ``packages.yaml`` file.
-Here's an example of an external configuration:
+External packages are configured through the ``packages.yaml`` file found
+in a Spack installation's ``etc/spack/`` or a user's ``~/.spack/``
+directory. Here's an example of an external configuration:

.. code-block:: yaml
@@ -96,14 +97,11 @@ Each package version and compiler listed in an external should
have entries in Spack's packages and compiler configuration, even
though the package and compiler may not ever be built.

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Prevent packages from being built from sources
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Adding an external spec in ``packages.yaml`` allows Spack to use an external location,
-but it does not prevent Spack from building packages from sources. In the above example,
-Spack might choose for many valid reasons to start building and linking with the
-latest version of OpenMPI rather than continue using the pre-installed OpenMPI versions.
+The packages configuration can tell Spack to use an external location
+for certain package versions, but it does not restrict Spack to using
+external packages. In the above example, since newer versions of OpenMPI
+are available, Spack will choose to start building and linking with the
+latest version rather than continue using the pre-installed OpenMPI versions.

To prevent this, the ``packages.yaml`` configuration also allows packages
to be flagged as non-buildable. The previous example could be modified to
@@ -123,15 +121,9 @@ be:
      buildable: False

The addition of the ``buildable`` flag tells Spack that it should never build
-its own version of OpenMPI from sources, and it will instead always rely on a pre-built
-OpenMPI.
-
-.. note::
-
-   If ``concretizer:reuse`` is on (see :ref:`concretizer-options` for more information on that flag)
-   pre-built specs include specs already available from a local store, an upstream store, a registered
-   buildcache or specs marked as externals in ``packages.yaml``. If ``concretizer:reuse`` is off, only
-   external specs in ``packages.yaml`` are included in the list of pre-built specs.
+its own version of OpenMPI, and it will instead always rely on a pre-built
+OpenMPI. Similar to ``paths``, ``buildable`` is specified as a property under
+a package name.

If an external module is specified as not buildable, then Spack will load the
external module into the build environment which can be used for linking.
@@ -140,10 +132,6 @@ The ``buildable`` does not need to be paired with external packages.
It could also be used alone to forbid packages that may be
buggy or otherwise undesirable.

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Non-buildable virtual packages
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
Virtual packages in Spack can also be specified as not buildable, and
external implementations can be provided. In the example above,
OpenMPI is configured as not buildable, but Spack will often prefer
@@ -165,37 +153,21 @@ but more conveniently:
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64" - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel prefix: /opt/openmpi-1.6.5-intel
Spack can then use any of the listed external implementations of MPI Implementations can also be listed immediately under the virtual they provide:
to satisfy a dependency, and will choose depending on the compiler and
architecture.
In cases where the concretizer is configured to reuse specs, and other ``mpi`` providers
(available via stores or buildcaches) are not wanted, Spack can be configured to require
specs matching only the available externals:
.. code-block:: yaml .. code-block:: yaml
packages: packages:
mpi: mpi:
buildable: False buildable: False
require: openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
- one_of: [ openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64", openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug", mpich@3.3 %clang@9.0.0 arch=linux-debian7-x86_64: /opt/mpich-3.3-intel
"openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
]
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
This configuration prevents any spec using MPI and originating from stores or buildcaches to be reused, Spack can then use any of the listed external implementations of MPI
unless it matches the requirements under ``packages:mpi:require``. For more information on requirements see to satisfy a dependency, and will choose depending on the compiler and
:ref:`package-requirements`. architecture.
.. _cmd-spack-external-find: .. _cmd-spack-external-find:
@@ -222,6 +194,11 @@ Specific limitations include:
* Packages are not discoverable by default: For a package to be
  discoverable with ``spack external find``, it needs to add special
  logic. See :ref:`here <make-package-findable>` for more details.
+* The current implementation only collects and examines executable files,
+  so it is typically only useful for build/run dependencies (in some cases
+  if a library package also provides an executable, it may be possible to
+  extract a meaningful Spec by running the executable - for example the
+  compiler wrappers in MPI implementations).
* The logic does not search through module files, it can only detect
  packages with executables defined in ``PATH``; you can help Spack locate
  externals which use module files by loading any associated modules for
@@ -302,143 +279,17 @@ microarchitectures considered during the solve are constrained to be compatible
host Spack is currently running on. For instance, if this option is set to ``true``, a
user cannot concretize for ``target=icelake`` while running on a Haswell node.
.. _package-requirements:
--------------------
Package Requirements
--------------------
Spack can be configured to always use certain compilers, package
versions, and variants during concretization through package
requirements.
Package requirements are useful when you find yourself repeatedly
specifying the same constraints on the command line, and wish that
Spack respects these constraints whether you mention them explicitly
or not. Another use case is specifying constraints that should apply
to all root specs in an environment, without having to repeat the
constraint everywhere.
Apart from that, the requirements configuration is more flexible than
command-line constraints, because it can constrain packages *when they
occur* as a dependency. On the command line, by contrast, it is not
possible to constrain dependencies while also keeping those dependencies
optional.
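For example, a requirement placed on a package constrains it wherever it
appears in a DAG, even when it is only ever pulled in as a dependency
(the version below is hypothetical):

.. code-block:: yaml

   packages:
     zlib:
       require: "@1.2.13"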
The package requirements configuration is specified in ``packages.yaml``
keyed by package name:
.. code-block:: yaml
   packages:
     libfabric:
       require: "@1.13.2"
     openmpi:
       require:
       - any_of: ["~cuda", "%gcc"]
     mpich:
       require:
       - one_of: ["+cuda", "+rocm"]
Requirements are expressed using Spec syntax (the same as what is provided
to ``spack install``). In the simplest case, you can specify attributes
that you always want the package to have by providing a single spec to
``require``; in the above example, ``libfabric`` will always build
with version 1.13.2.
You can provide a more-relaxed constraint and allow the concretizer to
choose between a set of options using ``any_of`` or ``one_of``:
* ``any_of`` is a list of specs. One of those specs must be satisfied
and it is also allowed for the concretized spec to match more than one.
In the above example, that means you could build ``openmpi+cuda%gcc``,
``openmpi~cuda%clang`` or ``openmpi~cuda%gcc`` (in the last case,
note that both specs in the ``any_of`` for ``openmpi`` are
satisfied).
* ``one_of`` is also a list of specs, and the final concretized spec
must match exactly one of them. In the above example, that means
you could build ``mpich+cuda`` or ``mpich+rocm`` but not
``mpich+cuda+rocm`` (note the current package definition for
``mpich`` already includes a conflict, so this is redundant but
still demonstrates the concept).
.. note::
For ``any_of`` and ``one_of``, the order of specs indicates a
preference: items that appear earlier in the list are preferred
(note that these preferences can be ignored in favor of others).
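For example, to prefer a newer version while still allowing an older one,
the preferred spec can be listed first (the versions are hypothetical):

.. code-block:: yaml

   packages:
     openmpi:
       require:
       - any_of: ["@4.1.4", "@4.1.3"]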
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting default requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can also set default requirements for all packages under ``all``
like this:
.. code-block:: yaml
   packages:
     all:
       require: '%clang'
which means every spec will be required to use ``clang`` as a compiler.
Note that in this case ``all`` represents a *default set of requirements* -
if there are specific package requirements, then the default requirements
under ``all`` are disregarded. For example, with a configuration like this:
.. code-block:: yaml
   packages:
     all:
       require: '%clang'
     cmake:
       require: '%gcc'
Spack requires ``cmake`` to use ``gcc`` and all other nodes (including ``cmake``
dependencies) to use ``clang``.
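One way to check the effect is to concretize both packages and inspect the
chosen compilers (an illustrative check; output is omitted):

.. code-block:: console

   $ spack spec cmake   # concretized with %gcc
   $ spack spec zlib    # concretized with %clang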
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting requirements on virtual specs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A requirement on a virtual spec applies whenever that virtual is present in the DAG.
This can be useful for fixing which virtual provider you want to use:
.. code-block:: yaml
   packages:
     mpi:
       require: 'mvapich2 %gcc'
With the configuration above the only allowed ``mpi`` provider is ``mvapich2 %gcc``.
Requirements on the virtual spec and on the specific provider are both applied, if
present. For instance with a configuration like:
.. code-block:: yaml
   packages:
     mpi:
       require: 'mvapich2 %gcc'
     mvapich2:
       require: '~cuda'
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
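A quick way to confirm which provider was selected is to concretize a
package that depends on ``mpi`` (an illustrative check):

.. code-block:: console

   $ spack spec hdf5+mpi | grep mvapich2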
.. _package-preferences:

-------------------
Package Preferences
-------------------

-In some cases package requirements can be too strong, and package
-preferences are the better option. Package preferences do not impose
-constraints on packages for particular versions or variants values,
-they rather only set defaults -- the concretizer is free to change
-them if it must due to other constraints. Also note that package
-preferences are of lower priority than reuse of already installed
-packages.
+Spack can be configured to prefer certain compilers, package
+versions, dependencies, and variants during concretization.
+The preferred configuration can be controlled via the
+``~/.spack/packages.yaml`` file for user configurations, or the
+``etc/spack/packages.yaml`` site configuration.

Here's an example ``packages.yaml`` file that sets preferred packages:
@@ -456,7 +307,7 @@ Here's an example ``packages.yaml`` file that sets preferred packages:
     providers:
       mpi: [mvapich2, mpich, openmpi]

-At a high level, this example is specifying how packages are preferably
+At a high level, this example is specifying how packages should be
concretized. The opencv package should prefer using GCC 4.9 and
be built with debug options. The gperftools package should prefer version
2.2 over 2.4. Every package on the system should prefer mvapich2 for
@@ -464,11 +315,13 @@ its MPI and GCC 4.4.7 (except for opencv, which overrides this by preferring GCC
These options are used to fill in implicit defaults. Any of them can be overwritten
on the command line if explicitly requested.

-Package preferences accept the following keys or components under
-the specific package (or ``all``) section: ``compiler``, ``variants``,
-``version``, ``providers``, and ``target``. Each component has an
-ordered list of spec ``constraints``, with earlier entries in the
-list being preferred over later entries.
+Each ``packages.yaml`` file begins with the string ``packages:`` and
+package names are specified on the next level. The special string ``all``
+applies settings to *all* packages. Underneath each package name is one
+or more components: ``compiler``, ``variants``, ``version``,
+``providers``, and ``target``. Each component has an ordered list of
+spec ``constraints``, with earlier entries in the list being preferred
+over later entries.
Sometimes a package installation may have constraints that forbid
the first concretization rule, in which case Spack will use the first
@@ -483,9 +336,10 @@ gcc to pgi will thus be preferred over the xlc compiler.
The syntax for the ``provider`` section differs slightly from other
concretization rules. A provider lists a value that packages may
-``depends_on`` (e.g., MPI) and a list of rules for fulfilling that
+``depend_on`` (e.g., MPI) and a list of rules for fulfilling that
dependency.
.. _package_permissions:

-------------------
@@ -534,25 +388,3 @@ directories inside the install prefix. This will ensure that even
manually placed files within the install prefix are owned by the
assigned group. If no group is assigned, Spack will allow the OS
default behavior to go as expected.
----------------------------
Assigning Package Attributes
----------------------------
You can assign class-level attributes in the configuration:
.. code-block:: yaml
   packages:
     mpileaks:
       # Override existing attributes
       url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
       # ... or add new ones
       x: 1
Attributes set this way will be accessible to any method executed
in the package.py file (e.g. the ``install()`` method). Values for these
attributes may be any value parseable by yaml.
These can only be applied to specific packages, not "all" or
virtual packages.
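Inside the package recipe, such attributes are read like any other class
attribute (a minimal hypothetical sketch):

.. code-block:: python

   class Mpileaks(AutotoolsPackage):
       def install(self, spec, prefix):
           # "url" overrides the class attribute; "x" was added in packages.yaml
           print(self.url, self.x)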
View File
@@ -62,11 +62,11 @@ on these ideas for each distinct build system that Spack supports:
   build_systems/bundlepackage
   build_systems/cudapackage
-   build_systems/custompackage
   build_systems/inteloneapipackage
   build_systems/intelpackage
   build_systems/rocmpackage
-   build_systems/sourceforgepackage
+   build_systems/custompackage
+   build_systems/multiplepackage

For reference, the :py:mod:`Build System API docs <spack.build_systems>`
provide a list of build systems and methods/attributes that can be
View File
@@ -5,9 +5,9 @@
.. _autotoolspackage:

----------
-Autotools
----------
+----------------
+AutotoolsPackage
+----------------

Autotools is a GNU build system that provides a build-script generator.
By running the platform-independent ``./configure`` script that comes
@@ -17,7 +17,7 @@ with the package, you can generate a platform-dependent Makefile.
Phases
^^^^^^

-The ``AutotoolsBuilder`` and ``AutotoolsPackage`` base classes come with the following phases:
+The ``AutotoolsPackage`` base class comes with the following phases:

#. ``autoreconf`` - generate the configure script
#. ``configure`` - generate the Makefiles
View File
@@ -5,9 +5,9 @@
.. _bundlepackage:

-------
-Bundle
-------
+-------------
+BundlePackage
+-------------

``BundlePackage`` represents a set of packages that are expected to work well
together, such as a collection of commonly used software libraries. The
View File
@@ -5,9 +5,9 @@
.. _cmakepackage:

------
-CMake
------
+------------
+CMakePackage
+------------

Like Autotools, CMake is a widely-used build-script generator. Designed
by Kitware, CMake is the most popular build system for new C, C++, and
@@ -21,7 +21,7 @@ whereas Autotools is Unix-only.
Phases
^^^^^^

-The ``CMakeBuilder`` and ``CMakePackage`` base classes come with the following phases:
+The ``CMakePackage`` base class comes with the following phases:

#. ``cmake`` - generate the Makefile
#. ``build`` - build the package
@@ -130,8 +130,8 @@ Adding flags to cmake
To add additional flags to the ``cmake`` call, simply override the
``cmake_args`` function. The following example defines values for the flags
``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with
-and without the :meth:`~spack.build_systems.cmake.CMakeBuilder.define` and
-:meth:`~spack.build_systems.cmake.CMakeBuilder.define_from_variant` helper functions:
+and without the :meth:`~spack.build_systems.cmake.CMakePackage.define` and
+:meth:`~spack.build_systems.cmake.CMakePackage.define_from_variant` helper functions:

.. code-block:: python
View File
@@ -32,7 +32,7 @@ oneAPI packages or use::
For more information on a specific package, do::

-   spack info --all <package-name>
+   spack info <package-name>

Intel no longer releases new versions of Parallel Studio, which can be
used in Spack via the :ref:`intelpackage`. All of its components can
View File
@@ -5,11 +5,11 @@
.. _luapackage:

----
-Lua
----
+------------
+LuaPackage
+------------

-The ``Lua`` build-system is a helper for the common case of Lua packages that provide
+LuaPackage is a helper for the common case of Lua packages that provide
a rockspec file. This is not meant to take a rock archive, but to build
a source archive or repository that provides a rockspec, which should cover
most lua packages. In the case a Lua package builds by Make rather than
@@ -19,7 +19,7 @@ luarocks, prefer MakefilePackage.
Phases
^^^^^^

-The ``LuaBuilder`` and ``LuaPackage`` base classes come with the following phases:
+The ``LuaPackage`` base class comes with the following phases:

#. ``unpack`` - if using a rock, unpacks the rock and moves into the source directory
#. ``preprocess`` - adjust sources or rockspec to fix build
View File
@@ -5,9 +5,9 @@
.. _makefilepackage:

---------
-Makefile
---------
+---------------
+MakefilePackage
+---------------

The most primitive build system a package can use is a plain Makefile.
Makefiles are simple to write for small projects, but they usually
@@ -18,7 +18,7 @@ variables.
Phases
^^^^^^

-The ``MakefileBuilder`` and ``MakefilePackage`` base classes come with 3 phases:
+The ``MakefilePackage`` base class comes with 3 phases:

#. ``edit`` - edit the Makefile
#. ``build`` - build the project
View File
@@ -5,9 +5,9 @@
.. _mavenpackage:

------
-Maven
------
+------------
+MavenPackage
+------------

Apache Maven is a general-purpose build system that does not rely
on Makefiles to build software. It is designed for building and
@@ -17,7 +17,7 @@ managing and Java-based project.
Phases
^^^^^^

-The ``MavenBuilder`` and ``MavenPackage`` base classes come with the following phases:
+The ``MavenPackage`` base class comes with the following phases:

#. ``build`` - compile code and package into a JAR file
#. ``install`` - copy to installation prefix
View File
@@ -5,9 +5,9 @@
.. _mesonpackage:

------
-Meson
------
+------------
+MesonPackage
+------------

Much like Autotools and CMake, Meson is a build system. But it is
meant to be both fast and as user friendly as possible. GNOME's goal
@@ -17,7 +17,7 @@ is to port modules to use the Meson build system.
Phases
^^^^^^

-The ``MesonBuilder`` and ``MesonPackage`` base classes come with the following phases:
+The ``MesonPackage`` base class comes with the following phases:

#. ``meson`` - generate ninja files
#. ``build`` - build the project
View File
@@ -0,0 +1,350 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _multiplepackage:
----------------------
Multiple Build Systems
----------------------
Quite frequently, a package will change build systems from one version to the
next. For example, a small project that once used a single Makefile to build
may now require Autotools to handle the increased number of files that need to
be compiled. Or, a package that once used Autotools may switch to CMake for
Windows support. In this case, it becomes a bit more challenging to write a
single build recipe for this package in Spack.
There are several ways that this can be handled in Spack:
#. Subclass the new build system, and override phases as needed (preferred)
#. Subclass ``Package`` and implement ``install`` as needed
#. Create separate ``*-cmake``, ``*-autotools``, etc. packages for each build system
#. Rename the old package to ``*-legacy`` and create a new package
#. Move the old package to a ``legacy`` repository and create a new package
#. Drop older versions that only support the older build system
Of these options, 1 is preferred, and will be demonstrated in this
documentation. Options 3-5 have issues with concretization, so shouldn't be
used. Options 4-5 also don't support more than two build systems. Option 6 only
works if the old versions are no longer needed. Option 1 is preferred over 2
because it makes it easier to drop the old build system entirely.
The exact syntax of the package depends on which build systems you need to
support. Below are a couple of common examples.
^^^^^^^^^^^^^^^^^^^^^
Makefile -> Autotools
^^^^^^^^^^^^^^^^^^^^^
Let's say we have the following package:
.. code-block:: python
   class Foo(MakefilePackage):
       version("1.2.0", sha256="...")

       def edit(self, spec, prefix):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       def install(self, spec, prefix):
           install_tree(".", prefix)
The package subclasses from :ref:`makefilepackage`, which has three phases:
#. ``edit`` (does nothing by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
In this case, the ``install`` phase needed to be overridden because the
Makefile did not have an install target. We also modify the Makefile to use
Spack's compiler wrappers. The default ``build`` phase is not changed.
Starting with version 1.3.0, we want to use Autotools to build instead.
:ref:`autotoolspackage` has four phases:
#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
If the only version we need to support is 1.3.0, the package would look as
simple as:
.. code-block:: python
   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]
In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.
If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:
.. code-block:: python
   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def configure_args(self):
           return ["--enable-shared"]

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def autoreconf(self, spec, prefix):
           pass

       @when("@:1.2")
       def configure(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)
There are a few interesting things to note here:
* We added ``deprecated=True`` to version 1.2.0. This signifies that version
1.2.0 is deprecated and shouldn't be used. However, if a user still relies
on version 1.2.0, it's still there and builds just fine.
* We moved the contents of the ``edit`` phase to the ``patch`` function. Since
``AutotoolsPackage`` doesn't have an ``edit`` phase, the only way for this
step to be executed is to move it to the ``patch`` function, which always
gets run.
* The ``autoreconf`` and ``configure`` phases become no-ops. Since the old
Makefile-based build system doesn't use these, we ignore these phases when
building ``foo@1.2.0``.
* The ``@when`` decorator is used to override these phases only for older
versions. The default methods are used for ``foo@1.3:``.
Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.
^^^^^^^^^^^^^^^^^^
Autotools -> CMake
^^^^^^^^^^^^^^^^^^
Let's say we have the following package:
.. code-block:: python
   class Bar(AutotoolsPackage):
       version("1.2.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]
The package subclasses from :ref:`autotoolspackage`, which has four phases:
#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.
Starting with version 1.3.0, we want to use CMake to build instead.
:ref:`cmakepackage` has three phases:
#. ``cmake`` (runs ``cmake ...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
If the only version we need to support is 1.3.0, the package would look as
simple as:
.. code-block:: python
   class Bar(CMakePackage):
       version("1.3.0", sha256="...")

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]
In this case, we use the default methods for each phase and only override
``cmake_args`` to specify additional flags to pass to ``cmake``.
If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:
.. code-block:: python
   class Bar(CMakePackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.2.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@:1.2")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())
There are a few interesting things to note here:
* We added ``deprecated=True`` to version 1.2.0. This signifies that version
1.2.0 is deprecated and shouldn't be used. However, if a user still relies
on version 1.2.0, it's still there and builds just fine.
* Since CMake and Autotools are so similar, we only need to override the
  ``cmake`` phase; we can use the default ``build`` and ``install`` phases.
* We override ``cmake`` to run ``./configure`` for older versions.
``configure_args`` remains the same.
* The ``@when`` decorator is used to override these phases only for older
versions. The default methods are used for ``bar@1.3:``.
Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Multiple build systems for the same version
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
During the transition from one build system to another, developers often
support multiple build systems at the same time. Spack can only use a single
build system for a single version. To decide which build system to use for a
particular version, take the following things into account:
1. If the developers explicitly state that one build system is preferred over
another, use that one.
2. If one build system is considered "experimental" while another is considered
"stable", use the stable build system.
3. Otherwise, use the newer build system.
The developer preference for which build system to use can change over time as
a newer build system becomes stable/recommended.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Dropping support for old build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When older versions of a package don't support a newer build system, it can be
tempting to simply delete them from a package. This significantly reduces
package complexity and makes the build recipe much easier to maintain. However,
other packages or Spack users may rely on these older versions. The recommended
approach is to first support both build systems (as demonstrated above),
:ref:`deprecate <deprecate>` versions that rely on the old build system, and
remove those versions and any phases that needed to be overridden in the next
Spack release.
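A transitional recipe therefore typically looks like this for one release
cycle (a condensed sketch of the pattern shown earlier; hashes elided):

.. code-block:: python

   version("1.3.0", sha256="...")                   # new build system
   version("1.2.0", sha256="...", deprecated=True)  # old build system, to be removed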
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Three or more build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^
In rare cases, a package may change build systems multiple times. For example,
a package may start with Makefiles, then switch to Autotools, then switch to
CMake. The same logic used above can be extended to any number of build systems.
For example:
.. code-block:: python
   class Baz(CMakePackage):
       version("1.4.0", sha256="...")  # CMake
       version("1.3.0", sha256="...")  # Autotools
       version("1.2.0", sha256="...")  # Makefile

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.3.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@1.3")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def cmake(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)
^^^^^^^^^^^^^^^^^^^
Additional examples
^^^^^^^^^^^^^^^^^^^
When writing new packages, it often helps to see examples of existing packages.
Here is an incomplete list of existing Spack packages that have changed build
systems before:
================ ===================== ================
Package          Previous Build System New Build System
================ ===================== ================
amber            custom                CMake
arpack-ng        Autotools             CMake
atk              Autotools             Meson
blast            None                  Autotools
dyninst          Autotools             CMake
evtgen           Autotools             CMake
fish             Autotools             CMake
gdk-pixbuf       Autotools             Meson
glib             Autotools             Meson
glog             Autotools             CMake
gmt              Autotools             CMake
gtkplus          Autotools             Meson
hpl              Makefile              Autotools
interproscan     Perl                  Maven
jasper           Autotools             CMake
kahip            SCons                 CMake
kokkos           Makefile              CMake
kokkos-kernels   Makefile              CMake
leveldb          Makefile              CMake
libdrm           Autotools             Meson
libjpeg-turbo    Autotools             CMake
mesa             Autotools             Meson
metis            None                  CMake
mpifileutils     Autotools             CMake
muparser         Autotools             CMake
mxnet            Makefile              CMake
nest             Autotools             CMake
neuron           Autotools             CMake
nsimd            CMake                 nsconfig
opennurbs        Makefile              CMake
optional-lite    None                  CMake
plasma           Makefile              CMake
preseq           Makefile              Autotools
protobuf         Autotools             CMake
py-pygobject     Autotools             Python
singularity      Autotools             Makefile
span-lite        None                  CMake
ssht             Makefile              CMake
string-view-lite None                  CMake
superlu          Makefile              CMake
superlu-dist     Makefile              CMake
uncrustify       Autotools             CMake
================ ===================== ================
Packages that support multiple build systems can be a bit confusing to write.
Don't hesitate to open an issue or draft pull request and ask for advice from
other Spack developers!
View File
@@ -5,9 +5,9 @@
.. _octavepackage:

-------
-Octave
-------
+-------------
+OctavePackage
+-------------

Octave has its own build system for installing packages.
@@ -15,7 +15,7 @@ Octave has its own build system for installing packages.
Phases
^^^^^^

-The ``OctaveBuilder`` and ``OctavePackage`` base classes have a single phase:
+The ``OctavePackage`` base class has a single phase:

#. ``install`` - install the package
View File
@@ -5,9 +5,9 @@
.. _perlpackage:

-----
-Perl
-----
+-----------
+PerlPackage
+-----------

Much like Octave, Perl has its own language-specific
build system.
@@ -16,7 +16,7 @@ build system.
Phases
^^^^^^

-The ``PerlBuilder`` and ``PerlPackage`` base classes come with 3 phases that can be overridden:
+The ``PerlPackage`` base class comes with 3 phases that can be overridden:

#. ``configure`` - configure the package
#. ``build`` - build the package
View File
@@ -48,11 +48,8 @@ important to understand.
**build backend**
   Libraries used to define how to build a wheel. Examples
   include `setuptools <https://setuptools.pypa.io/>`__,
-   `flit <https://flit.pypa.io/>`_,
-   `poetry <https://python-poetry.org/>`_,
-   `hatchling <https://hatch.pypa.io/latest/>`_,
-   `meson <https://meson-python.readthedocs.io/>`_, and
-   `pdm <https://pdm.fming.dev/latest/>`_.
+   `flit <https://flit.readthedocs.io/>`_, and
+   `poetry <https://python-poetry.org/>`_.

^^^^^^^^^^^
Downloading
@@ -176,9 +173,9 @@ package. The "Project description" tab may also contain a longer
description of the package. Either of these can be used to populate
the package docstring.

-^^^^^^^^^^^^
-Dependencies
-^^^^^^^^^^^^
+^^^^^^^^^^^^^
+Build backend
+^^^^^^^^^^^^^

Once you've determined the basic metadata for a package, the next
step is to determine the build backend. ``PythonPackage`` uses
@@ -216,33 +213,12 @@ Note that ``py-wheel`` is already listed as a build dependency in the
need to specify a specific version requirement or change the
dependency type.

-See `PEP 517 <https://www.python.org/dev/peps/pep-0517/>`__ and
+See `PEP 517 <https://www.python.org/dev/peps/pep-0517/>`_ and
`PEP 518 <https://www.python.org/dev/peps/pep-0518/>`_ for more
information on the design of ``pyproject.toml``.

Depending on which build backend a project uses, there are various
-places that run-time dependencies can be listed. Most modern build
-backends support listing dependencies directly in ``pyproject.toml``.
-Look for dependencies under the following keys:
-
-* ``requires-python`` under ``[project]``
-
-  This specifies the version of Python that is required
-
-* ``dependencies`` under ``[project]``
-
-  These packages are required for building and installation. You can
-  add them with ``type=('build', 'run')``.
-
-* ``[project.optional-dependencies]``
-
-  This section includes keys with lists of optional dependencies
-  needed to enable those features. You should add a variant that
-  optionally adds these dependencies. This variant should be ``False``
-  by default.
-
-Some build backends may have additional locations where dependencies
-can be found.
+places that run-time dependencies can be listed.

"""""""""
distutils
@@ -268,9 +244,9 @@ If the ``pyproject.toml`` lists ``setuptools.build_meta`` as a
``build-backend``, or if the package has a ``setup.py`` that imports
``setuptools``, or if the package has a ``setup.cfg`` file, then it
uses setuptools to build. Setuptools is a replacement for the
-distutils library, and has almost the exact same API. In addition to
-``pyproject.toml``, dependencies can be listed in the ``setup.py`` or
-``setup.cfg`` file. Look for the following arguments:
+distutils library, and has almost the exact same API. Dependencies
+can be listed in the ``setup.py`` or ``setup.cfg`` file. Look for the
+following arguments:

* ``python_requires``
@@ -315,22 +291,25 @@ listed directly in the ``pyproject.toml`` file. Older versions of
flit used to store this info in a ``flit.ini`` file, so check for
this too.

-In addition to the default ``pyproject.toml`` keys listed above,
-older versions of flit may use the following keys:
+Either of these files may contain keys like:

-* ``requires`` under ``[tool.flit.metadata]``
+* ``requires-python``
+
+  This specifies the version of Python that is required
+
+* ``dependencies`` or ``requires``

  These packages are required for building and installation. You can
  add them with ``type=('build', 'run')``.

-* ``[tool.flit.metadata.requires-extra]``
+* ``project.optional-dependencies`` or ``requires-extra``

  This section includes keys with lists of optional dependencies
  needed to enable those features. You should add a variant that
  optionally adds these dependencies. This variant should be False
  by default.

-See https://flit.pypa.io/en/latest/pyproject_toml.html for
+See https://flit.readthedocs.io/en/latest/pyproject_toml.html for
more information.

""""""
@@ -347,38 +326,6 @@ for specifying the version requirements. Note that ``~=`` works
differently in poetry than in setuptools and flit for versions that
start with a zero.
"""""""""
hatchling
"""""""""
If the ``pyproject.toml`` lists ``hatchling.build`` as the
``build-backend``, it uses the hatchling build system. Hatchling
uses the default ``pyproject.toml`` keys to list dependencies.
See https://hatch.pypa.io/latest/config/dependency/ for more
information.
"""""
meson
"""""
If the ``pyproject.toml`` lists ``mesonpy`` as the ``build-backend``,
it uses the meson build system. Meson uses the default
``pyproject.toml`` keys to list dependencies.
See https://meson-python.readthedocs.io/en/latest/usage/start.html
for more information.
"""
pdm
"""
If the ``pyproject.toml`` lists ``pdm.pep517.api`` as the ``build-backend``,
it uses the PDM build system. PDM uses the default ``pyproject.toml``
keys to list dependencies.
See https://pdm.fming.dev/latest/ for more information.
"""""" """"""
wheels wheels
"""""" """"""
@@ -423,34 +370,6 @@ packages. However, the installation instructions for a package may
suggest passing certain flags to the ``setup.py`` call. The
``PythonPackage`` class has two techniques for doing this.
"""""""""""""""
Config settings
"""""""""""""""
These settings are passed to
`PEP 517 <https://peps.python.org/pep-0517/>`__ build backends.
For example, ``py-scipy`` package allows you to specify the name of
the BLAS/LAPACK library you want pkg-config to search for:
.. code-block:: python
   depends_on('py-pip@22.1:', type='build')

   def config_settings(self, spec, prefix):
       return {
           'blas': spec['blas'].libs.names[0],
           'lapack': spec['lapack'].libs.names[0],
       }
.. note::
This flag only works for packages that define a ``build-backend``
in ``pyproject.toml``. Also, it is only supported by pip 22.1+,
which requires Python 3.7+. For packages that still support Python
3.6 and older, ``install_options`` should be used instead.
"""""""""""""" """"""""""""""
Global options Global options
"""""""""""""" """"""""""""""
@@ -470,16 +389,6 @@ has an optional dependency on ``libyaml`` that can be enabled like so:
       return options
.. note::
Direct invocation of ``setup.py`` is
`deprecated <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html>`_.
This flag forces pip to use a deprecated installation procedure.
It should only be used in packages that don't define a
``build-backend`` in ``pyproject.toml`` or packages that still
support Python 3.6 and older.
""""""""""""""" """""""""""""""
Install options Install options
""""""""""""""" """""""""""""""
@@ -500,16 +409,6 @@ allows you to specify the directories to search for ``libyaml``:
       return options
.. note::
Direct invocation of ``setup.py`` is
`deprecated <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html>`_.
This flag forces pip to use a deprecated installation procedure.
It should only be used in packages that don't define a
``build-backend`` in ``pyproject.toml`` or packages that still
support Python 3.6 and older.
^^^^^^^
Testing
^^^^^^^
@@ -582,19 +481,6 @@ libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory, "test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed. or may require test dependencies like pytest to be installed.
Instead of defining the ``import_modules`` explicity, only the subset
of module names to be skipped can be defined by using ``skip_modules``.
If a defined module has submodules, they are skipped as well, e.g.,
in case the ``plotting`` modules should be excluded from the
automatically detected ``import_modules`` ``['nilearn', 'nilearn.surface',
'nilearn.plotting', 'nilearn.plotting.data']`` set:
.. code-block:: python
   skip_modules = ['nilearn.plotting']
This will set ``import_modules`` to ``['nilearn', 'nilearn.surface']``
Import tests can be run during the installation using ``spack install
--test=root`` or at any time after the installation using
``spack test run``.
@@ -724,9 +610,10 @@ extends vs. depends_on
This is very similar to the naming dilemma above, with a slight twist.
As mentioned in the :ref:`Packaging Guide <packaging_extensions>`,
-``extends`` and ``depends_on`` are very similar, but ``extends`` ensures
-that the extension and extendee share the same prefix in views.
-This allows the user to import a Python module without
+``extends`` and ``depends_on`` are very similar, but ``extends`` adds
+the ability to *activate* the package. Activation involves symlinking
+everything in the installation prefix of the package to the installation
+prefix of Python. This allows the user to import a Python module without
having to add that module to ``PYTHONPATH``.

When deciding between ``extends`` and ``depends_on``, the best rule of
@@ -734,7 +621,7 @@ thumb is to check the installation prefix. If Python libraries are
installed to ``<prefix>/lib/pythonX.Y/site-packages``, then you
should use ``extends``. If Python libraries are installed elsewhere
or the only files that get installed reside in ``<prefix>/bin``, then
-don't use ``extends``.
+don't use ``extends``, as symlinking the package wouldn't be useful.
^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack
@@ -777,8 +664,5 @@ For more information on build and installation frontend tools, see:
For more information on build backend tools, see:

* setuptools: https://setuptools.pypa.io/
-* flit: https://flit.pypa.io/
+* flit: https://flit.readthedocs.io/
* poetry: https://python-poetry.org/
-* hatchling: https://hatch.pypa.io/latest/
-* meson: https://meson-python.readthedocs.io/
-* pdm: https://pdm.fming.dev/latest/
View File
@@ -5,9 +5,9 @@
.. _qmakepackage:

------
-QMake
------
+------------
+QMakePackage
+------------

Much like Autotools and CMake, QMake is a build-script generator
designed by the developers of Qt. In its simplest form, Spack's
@@ -29,7 +29,7 @@ variables or edit ``*.pro`` files to get things working properly.
Phases
^^^^^^

-The ``QMakeBuilder`` and ``QMakePackage`` base classes come with the following phases:
+The ``QMakePackage`` base class comes with the following phases:

#. ``qmake`` - generate Makefiles
#. ``build`` - build the project
View File
@@ -5,9 +5,9 @@
.. _racketpackage:

-------
-Racket
-------
+-------------
+RacketPackage
+-------------

Much like Python, Racket packages and modules have their own special build system.
To learn more about the specifics of Racket package system, please refer to the
@@ -17,7 +17,7 @@ To learn more about the specifics of Racket package system, please refer to the
Phases
^^^^^^

-The ``RacketBuilder`` and ``RacketPackage`` base classes provide an ``install`` phase that
+The ``RacketPackage`` base class provides an ``install`` phase that
can be overridden, corresponding to the use of:

.. code-block:: console
View File
@@ -19,7 +19,7 @@ new Spack packages for.
Phases
^^^^^^

-The ``RBuilder`` and ``RPackage`` base classes have a single phase:
+The ``RPackage`` base class has a single phase:

#. ``install`` - install the package
@@ -193,10 +193,10 @@ Build system dependencies
As an extension of the R ecosystem, your package will obviously depend
on R to build and run. Normally, we would use ``depends_on`` to express
-this, but for R packages, we use ``extends``. This implies a special
-dependency on R, which is used to set environment variables such as
-``R_LIBS`` uniformly. Since every R package needs this, the ``RPackage``
-base class contains:
+this, but for R packages, we use ``extends``. ``extends`` is similar to
+``depends_on``, but adds an additional feature: the ability to "activate"
+the package by symlinking it to the R installation directory. Since
+every R package needs this, the ``RPackage`` base class contains:

.. code-block:: python
View File
@@ -5,9 +5,9 @@
.. _rubypackage:

-----
-Ruby
-----
+-----------
+RubyPackage
+-----------

Like Perl, Python, and R, Ruby has its own build system for
installing Ruby gems.
@@ -16,7 +16,7 @@ installing Ruby gems.
Phases
^^^^^^

-The ``RubyBuilder`` and ``RubyPackage`` base classes provide the following phases that
+The ``RubyPackage`` base class provides the following phases that
can be overridden:

#. ``build`` - build everything needed to install
View File
@@ -5,9 +5,9 @@
.. _sconspackage:

------
-SCons
------
+------------
+SConsPackage
+------------

SCons is a general-purpose build system that does not rely on
Makefiles to build software. SCons is written in Python, and handles
@@ -42,7 +42,7 @@ As previously mentioned, SCons allows developers to add subcommands like
   $ scons install

-To facilitate this, the ``SConsBuilder`` and ``SconsPackage`` base classes provide the
+To facilitate this, the ``SConsPackage`` base class provides the
following phases:

#. ``build`` - build the package
View File
@@ -5,9 +5,9 @@
.. _sippackage:

----
-SIP
----
+----------
+SIPPackage
+----------

SIP is a tool that makes it very easy to create Python bindings for C and C++
libraries. It was originally developed to create PyQt, the Python bindings for
@@ -22,7 +22,7 @@ provides support functions to the automatically generated code.
Phases
^^^^^^

-The ``SIPBuilder`` and ``SIPPackage`` base classes come with the following phases:
+The ``SIPPackage`` base class comes with the following phases:

#. ``configure`` - configure the package
#. ``build`` - build the package
View File
@@ -1,55 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _sourceforgepackage:
------------------
SourceforgePackage
------------------
``SourceforgePackage`` is a
`mixin-class <https://en.wikipedia.org/wiki/Mixin>`_. It automatically
sets the URL based on a list of Sourceforge mirrors listed in
``sourceforge_mirror_path``, which defaults to a half dozen known mirrors.
Refer to the package source
(`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/sourceforge.py>`__) for the current list of mirrors used by Spack.
^^^^^^^
Methods
^^^^^^^
This package provides a method for populating mirror URLs.
**urls**
This method returns a list of possible URLs for package source.
It is decorated with ``property`` so its results are treated as
a package attribute.
Refer to
`<https://spack.readthedocs.io/en/latest/packaging_guide.html#mirrors-of-the-main-url>`__
for information on how Spack uses the ``urls`` attribute during
fetching.
^^^^^
Usage
^^^^^
This helper package can be added to your package by adding it as a base
class of your package and defining the relative location of an archive
file for one version of your software.
.. code-block:: python
:emphasize-lines: 1,3
class MyPackage(AutotoolsPackage, SourceforgePackage):
...
sourceforge_mirror_path = "my-package/mypackage.1.0.0.tar.gz"
...
Over 40 packages use the ``SourceforgePackage`` mix-in as of
July 2022, so there are multiple packages to choose from if you want
to see a real example.

View File

@@ -5,9 +5,9 @@
.. _wafpackage: .. _wafpackage:
--- ----------
Waf WafPackage
--- ----------
Like SCons, Waf is a general-purpose build system that does not rely Like SCons, Waf is a general-purpose build system that does not rely
on Makefiles to build software. on Makefiles to build software.
@@ -16,7 +16,7 @@ on Makefiles to build software.
Phases Phases
^^^^^^ ^^^^^^
The ``WafBuilder`` and ``WafPackage`` base classes come with the following phases: The ``WafPackage`` base class comes with the following phases:
#. ``configure`` - configure the project #. ``configure`` - configure the project
#. ``build`` - build the project #. ``build`` - build the project
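As with SCons, customization usually happens by passing options rather than
replacing phases. A minimal sketch, assuming the conventional
``configure_args`` hook (the package name and flag are illustrative):

.. code-block:: python

   class Foo(WafPackage):
       """Hypothetical Waf-based package."""

       def configure_args(self):
           # options appended to `waf configure` (illustrative)
           return ["--enable-shared"]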

View File

@@ -32,42 +32,37 @@
# If extensions (or modules to document with autodoc) are in another directory, # If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the # add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here. # documentation root, use os.path.abspath to make it absolute, like shown here.
link_name = os.path.abspath("_spack_root") sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external'))
if not os.path.exists(link_name): sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external/pytest-fallback'))
os.symlink(os.path.abspath("../../.."), link_name, target_is_directory=True)
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external"))
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/pytest-fallback"))
if sys.version_info[0] < 3: if sys.version_info[0] < 3:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib")) sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib'))
else: else:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib3")) sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib3'))
sys.path.append(os.path.abspath("_spack_root/lib/spack/")) sys.path.append(os.path.abspath('_spack_root/lib/spack/'))
# Add the Spack bin directory to the path so that we can use its output in docs. # Add the Spack bin directory to the path so that we can use its output in docs.
os.environ["SPACK_ROOT"] = os.path.abspath("_spack_root") os.environ['SPACK_ROOT'] = os.path.abspath('_spack_root')
os.environ["PATH"] += "%s%s" % (os.pathsep, os.path.abspath("_spack_root/bin")) os.environ['PATH'] += "%s%s" % (os.pathsep, os.path.abspath('_spack_root/bin'))
# Set an environment variable so that colify will print output like it would to # Set an environment variable so that colify will print output like it would to
# a terminal. # a terminal.
os.environ["COLIFY_SIZE"] = "25x120" os.environ['COLIFY_SIZE'] = '25x120'
os.environ["COLUMNS"] = "120" os.environ['COLUMNS'] = '120'
# Generate full package list if needed # Generate full package list if needed
subprocess.call(["spack", "list", "--format=html", "--update=package_list.html"]) subprocess.call([
'spack', 'list', '--format=html', '--update=package_list.html'])
# Generate a command index if an update is needed # Generate a command index if an update is needed
subprocess.call( subprocess.call([
[ 'spack', 'commands',
"spack", '--format=rst',
"commands", '--header=command_index.in',
"--format=rst", '--update=command_index.rst'] + glob('*rst'))
"--header=command_index.in",
"--update=command_index.rst",
]
+ glob("*rst")
)
# #
# Run sphinx-apidoc # Run sphinx-apidoc
@@ -77,12 +72,12 @@
# Without this, the API Docs will never actually update # Without this, the API Docs will never actually update
# #
apidoc_args = [ apidoc_args = [
"--force", # Overwrite existing files '--force', # Overwrite existing files
"--no-toc", # Don't create a table of contents file '--no-toc', # Don't create a table of contents file
"--output-dir=.", # Directory to place all output '--output-dir=.', # Directory to place all output
] ]
sphinx_apidoc(apidoc_args + ["_spack_root/lib/spack/spack"]) sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/spack'])
sphinx_apidoc(apidoc_args + ["_spack_root/lib/spack/llnl"]) sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/llnl'])
# Enable todo items # Enable todo items
todo_include_todos = True todo_include_todos = True
@@ -92,12 +87,10 @@
# #
class PatchedPythonDomain(PythonDomain): class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if "refspecific" in node: if 'refspecific' in node:
del node["refspecific"] del node['refspecific']
return super(PatchedPythonDomain, self).resolve_xref( return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode env, fromdocname, builder, typ, target, node, contnode)
)
# #
# Disable tabs to space expansion in code blocks # Disable tabs to space expansion in code blocks
@@ -110,58 +103,51 @@ def parse(self, inputstring, document):
inputstring = StringList(lines, document.current_source) inputstring = StringList(lines, document.current_source)
super().parse(inputstring, document) super().parse(inputstring, document)
def setup(sphinx): def setup(sphinx):
sphinx.add_domain(PatchedPythonDomain, override=True) sphinx.add_domain(PatchedPythonDomain, override=True)
sphinx.add_source_parser(NoTabExpansionRSTParser, override=True) sphinx.add_source_parser(NoTabExpansionRSTParser, override=True)
# -- General configuration ----------------------------------------------------- # -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here. # If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.4" needs_sphinx = '3.4'
# Add any Sphinx extension module names here, as strings. They can be extensions # Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [ extensions = [
"sphinx.ext.autodoc", 'sphinx.ext.autodoc',
"sphinx.ext.graphviz", 'sphinx.ext.graphviz',
"sphinx.ext.intersphinx", 'sphinx.ext.intersphinx',
"sphinx.ext.napoleon", 'sphinx.ext.napoleon',
"sphinx.ext.todo", 'sphinx.ext.todo',
"sphinx.ext.viewcode", 'sphinx.ext.viewcode',
"sphinx_design", 'sphinxcontrib.programoutput',
"sphinxcontrib.programoutput",
] ]
# Set default graphviz options # Set default graphviz options
graphviz_dot_args = [ graphviz_dot_args = [
"-Grankdir=LR", '-Grankdir=LR', '-Gbgcolor=transparent',
"-Gbgcolor=transparent", '-Nshape=box', '-Nfontname=monaco', '-Nfontsize=10']
"-Nshape=box",
"-Nfontname=monaco",
"-Nfontsize=10",
]
# Get nice vector graphics # Get nice vector graphics
graphviz_output_format = "svg" graphviz_output_format = "svg"
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"] templates_path = ['_templates']
# The suffix of source filenames. # The suffix of source filenames.
source_suffix = ".rst" source_suffix = '.rst'
# The encoding of source files. # The encoding of source files.
source_encoding = "utf-8-sig" source_encoding = 'utf-8-sig'
# The master toctree document. # The master toctree document.
master_doc = "index" master_doc = 'index'
# General information about the project. # General information about the project.
project = u"Spack" project = u'Spack'
copyright = u"2013-2021, Lawrence Livermore National Laboratory." copyright = u'2013-2021, Lawrence Livermore National Laboratory.'
# The version info for the project you're documenting, acts as replacement for # The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the # |version| and |release|, also used in various other places throughout the
@@ -170,16 +156,16 @@ def setup(sphinx):
# The short X.Y version. # The short X.Y version.
import spack import spack
version = ".".join(str(s) for s in spack.spack_version_info[:2]) version = '.'.join(str(s) for s in spack.spack_version_info[:2])
# The full version, including alpha/beta/rc tags. # The full version, including alpha/beta/rc tags.
release = spack.spack_version release = spack.spack_version
# The language for content autogenerated by Sphinx. Refer to documentation # The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages. # for a list of supported languages.
# language = None #language = None
# Places to look for .po/.mo files for doc translations # Places to look for .po/.mo files for doc translations
# locale_dirs = [] #locale_dirs = []
# Sphinx gettext settings # Sphinx gettext settings
gettext_compact = True gettext_compact = True
@@ -187,46 +173,41 @@ def setup(sphinx):
# There are two options for replacing |today|: either, you set today to some # There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used: # non-false value, then it is used:
# today = '' #today = ''
# Else, today_fmt is used as the format for a strftime call. # Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y' #today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and # List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files. # directories to ignore when looking for source files.
exclude_patterns = ["_build", "_spack_root", ".spack-env"] exclude_patterns = ['_build', '_spack_root', '.spack-env']
nitpicky = True nitpicky = True
nitpick_ignore = [ nitpick_ignore = [
# Python classes that intersphinx is unable to resolve # Python classes that intersphinx is unable to resolve
("py:class", "argparse.HelpFormatter"), ('py:class', 'argparse.HelpFormatter'),
("py:class", "contextlib.contextmanager"), ('py:class', 'contextlib.contextmanager'),
("py:class", "module"), ('py:class', 'module'),
("py:class", "_io.BufferedReader"), ('py:class', '_io.BufferedReader'),
("py:class", "unittest.case.TestCase"), ('py:class', 'unittest.case.TestCase'),
("py:class", "_frozen_importlib_external.SourceFileLoader"), ('py:class', '_frozen_importlib_external.SourceFileLoader'),
("py:class", "clingo.Control"),
("py:class", "six.moves.urllib.parse.ParseResult"),
# Spack classes that are private and we don't want to expose # Spack classes that are private and we don't want to expose
("py:class", "spack.provider_index._IndexBase"), ('py:class', 'spack.provider_index._IndexBase'),
("py:class", "spack.repo._PrependFileLoader"), ('py:class', 'spack.repo._PrependFileLoader'),
("py:class", "spack.build_systems._checks.BaseBuilder"),
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.VersionBase"),
] ]
# The reST default role (used for this markup: `text`) to use for all documents. # The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None #default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text. # If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True #add_function_parentheses = True
# If true, the current module name will be prepended to all description # If true, the current module name will be prepended to all description
# unit titles (such as .. function::). # unit titles (such as .. function::).
# add_module_names = True #add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the # If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default. # output. They are ignored by default.
# show_authors = False #show_authors = False
# The name of the Pygments (syntax highlighting) style to use. # The name of the Pygments (syntax highlighting) style to use.
# We use our own extension of the default style with a few modifications # We use our own extension of the default style with a few modifications
@@ -237,151 +218,156 @@ def setup(sphinx):
class SpackStyle(DefaultStyle): class SpackStyle(DefaultStyle):
styles = DefaultStyle.styles.copy() styles = DefaultStyle.styles.copy()
background_color = "#f4f4f8" background_color = "#f4f4f8"
styles[Generic.Output] = "#355" styles[Generic.Output] = "#355"
styles[Generic.Prompt] = "bold #346ec9" styles[Generic.Prompt] = "bold #346ec9"
import pkg_resources import pkg_resources
dist = pkg_resources.Distribution(__file__) dist = pkg_resources.Distribution(__file__)
sys.path.append(".") # make 'conf' module findable sys.path.append('.') # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse("spack = conf:SpackStyle", dist=dist) ep = pkg_resources.EntryPoint.parse('spack = conf:SpackStyle', dist=dist)
dist._ep_map = {"pygments.styles": {"plugin1": ep}} dist._ep_map = {'pygments.styles': {'plugin1': ep}}
pkg_resources.working_set.add(dist) pkg_resources.working_set.add(dist)
pygments_style = "spack" pygments_style = 'spack'
# A list of ignored prefixes for module index sorting. # A list of ignored prefixes for module index sorting.
# modindex_common_prefix = [] #modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------- # -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for # The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. # a list of builtin themes.
html_theme = "sphinx_rtd_theme" html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme # Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the # further. For a list of options available for each theme, see the
# documentation. # documentation.
html_theme_options = {"logo_only": True} html_theme_options = { 'logo_only' : True }
# Add any paths that contain custom themes here, relative to this directory. # Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ["_themes"] # html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to # The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation". # "<project> v<release> documentation".
# html_title = None #html_title = None
# A shorter title for the navigation bar. Default is the same as html_title. # A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None #html_short_title = None
# The name of an image file (relative to this directory) to place at the top # The name of an image file (relative to this directory) to place at the top
# of the sidebar. # of the sidebar.
html_logo = "_spack_root/share/spack/logo/spack-logo-white-text.svg" html_logo = '_spack_root/share/spack/logo/spack-logo-white-text.svg'
# The name of an image file (within the static path) to use as favicon of the # The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large. # pixels large.
html_favicon = "_spack_root/share/spack/logo/favicon.ico" html_favicon = '_spack_root/share/spack/logo/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here, # Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files, # relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css". # so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"] html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format. # using the given strftime format.
html_last_updated_fmt = "%b %d, %Y" html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to # If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities. # typographically correct entities.
# html_use_smartypants = True #html_use_smartypants = True
# Custom sidebar templates, maps document names to template names. # Custom sidebar templates, maps document names to template names.
# html_sidebars = {} #html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to # Additional templates that should be rendered to pages, maps page names to
# template names. # template names.
# html_additional_pages = {} #html_additional_pages = {}
# If false, no module index is generated. # If false, no module index is generated.
# html_domain_indices = True #html_domain_indices = True
# If false, no index is generated. # If false, no index is generated.
# html_use_index = True #html_use_index = True
# If true, the index is split into individual pages for each letter. # If true, the index is split into individual pages for each letter.
# html_split_index = False #html_split_index = False
# If true, links to the reST sources are added to the pages. # If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True #html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = False #html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True #html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will # If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the # contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served. # base URL from which the finished HTML is served.
# html_use_opensearch = '' #html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml"). # This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None #html_file_suffix = None
# Output file base name for HTML help builder. # Output file base name for HTML help builder.
htmlhelp_basename = "Spackdoc" htmlhelp_basename = 'Spackdoc'
# -- Options for LaTeX output -------------------------------------------------- # -- Options for LaTeX output --------------------------------------------------
latex_elements = { latex_elements = {
# The paper size ('letterpaper' or 'a4paper'). # The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper', #'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt', # The font size ('10pt', '11pt' or '12pt').
# Additional stuff for the LaTeX preamble. #'pointsize': '10pt',
#'preamble': '',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
} }
# Grouping the document tree into LaTeX files. List of tuples # Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]). # (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [ latex_documents = [
("index", "Spack.tex", u"Spack Documentation", u"Todd Gamblin", "manual"), ('index', 'Spack.tex', u'Spack Documentation',
u'Todd Gamblin', 'manual'),
] ]
# The name of an image file (relative to this directory) to place at the top of # The name of an image file (relative to this directory) to place at the top of
# the title page. # the title page.
# latex_logo = None #latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts, # For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters. # not chapters.
# latex_use_parts = False #latex_use_parts = False
# If true, show page references after internal links. # If true, show page references after internal links.
# latex_show_pagerefs = False #latex_show_pagerefs = False
# If true, show URL addresses after external links. # If true, show URL addresses after external links.
# latex_show_urls = False #latex_show_urls = False
# Documents to append as an appendix to all manuals. # Documents to append as an appendix to all manuals.
# latex_appendices = [] #latex_appendices = []
# If false, no module index is generated. # If false, no module index is generated.
# latex_domain_indices = True #latex_domain_indices = True
# -- Options for manual page output -------------------------------------------- # -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples # One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section). # (source start file, name, description, authors, manual section).
man_pages = [("index", "spack", u"Spack Documentation", [u"Todd Gamblin"], 1)] man_pages = [
('index', 'spack', u'Spack Documentation',
[u'Todd Gamblin'], 1)
]
# If true, show URL addresses after external links. # If true, show URL addresses after external links.
# man_show_urls = False #man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------ # -- Options for Texinfo output ------------------------------------------------
@@ -390,25 +376,19 @@ class SpackStyle(DefaultStyle):
# (source start file, target name, title, author, # (source start file, target name, title, author,
# dir menu entry, description, category) # dir menu entry, description, category)
texinfo_documents = [ texinfo_documents = [
( ('index', 'Spack', u'Spack Documentation',
"index", u'Todd Gamblin', 'Spack', 'One line description of project.',
"Spack", 'Miscellaneous'),
u"Spack Documentation",
u"Todd Gamblin",
"Spack",
"One line description of project.",
"Miscellaneous",
),
] ]
# Documents to append as an appendix to all manuals. # Documents to append as an appendix to all manuals.
# texinfo_appendices = [] #texinfo_appendices = []
# If false, no module index is generated. # If false, no module index is generated.
# texinfo_domain_indices = True #texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'. # How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote' #texinfo_show_urls = 'footnote'
# -- Extension configuration ------------------------------------------------- # -- Extension configuration -------------------------------------------------

View File

@@ -19,9 +19,9 @@ see the default settings by looking at
These settings can be overridden in ``etc/spack/config.yaml`` or These settings can be overridden in ``etc/spack/config.yaml`` or
``~/.spack/config.yaml``. See :ref:`configuration-scopes` for details. ``~/.spack/config.yaml``. See :ref:`configuration-scopes` for details.
--------------------- --------------------
``install_tree:root`` ``install_tree``
--------------------- --------------------
The location where Spack will install packages and their dependencies. The location where Spack will install packages and their dependencies.
Default is ``$spack/opt/spack``. Default is ``$spack/opt/spack``.
@@ -224,9 +224,9 @@ them). Please note that we currently disable ccache's ``hash_dir``
feature to avoid an issue with the stage directory (see feature to avoid an issue with the stage directory (see
https://github.com/LLNL/spack/pull/3761#issuecomment-294352232). https://github.com/LLNL/spack/pull/3761#issuecomment-294352232).
----------------------- ------------------
``shared_linking:type`` ``shared_linking``
----------------------- ------------------
Control whether Spack embeds ``RPATH`` or ``RUNPATH`` attributes in ELF binaries Control whether Spack embeds ``RPATH`` or ``RUNPATH`` attributes in ELF binaries
so that they can find their dependencies. Has no effect on macOS. so that they can find their dependencies. Has no effect on macOS.
@@ -245,52 +245,6 @@ the loading object.
DO NOT MIX the two options within the same install tree. DO NOT MIX the two options within the same install tree.
-----------------------
``shared_linking:bind``
-----------------------
This is an *experimental option* that controls whether Spack embeds absolute paths
to needed shared libraries in ELF executables and shared libraries on Linux. Setting
this option to ``true`` has two advantages:
1. **Improved startup time**: when running an executable, the dynamic loader does not
have to perform a search for needed libraries, they are loaded directly.
2. **Reliability**: libraries loaded at runtime are those that were linked to. This
minimizes the risk of accidentally picking up system libraries.
In the current implementation, Spack sets the soname (shared object name) of
libraries to their install path upon installation. This has two implications:
1. binding does not apply to libraries installed *before* the option was enabled;
2. toggling the option off does *not* prevent binding of libraries installed when
the option was still enabled.
It is also worth noting that:
1. Applications relying on ``dlopen(3)`` will continue to work, even when they open
a library by name. This is because ``RPATH``\s are retained in binaries also
when ``bind`` is enabled.
2. ``LD_PRELOAD`` continues to work for the typical use case of overriding
symbols, such as preloading a library with a more efficient ``malloc``.
However, the preloaded library will be loaded *additionally to*, instead of
*in place of* another library with the same name --- this can be problematic
in very rare cases where libraries rely on a particular ``init`` or ``fini``
order.
.. note::
In some cases packages provide *stub libraries* that only contain an interface
for linking, but lack an implementation for runtime. An example of this is
``libcuda.so``, provided by the CUDA toolkit; it can be used to link against,
but the library needed at runtime is the one installed with the CUDA driver.
To avoid binding those libraries, they can be marked as non-bindable using
a property in the package:
.. code-block:: python
class Example(Package):
non_bindable_shared_objects = ["libinterface.so"]
---------------------- ----------------------
``terminal_title`` ``terminal_title``
---------------------- ----------------------

View File

@@ -405,17 +405,6 @@ Spack understands several special variables. These are:
* ``$user``: name of the current user * ``$user``: name of the current user
* ``$user_cache_path``: user cache directory (``~/.spack`` unless * ``$user_cache_path``: user cache directory (``~/.spack`` unless
:ref:`overridden <local-config-overrides>`) :ref:`overridden <local-config-overrides>`)
* ``$architecture``: the architecture triple of the current host, as
detected by Spack.
* ``$arch``: alias for ``$architecture``.
* ``$platform``: the platform of the current host, as detected by Spack.
* ``$operating_system``: the operating system of the current host, as
detected by the ``distro`` python module.
* ``$os``: alias for ``$operating_system``.
* ``$target``: the ISA target for the current host, as detected by
ArchSpec. E.g. ``skylake`` or ``neoverse-n1``.
* ``$target_family``. The target family for the current host, as
detected by ArchSpec. E.g. ``x86_64`` or ``aarch64``.
Note that, as with shell variables, you can write these as ``$varname`` Note that, as with shell variables, you can write these as ``$varname``
or with braces to distinguish the variable from surrounding characters: or with braces to distinguish the variable from surrounding characters:
@@ -560,7 +549,7 @@ down the problem:
You can see above that the ``build_jobs`` and ``debug`` settings are You can see above that the ``build_jobs`` and ``debug`` settings are
built in and are not overridden by a configuration file. The built in and are not overridden by a configuration file. The
``verify_ssl`` setting comes from the ``--insecure`` option on the ``verify_ssl`` setting comes from the ``--insceure`` option on the
command line. ``dirty`` and ``install_tree`` come from the custom command line. ``dirty`` and ``install_tree`` come from the custom
scopes ``./my-scope`` and ``./my-scope-2``, and all other configuration scopes ``./my-scope`` and ``./my-scope-2``, and all other configuration
options come from the default configuration files that ship with Spack. options come from the default configuration files that ship with Spack.

View File

@@ -59,7 +59,7 @@ other techniques to minimize the size of the final image:
&& echo " specs:" \ && echo " specs:" \
&& echo " - gromacs+mpi" \ && echo " - gromacs+mpi" \
&& echo " - mpich" \ && echo " - mpich" \
&& echo " concretizer:" \ && echo " concretizer: together" \
&& echo " unify: true" \ && echo " unify: true" \
&& echo " config:" \ && echo " config:" \
&& echo " install_tree: /opt/software" \ && echo " install_tree: /opt/software" \

View File

@@ -71,7 +71,7 @@ locally to speed up the review process.
new release that is causing problems. If this is the case, please file an issue. new release that is causing problems. If this is the case, please file an issue.
We currently test against Python 2.7 and 3.6-3.10 on both macOS and Linux and We currently test against Python 2.7 and 3.5-3.9 on both macOS and Linux and
perform 3 types of tests: perform 3 types of tests:
.. _cmd-spack-unit-test: .. _cmd-spack-unit-test:
@@ -253,6 +253,27 @@ to update them.
multiple runs of ``spack style`` just to re-compute line numbers and multiple runs of ``spack style`` just to re-compute line numbers and
makes it much easier to fix errors directly off of the CI output. makes it much easier to fix errors directly off of the CI output.
.. warning::
Flake8 and ``pep8-naming`` require a number of dependencies in order
to run. If you installed ``py-flake8`` and ``py-pep8-naming``, the
easiest way to ensure the right packages are on your ``PYTHONPATH`` is
to run::
spack activate py-flake8
spack activate pep8-naming
so that all of the dependencies are symlinked to a central
location. If you see an error message like:
.. code-block:: console
Traceback (most recent call last):
File: "/usr/bin/flake8", line 5, in <module>
from pkg_resources import load_entry_point
ImportError: No module named pkg_resources
that means Flake8 couldn't find setuptools in your ``PYTHONPATH``.
^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
Documentation Tests Documentation Tests
@@ -288,9 +309,13 @@ All of these can be installed with Spack, e.g.
.. code-block:: console .. code-block:: console
$ spack load py-sphinx py-sphinx-rtd-theme py-sphinxcontrib-programoutput $ spack activate py-sphinx
$ spack activate py-sphinx-rtd-theme
$ spack activate py-sphinxcontrib-programoutput
so that all of the dependencies are added to PYTHONPATH. If you see an error message so that all of the dependencies are symlinked into that Python's
tree. Alternatively, you could arrange for their library
directories to be added to PYTHONPATH. If you see an error message
like: like:
.. code-block:: console .. code-block:: console

View File

@@ -107,6 +107,7 @@ with a high level view of Spack's directory structure:
llnl/ <- some general-use libraries llnl/ <- some general-use libraries
spack/ <- spack module; contains Python code spack/ <- spack module; contains Python code
analyzers/ <- modules to run analysis on installed packages
build_systems/ <- modules for different build systems build_systems/ <- modules for different build systems
cmd/ <- each file in here is a spack subcommand cmd/ <- each file in here is a spack subcommand
compilers/ <- compiler description files compilers/ <- compiler description files
@@ -149,9 +150,11 @@ grouped by functionality.
Package-related modules Package-related modules
^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.package_base` :mod:`spack.package`
Contains the :class:`~spack.package_base.PackageBase` class, which Contains the :class:`~spack.package_base.Package` class, which
is the superclass for all packages in Spack. is the superclass for all packages in Spack. Methods on ``Package``
implement all phases of the :ref:`package lifecycle
<package-lifecycle>` and manage the build process.
:mod:`spack.util.naming` :mod:`spack.util.naming`
Contains functions for mapping between Spack package names, Contains functions for mapping between Spack package names,
@@ -239,6 +242,22 @@ Unit tests
Implements Spack's test suite. Add a module and put its name in Implements Spack's test suite. Add a module and put its name in
the test suite in ``__init__.py`` to add more unit tests. the test suite in ``__init__.py`` to add more unit tests.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Research and Monitoring Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.monitor`
Contains :class:`~spack.monitor.SpackMonitorClient`. This is accessed from
the ``spack install`` and ``spack analyze`` commands to send build and
package metadata up to a `Spack Monitor
<https://github.com/spack/spack-monitor>`_ server.
:mod:`spack.analyzers`
A module folder with a :class:`~spack.analyzers.analyzer_base.AnalyzerBase`
that provides base functions to run, save, and (optionally) upload analysis
results to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.
^^^^^^^^^^^^^ ^^^^^^^^^^^^^
Other Modules Other Modules
@@ -282,6 +301,240 @@ Most spack commands look something like this:
The information in Package files is used at all stages in this The information in Package files is used at all stages in this
process. process.
Conceptually, packages are overloaded. They contain:
-------------
Stage objects
-------------
.. _writing-analyzers:
-----------------
Writing analyzers
-----------------
To write an analyzer, you should add a new python file to the
analyzers module directory at ``lib/spack/spack/analyzers`` .
Your analyzer should be a subclass of the :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`. For example, if you want
to add an analyzer class ``Myanalyzer`` you would write to
``spack/analyzers/myanalyzer.py`` and import and
use the base as follows:
.. code-block:: python
from .analyzer_base import AnalyzerBase
class Myanalyzer(AnalyzerBase):
    ...  # class body elided; see the attributes and run() described below
Note that the class name is your module file name, all lowercase
except for the capitalized first letter. You can look at other analyzers in
that directory for examples. The guide here covers the basic functions needed.
^^^^^^^^^^^^^^^^^^^^^^^^^
Analyzer Output Directory
^^^^^^^^^^^^^^^^^^^^^^^^^
By default, when you run ``spack analyze run``, an analyzer output directory will
be created in your Spack user directory under ``$HOME``. Results are written there
because the install directory might not always be writable.
.. code-block:: console
~/.spack/
analyzers
Result files will be written here, organized in subfolders with the same structure
as the package, with each analyzer owning its own subfolder. For example:
.. code-block:: console
$ tree ~/.spack/analyzers/
/home/spackuser/.spack/analyzers/
└── linux-ubuntu20.04-skylake
└── gcc-9.3.0
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
├── environment_variables
│   └── spack-analyzer-environment-variables.json
├── install_files
│   └── spack-analyzer-install-files.json
└── libabigail
└── lib
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
Notice that for the libabigail analyzer, since results are generated per object,
we honor the object's folder in case there are equivalently named files in
different folders. The result files are typically written as JSON so they can be
easily read and uploaded in a future interaction with a monitor.
^^^^^^^^^^^^^^^^^
Analyzer Metadata
^^^^^^^^^^^^^^^^^
Your analyzer is required to have the class attributes ``name``, ``outfile``,
and ``description``. These are printed to the user when they use the subcommand
``spack analyze list-analyzers``. Here is an example.
As we mentioned above, note that this analyzer would live in a module named
``libabigail.py`` in the analyzers folder so that the class can be discovered.
.. code-block:: python
class Libabigail(AnalyzerBase):
name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"
This means that the name and output file should be unique for your analyzer.
Note that "all" cannot be the name of an analyzer, as this key is used to indicate
that the user wants to run all analyzers.
.. _analyzer_run_function:
^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer run Function
^^^^^^^^^^^^^^^^^^^^^^^^
The core of an analyzer is its ``run()`` function, which should accept no
arguments. You can assume your analyzer has the package spec of interest at ``self.spec``
and it's up to the run function to generate whatever analysis data you need,
and then return the object with a key as the analyzer name. The result data
should be a list of objects, each with a name, ``analyzer_name``, ``install_file``,
and one of ``value`` or ``binary_value``. The install file should be a relative
path, not an absolute path. For example, let's say we extract a metric called
``metric`` for ``bin/wget`` using our analyzer ``thebest-analyzer``.
We might have data that looks like this:
.. code-block:: python
result = {"name": "metric", "analyzer_name": "thebest-analyzer", "value": "1", "install_file": "bin/wget"}
We'd then return it as follows - note that the key is the analyzer name at ``self.name``.
.. code-block:: python
return {self.name: result}
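Putting these pieces together, a minimal self-contained sketch of such an
analyzer might look like the following; the class and metric names are the
stand-ins from the example above, not a real Spack analyzer:

.. code-block:: python

   from .analyzer_base import AnalyzerBase


   class Thebestanalyzer(AnalyzerBase):
       """Hypothetical analyzer emitting a single stand-in metric."""

       name = "thebest-analyzer"
       outfile = "spack-analyzer-thebest-analyzer.json"
       description = "an illustrative analyzer with one metric"

       def run(self):
           # one result object, keyed by the analyzer name
           result = {
               "name": "metric",
               "analyzer_name": self.name,
               "value": "1",
               "install_file": "bin/wget",  # relative to the install prefix
           }
           return {self.name: result}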
This will save the complete result to the analyzer metadata folder, as described
previously. If you want support for adding a different kind of metadata (e.g.,
not associated with an install file) then the monitor server would need to be updated
to support this first.
^^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer init Function
^^^^^^^^^^^^^^^^^^^^^^^^^
If you don't need any extra dependencies or checks, you can skip defining an analyzer
init function, as the base class will handle it. Typically, it will accept
a spec, and an optional output directory (if the user does not want the default
metadata folder for analyzer results). The analyzer init function should call
it's parent init, and then do any extra checks or validation that are required to
work. For example:
.. code-block:: python
def __init__(self, spec, dirname=None):
    super(Myanalyzer, self).__init__(spec, dirname)

    # install extra dependencies, do extra preparation and checks here
At the end of the init, you will have available to you:
- **self.spec**: the spec object
- **self.dirname**: an optional directory name the user has provided at init to save
- **self.output_dir**: the analyzer metadata directory, where we save by default
- **self.meta_dir**: the path to the package metadata directory (.spack) if you need it
And can proceed to write your analyzer.
^^^^^^^^^^^^^^^^^^^^^^^
Saving Analyzer Results
^^^^^^^^^^^^^^^^^^^^^^^
The analyzer will have ``save_result`` called with the generated result object
to save it to the filesystem and, if the user has added the ``--monitor`` flag,
to upload it to a monitor server. If your result follows an accepted result
format and you don't need to parse it further, you don't need to add this
function to your class. However, if your result data is large or otherwise
needs additional parsing, you can define it. If you define the function, it
is useful to know about the ``output_dir`` property, which you can join
with your output file relative path of choice:
.. code-block:: python
outfile = os.path.join(self.output_dir, "my-output-file.txt")
The directory will be provided by the ``output_dir`` property but it won't exist,
so you should create it:
.. code-block:: python

# Create the output directory
if not os.path.exists(self._output_dir):
    os.makedirs(self._output_dir)
If you are generating results that correspond to specific files in the package
install directory, you should try to maintain those paths in the case that
there are equivalently named files in different directories that would
overwrite one another. As an example of an analyzer with a custom save,
the Libabigail analyzer saves ``*.xml`` files to the analyzer metadata
folder in ``run()``, as they are either binaries, or as xml (text) would
usually be too big to pass in one request. For this reason, the files
are saved during ``run()`` and the filenames added to the result object,
and then when the result object is passed back into ``save_result()``,
we skip saving to the filesystem, and instead read the file and send
each one (separately) to the monitor:
.. code-block:: python
def save_result(self, result, monitor=None, overwrite=False):
    """ABI results are saved to individual files, so each one needs to be
    read and uploaded. Result here should be the lookup generated in run(),
    the key is the analyzer name, and each value is the result file.
    We currently upload the entire xml as text because libabigail can't
    easily read gzipped xml, but this will be updated when it can.
    """
    if not monitor:
        return

    name = self.spec.package.name

    for obj, filename in result.get(self.name, {}).items():

        # Don't include the prefix
        rel_path = obj.replace(self.spec.prefix + os.path.sep, "")

        # We've already saved the results to file during run
        content = spack.monitor.read_file(filename)

        # A result needs an analyzer, value or binary_value, and name
        data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
        tty.info("Sending result for %s %s to monitor." % (name, rel_path))
        monitor.send_analyze_metadata(self.spec.package, {"libabigail": [data]})
Notice that this function, if you define it, requires a result object (generated by
``run()``), a monitor (if you want to send), and a boolean ``overwrite`` used to
check whether a result exists first, and to avoid writing to it if the result
exists and ``overwrite`` is ``False``. Also notice that since we already saved
these files to the analyzer metadata folder, we return early if a monitor isn't
defined, because this function only serves to send results to the monitor. If you
haven't saved anything to the analyzer metadata folder yet, you might want to do
that here. You should also use ``tty.info`` to give the user a message of
"Writing result to $DIRNAME."
.. _writing-commands: .. _writing-commands:
@@ -446,6 +699,23 @@ with a hook, and this is the purpose of this particular hook. Akin to
``on_phase_success`` we require the same variables - the package that failed, ``on_phase_success`` we require the same variables - the package that failed,
the name of the phase, and the log file where we might find errors. the name of the phase, and the log file where we might find errors.
"""""""""""""""""""""""""""""""""
``on_analyzer_save(pkg, result)``
"""""""""""""""""""""""""""""""""
After an analyzer has saved some result for a package, this hook is called,
and it provides the package that we just ran the analysis for, along with
the loaded result. Typically, a result is structured to have the name
of the analyzer as key, and the result object that is defined in detail in
:ref:`analyzer_run_function`.
.. code-block:: python
def on_analyzer_save(pkg, result):
"""given a package and a result...
"""
print('Do something extra with a package analysis result here')
^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^
Adding a New Hook Type Adding a New Hook Type
@@ -963,13 +1233,8 @@ completed, the steps to make the point release are:
$ git checkout releases/v0.15 $ git checkout releases/v0.15
#. If a pull request to the release branch named ``Backports vX.Y.Z`` is not already
in the project, create it. This pull request ought to be created as early as
possible when working on a release project, so that we can build the release
commits incrementally, and identify potential conflicts at an early stage.
#. Cherry-pick each pull request in the ``Done`` column of the release #. Cherry-pick each pull request in the ``Done`` column of the release
project board onto the ``Backports vX.Y.Z`` pull request. project board onto the release branch.
This is **usually** fairly simple since we squash the commits from the This is **usually** fairly simple since we squash the commits from the
vast majority of pull requests. That means there is only one commit vast majority of pull requests. That means there is only one commit
@@ -994,7 +1259,7 @@ completed, the steps to make the point release are:
It is important to cherry-pick commits in the order they happened, It is important to cherry-pick commits in the order they happened,
otherwise you can get conflicts while cherry-picking. When otherwise you can get conflicts while cherry-picking. When
cherry-picking look at the merge date, cherry-picking onto a point release, look at the merge date,
**not** the number of the pull request or the date it was opened. **not** the number of the pull request or the date it was opened.
Sometimes you may **still** get merge conflicts even if you have Sometimes you may **still** get merge conflicts even if you have
@@ -1015,19 +1280,15 @@ completed, the steps to make the point release are:
branch if neither of the above options makes sense, but this can branch if neither of the above options makes sense, but this can
require a lot of work. It's seldom the right choice. require a lot of work. It's seldom the right choice.
#. When all the commits from the project board are cherry-picked into #. Bump the version in ``lib/spack/spack/__init__.py``.
the ``Backports vX.Y.Z`` pull request, you can push a commit to:
1. Bump the version in ``lib/spack/spack/__init__.py``. #. Update ``CHANGELOG.md`` with a list of the changes.
2. Update ``CHANGELOG.md`` with a list of the changes.
This is typically a summary of the commits you cherry-picked onto the This is typically a summary of the commits you cherry-picked onto the
release branch. See `the changelog from 0.14.1 release branch. See `the changelog from 0.14.1
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_. <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This #. Push the release branch to GitHub.
is needed to keep track in the release branch of all the commits that were
cherry-picked.
#. Make sure CI passes on the release branch, including: #. Make sure CI passes on the release branch, including:
@@ -1046,8 +1307,6 @@ completed, the steps to make the point release are:
#. Follow the steps in :ref:`announcing-releases`. #. Follow the steps in :ref:`announcing-releases`.
#. Submit a PR to update the CHANGELOG in the `develop` branch
with the addition of this point release.
.. _publishing-releases: .. _publishing-releases:

View File

@@ -233,8 +233,8 @@ packages will be listed as roots of the Environment.
All of the Spack commands that act on the list of installed specs are All of the Spack commands that act on the list of installed specs are
Environment-sensitive in this way, including ``install``, Environment-sensitive in this way, including ``install``,
``uninstall``, ``find``, ``extensions``, and more. In the ``uninstall``, ``activate``, ``deactivate``, ``find``, ``extensions``,
:ref:`environment-configuration` section we will discuss and more. In the :ref:`environment-configuration` section we will discuss
Environment-sensitive commands further. Environment-sensitive commands further.
^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^
@@ -376,30 +376,6 @@ from being added again. At the same time, a spec that already exists in the
environment, but only as a dependency, will be added to the environment as a environment, but only as a dependency, will be added to the environment as a
root spec without the ``--no-add`` option. root spec without the ``--no-add`` option.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Developing Packages in a Spack Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``spack develop`` command allows one to develop Spack packages in
an environment. It requires a spec containing a concrete version, and
will configure Spack to install the package from local source. By
default, it will also clone the package to a subdirectory in the
environment. This package will have a special variant ``dev_path``
set, and Spack will ensure the package and its dependents are rebuilt
any time the environment is installed if the package's local source
code has been modified. Spack ensures that all instances of a
developed package in the environment are concretized to match the
version (and other constraints) passed as the spec argument to the
``spack develop`` command.
For packages with ``git`` attributes, git branches, tags, and commits can
also be used as valid concrete versions (see :ref:`version-specifier`).
This means that for a package ``foo``, ``spack develop foo@git.main`` will clone
the ``main`` branch of the package, and ``spack install`` will install from
that git clone if ``foo`` is in the environment.
Further development on ``foo`` can be tested by reinstalling the environment,
and eventually committed and pushed to the upstream git repo.
^^^^^^^ ^^^^^^^
Loading Loading
^^^^^^^ ^^^^^^^
@@ -478,21 +454,14 @@ them to the Environment.
spack: spack:
include: include:
- relative/path/to/config.yaml - relative/path/to/config.yaml
- https://github.com/path/to/raw/config/compilers.yaml
- /absolute/path/to/packages.yaml - /absolute/path/to/packages.yaml
Environments can include files or URLs. File paths can be relative or Environments can include files with either relative or absolute
absolute. URLs include the path to the text for individual files or paths. Inline configurations take precedence over included
can be the path to a directory containing configuration files. configurations, so you don't have to change shared configuration files
to make small changes to an individual Environment. Included configs
^^^^^^^^^^^^^^^^^^^^^^^^ listed earlier will have higher precedence, as the included configs are
Configuration precedence applied in reverse order.
^^^^^^^^^^^^^^^^^^^^^^^^
Inline configurations take precedence over included configurations, so
you don't have to change shared configuration files to make small changes
to an individual environment. Included configurations listed earlier will
have higher precedence, as the included configs are applied in reverse order.
------------------------------- -------------------------------
Manually Editing the Specs List Manually Editing the Specs List
@@ -519,49 +488,8 @@ available from the yaml file.
^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
Spec concretization Spec concretization
^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
An environment can be concretized in three different modes and the behavior active under An environment can be concretized in three different modes and the behavior active under any environment
any environment is determined by the ``concretizer:unify`` configuration option. is determined by the ``concretizer:unify`` property. By default specs are concretized *separately*, one after the other:
The *default* mode is to unify all specs:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This means that any package in the environment corresponds to a single concrete spec. In
the above example, when ``hdf5`` depends on ``zlib`` down the line, it is required to
take ``zlib@1.2.8`` instead of a newer version. This mode of concretization is
particularly useful when environment views are used: if every package occurs in
only one flavor, it is usually possible to merge all install directories into a view.
A downside of unified concretization is that it can be overly strict. For example, a
concretization error would happen when both ``hdf5+mpi`` and ``hdf5~mpi`` are specified
in an environment.
The second mode is to *unify when possible*: this makes concretization of root specs
more independent. Instead of requiring reuse of dependencies across different root
specs, reuse is only maximized:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
This means that both ``hdf5`` installations will use ``zlib@1.2.8`` as a dependency even
if newer versions of that library are available.
The third mode of operation is to concretize root specs entirely independently by
disabling unified concretization:
.. code-block:: yaml .. code-block:: yaml
@@ -573,11 +501,45 @@ disabling unified concretization:
concretizer: concretizer:
unify: false unify: false
In this example ``hdf5`` is concretized separately, and does not consider ``zlib@1.2.8`` This mode of operation permits to deploy a full software stack where multiple configurations of the same package
as a constraint or preference. Instead, it will take the latest possible version. need to be installed alongside each other using the best possible selection of transitive dependencies. The downside
is that redundancy of installations is disregarded completely, and thus environments might be more bloated than
strictly needed. In the example above, for instance, if a version of ``zlib`` newer than ``1.2.8`` is known to Spack,
then it will be used for both ``hdf5`` installations.
The last two concretization options are typically useful for system administrators and If redundancy of the environment is a concern, Spack provides a way to install it *together where possible*,
user support groups providing a large software stack for their HPC center. i.e. trying to maximize reuse of dependencies across different specs:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
Also in this case Spack allows having multiple configurations of the same package, but privileges the reuse of
specs over other factors. Going back to our example, this means that both ``hdf5`` installations will use
``zlib@1.2.8`` as a dependency even if newer versions of that library are available.
Central installations done at HPC centers by system administrators or user support groups are a common case
that fits either of these two modes.
Environments can also be configured to concretize all the root specs *together*, in a self-consistent way, to
ensure that each package in the environment comes with a single configuration:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This mode of operation is usually what is required by software developers that want to deploy their development
environment and have a single view of it in the filesystem.
.. note:: .. note::
@@ -588,10 +550,10 @@ user support groups providing a large software stack for their HPC center.
.. admonition:: Re-concretization of user specs

   When using *unified* concretization (when possible), the entire set of specs will be
   re-concretized after any addition of new user specs, to ensure that
   the environment remains consistent / minimal. When instead unified concretization is
   disabled, only the new specs will be concretized after any addition.

   When concretizing specs *together* or *together where possible* the entire set of specs
   will be re-concretized after any addition of new user specs; when instead the specs are
   concretized separately only the new specs will be re-concretized after any addition.
^^^^^^^^^^^^^
Spec Matrices
@@ -630,6 +592,31 @@ The following two Environment manifests are identical:
Spec matrices can be used to install swaths of software across various
toolchains.
The concretization logic for spec matrices differs slightly from the
rest of Spack. If a variant or dependency constraint from a matrix is
invalid, Spack will reject the constraint and try again without
it. For example, the following two Environment manifests will produce
the same specs:
.. code-block:: yaml
spack:
specs:
- matrix:
- [zlib, libelf, hdf5+mpi]
- [^mvapich2@2.2, ^openmpi@3.1.0]
spack:
specs:
- zlib
- libelf
- hdf5+mpi ^mvapich2@2.2
- hdf5+mpi ^openmpi@3.1.0
This allows one to create toolchains out of combinations of
constraints and apply them somewhat indiscriminately to packages,
without regard for the applicability of the constraint.
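If the automatic rejection is not enough, a matrix also accepts an explicit
``exclude`` list to filter specific combinations out of the cross-product; a
minimal sketch (the package and compiler choices here are illustrative):

.. code-block:: yaml

   spack:
     specs:
     - matrix:
         - [zlib, hdf5+mpi]
         - ["%gcc@9.4.0", "%clang@12.0.1"]
       exclude:
         - hdf5+mpi%clang@12.0.1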
^^^^^^^^^^^^^^^^^^^^
Spec List References
^^^^^^^^^^^^^^^^^^^^
@@ -961,6 +948,9 @@ Variable Paths
PATH                bin
MANPATH             man, share/man
ACLOCAL_PATH        share/aclocal
LD_LIBRARY_PATH     lib, lib64
LIBRARY_PATH        lib, lib64
CPATH               include
PKG_CONFIG_PATH     lib/pkgconfig, lib64/pkgconfig, share/pkgconfig
CMAKE_PREFIX_PATH   .
=================== =========
@@ -993,7 +983,7 @@ A typical workflow is as follows:
spack env create -d .
spack -e . add perl
spack -e . concretize
spack -e . env depfile -o Makefile spack -e . env depfile > Makefile
make -j64

This generates a ``Makefile`` from a concretized environment in the
@@ -1006,6 +996,7 @@ load, even when packages are built in parallel.
By default the following phony convenience targets are available:

- ``make all``: installs the environment (default target);
- ``make fetch-all``: only fetch sources of all packages;
- ``make clean``: cleans files used by make, but does not uninstall packages.
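For instance, sources can be fetched up front on a machine with network access
and the build then run without re-downloading (a usage sketch):

.. code-block:: console

   $ make -j16 fetch-all
   $ make -j16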
.. tip::
@@ -1015,23 +1006,14 @@ By default the following phony convenience targets are available:
printed orderly per package install. To get synchronized output with colors,
use ``make -j<N> SPACK_COLOR=always --output-sync=recurse``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specifying dependencies on generated ``make`` targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

An interesting question is how to include generated ``Makefile``\s in your own
``Makefile``\s. This comes up when you want to install an environment that provides
executables required in a command for a make target of your own.

The example below shows how to accomplish this: the ``env`` target specifies
the generated ``spack/env`` target as a prerequisite, meaning that the environment
gets installed and is available for use in the ``env`` target.

The following advanced example shows how generated targets can be used in a
``Makefile``:
.. code:: Makefile

SPACK ?= spack
.PHONY: all clean env .PHONY: all clean fetch env
all: env
@@ -1040,6 +1022,9 @@ gets installed and is available for use in the ``env`` target.
env.mk: spack.lock
$(SPACK) -e . env depfile -o $@ --make-target-prefix spack
fetch: spack/fetch
$(info Environment fetched!)
env: spack/env
$(info Environment installed!)
@@ -1051,10 +1036,11 @@ gets installed and is available for use in the ``env`` target.
include env.mk
endif
This works as follows: when ``make`` is invoked, it first "remakes" the missing
include ``env.mk`` as there is a target for it. This triggers concretization of
the environment and makes spack output ``env.mk``. At that point the
generated target ``spack/env`` becomes available through ``include env.mk``.

When ``make`` is invoked, it first "remakes" the missing include ``env.mk``
from its rule, which triggers concretization. When done, the generated targets
``spack/fetch`` and ``spack/env`` are available. In the above
example, the ``env`` target uses the latter as a prerequisite, meaning
that it can make use of the installed packages in its commands.
As it is typically undesirable to remake ``env.mk`` as part of ``make clean``,
the include is conditional.
@@ -1062,27 +1048,7 @@ the include is conditional.
.. note::
   When including generated ``Makefile``\s, it is important to use
   the ``--make-target-prefix`` flag and use the non-phony target
   ``<target-prefix>/env`` as prerequisite, instead of the phony target
   ``<target-prefix>/all``.

   When including generated ``Makefile``\s, it is important to use
   the ``--make-target-prefix`` flag and use the non-phony targets
   ``<target-prefix>/env`` and ``<target-prefix>/fetch`` as
   prerequisites, instead of the phony targets ``<target-prefix>/all``
   and ``<target-prefix>/fetch-all`` respectively.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Building a subset of the environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The generated ``Makefile``\s contain install targets for each spec. Given the hash
of a particular spec, you can use the ``.install/<hash>`` target to install the
spec with its dependencies. There is also ``.install-deps/<hash>`` to *only* install
its dependencies. This can be useful when certain flags should only apply to
dependencies. Below we show a use case where a spec is installed with verbose
output (``spack install --verbose``) while its dependencies are installed silently:
.. code:: console
$ spack env depfile -o Makefile --make-target-prefix my_env
# Install dependencies in parallel, only show a log on error.
$ make -j16 my_env/.install-deps/<hash> SPACK_INSTALL_FLAGS=--show-log-on-error
# Install the root spec with verbose output.
$ make -j16 my_env/.install/<hash> SPACK_INSTALL_FLAGS=--verbose

View File

@@ -98,42 +98,40 @@ For example, this command:
.. code-block:: console
$ spack create https://ftp.osuosl.org/pub/blfs/conglomeration/libelf/libelf-0.8.13.tar.gz $ spack create http://www.mr511.de/software/libelf-0.8.13.tar.gz
creates a simple python file:

.. code-block:: python
   from spack.package import *


   class Libelf(AutotoolsPackage):
       """FIXME: Put a proper description of your package here."""

       # FIXME: Add a proper url for your package's homepage here.
       homepage = "https://www.example.com"
       url = "https://ftp.osuosl.org/pub/blfs/conglomeration/libelf/libelf-0.8.13.tar.gz"

       # FIXME: Add a list of GitHub accounts to
       # notify when the package is updated.
       # maintainers = ["github_user1", "github_user2"]

       version("0.8.13", sha256="591a9b4ec81c1f2042a97aa60564e0cb79d041c52faa7416acb38bc95bd2c76d")

       # FIXME: Add dependencies if required.
       # depends_on("foo")

       def configure_args(self):
           # FIXME: Add arguments other than --prefix
           # FIXME: If not needed delete this function
           args = []
           return args

   from spack import *


   class Libelf(Package):
       """FIXME: Put a proper description of your package here."""

       # FIXME: Add a proper url for your package's homepage here.
       homepage = "http://www.example.com"
       url = "http://www.mr511.de/software/libelf-0.8.13.tar.gz"

       version('0.8.13', '4136d7b4c04df68b686570afa26988ac')

       # FIXME: Add dependencies if required.
       # depends_on('foo')

       def install(self, spec, prefix):
           # FIXME: Modify the configure line to suit your build system here.
           configure('--prefix={0}'.format(prefix))

           # FIXME: Add logic to build and install here.
           make()
           make('install')
It doesn't take much python coding to get from there to a working
package:

.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/libelf/package.py
:lines: 5- :lines: 6-
Spack also provides wrapper functions around common commands like
``configure``, ``make``, and ``cmake`` to make writing packages

View File

@@ -23,36 +23,8 @@ be present on the machine where Spack is run:
These requirements can be easily installed on most modern Linux systems;
on macOS, XCode is required. Spack is designed to run on HPC
platforms like Cray. Not all packages should be expected
to work on all platforms. A build matrix showing which packages are
working on which systems is planned but not yet available.

A build matrix showing which packages are working on which systems is shown below.
.. tab-set::
.. tab-item:: Debian/Ubuntu
.. code-block:: console
apt update
apt install build-essential ca-certificates coreutils curl environment-modules gfortran git gpg lsb-release python3 python3-distutils python3-venv unzip zip
.. tab-item:: RHEL
.. code-block:: console
yum update -y
yum install -y epel-release
yum update -y
yum --enablerepo epel groupinstall -y "Development Tools"
yum --enablerepo epel install -y curl findutils gcc-c++ gcc gcc-gfortran git gnupg2 hostname iproute redhat-lsb-core make patch python3 python3-pip python3-setuptools unzip
python3 -m pip install boto3
.. tab-item:: macOS Brew
.. code-block:: console
brew update
brew install curl gcc git gnupg zip
------------
Installation
@@ -124,41 +96,88 @@ Spack provides two ways of bootstrapping ``clingo``: from pre-built binaries
(default), or from sources. The fastest way to get started is to bootstrap from
pre-built binaries.
The first time you concretize a spec, Spack will bootstrap automatically:

.. note::

   When bootstrapping from pre-built binaries, Spack currently requires
   ``patchelf`` on Linux and ``otool`` on macOS. If ``patchelf`` is not in the
   ``PATH``, Spack will build it from sources, and a C++ compiler is required.

The first time you concretize a spec, Spack will bootstrap in the background:
.. code-block:: console
$ spack spec zlib $ time spack spec zlib
==> Bootstrapping clingo from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-ba5ijauisd3uuixtmactc36vps7yfsrl.spec.json
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64/gcc-10.2.1/clingo-bootstrap-spack/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-ba5ijauisd3uuixtmactc36vps7yfsrl.spack
==> Installing "clingo-bootstrap@spack%gcc@10.2.1~docs~ipo+python+static_libstdcpp build_type=Release arch=linux-centos7-x86_64" from a buildcache
==> Bootstrapping patchelf from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.16.1-p72zyan5wrzuabtmzq7isa5mzyh6ahdp.spec.json
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64/gcc-10.2.1/patchelf-0.16.1/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.16.1-p72zyan5wrzuabtmzq7isa5mzyh6ahdp.spack
==> Installing "patchelf@0.16.1%gcc@10.2.1 ldflags="-static-libstdc++ -static-libgcc" build_system=autotools arch=linux-centos7-x86_64" from a buildcache
Input spec
--------------------------------
zlib

Concretized
--------------------------------
zlib@1.2.13%gcc@9.4.0+optimize+pic+shared build_system=makefile arch=linux-ubuntu20.04-icelake
zlib@1.2.11%gcc@7.5.0+optimize+pic+shared arch=linux-ubuntu18.04-zen

real	0m20.023s
user	0m18.351s
sys	0m0.784s

If for security concerns you cannot bootstrap ``clingo`` from pre-built
binaries, you have to disable fetching the binaries we generated with Github Actions.
After this command you'll see that ``clingo`` has been installed for Spack's own use:
.. code-block:: console
$ spack bootstrap disable github-actions-v0.4
==> "github-actions-v0.4" is now disabled and will not be used for bootstrapping
$ spack bootstrap disable github-actions-v0.3
==> "github-actions-v0.3" is now disabled and will not be used for bootstrapping

$ spack find -b
==> Showing internal bootstrap store at "/root/.spack/bootstrap/store"
==> 3 installed packages
-- linux-rhel5-x86_64 / gcc@9.3.0 -------------------------------
clingo-bootstrap@spack  python@3.6

-- linux-ubuntu18.04-zen / gcc@7.5.0 ----------------------------
patchelf@0.13
Subsequent calls to the concretizer will then be much faster:
.. code-block:: console
$ time spack spec zlib
[ ... ]
real 0m0.490s
user 0m0.431s
sys 0m0.041s
If for security concerns you cannot bootstrap ``clingo`` from pre-built
binaries, you have to mark this bootstrapping method as untrusted. This makes
Spack fall back to bootstrapping from sources:
.. code-block:: console
$ spack bootstrap untrust github-actions-v0.2
==> "github-actions-v0.2" is now untrusted and will not be used for bootstrapping
You can verify that the new settings are effective with:
.. command-output:: spack bootstrap list .. code-block:: console
$ spack bootstrap list
Name: github-actions-v0.2 UNTRUSTED
Type: buildcache
Info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.2
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
Description:
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
[ ... ]
Name: spack-install TRUSTED
Type: install
Description:
Specs built from sources by Spack. May take a long time.
.. note::
@@ -188,7 +207,9 @@ under the ``${HOME}/.spack`` directory. The software installed there can be quer
.. code-block:: console
$ spack -b find $ spack find --bootstrap
==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
==> 3 installed packages
-- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
clingo-bootstrap@spack  python@3.6.9  re2c@1.2.1
@@ -197,7 +218,7 @@ In case it's needed the bootstrap store can also be cleaned with:
.. code-block:: console

$ spack clean -b
==> Removing bootstrapped software and configuration in "/home/spack/.spack/bootstrap" ==> Removing software in "/home/spack/.spack/bootstrap/store"
^^^^^^^^^^^^^^^^^^
Check Installation

Binary files not shown (five images, sizes 658, 449, 128, 126 and 35 KiB).

File diff suppressed because it is too large

View File

@@ -67,6 +67,7 @@ or refer to the full manual below.
build_settings
environments
containers
monitoring
mirrors
module_file_support
repositories
@@ -77,6 +78,12 @@ or refer to the full manual below.
extensions
pipelines
.. toctree::
:maxdepth: 2
:caption: Research
analyze
.. toctree::
:maxdepth: 2
:caption: Contributing

View File

@@ -77,7 +77,7 @@ installation of a package.
Spack only generates modulefiles when a package is installed. If
you attempt to install a package and it is already installed, Spack
will not regenerate modulefiles for the package. This may lead to will not regenerate modulefiles for the package. This may to
inconsistent modulefiles if the Spack module configuration has
changed since the package was installed, either by editing a file
or changing scopes or environments.
from language interpreters into their extensions. The latter two instead permit to
fine tune the filesystem layout, content and creation of module files to meet
site specific conventions.
.. _overide-api-calls-in-package-py:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Override API calls in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -136,7 +134,7 @@ The second method:
pass

can instead inject run-time environment modifications in the module files of packages
that depend on it. In both cases you need to fill ``env`` with the desired that depend on it. In both cases you need to fill ``run_env`` with the desired
list of environment modifications.
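A minimal sketch of the second method (the variable name below is made up for
illustration, not taken from the docs):

.. code-block:: python

   def setup_dependent_run_environment(self, env, dependent_spec):
       # Hypothetical variable: expose this package's prefix to the
       # module files of packages that depend on this one
       env.set("FOO_HOME", self.prefix)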
.. admonition:: The ``r`` package and callback APIs
@@ -310,7 +308,7 @@ the variable ``FOOBAR`` will be unset.
spec constraints are instead evaluated top to bottom.

""""""""""""""""""""""""""""""""""""""""""""
Exclude or include specific module files Blacklist or whitelist specific module files
"""""""""""""""""""""""""""""""""""""""""""" """"""""""""""""""""""""""""""""""""""""""""
You can use anonymous specs also to prevent module files from being written or You can use anonymous specs also to prevent module files from being written or
@@ -324,8 +322,8 @@ your system. If you write a configuration file like:
modules:
default:
tcl:
include: ['gcc', 'llvm'] # include will have precedence over exclude whitelist: ['gcc', 'llvm'] # Whitelist will have precedence over blacklist
exclude: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler blacklist: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler
you will prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
@@ -492,7 +490,7 @@ satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.

.. warning::

If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
@@ -520,33 +518,18 @@ inspections and customize them per-module-set.
prefix_inspections:
bin:
- PATH
man: lib:
- MANPATH - LIBRARY_PATH
'':
- CMAKE_PREFIX_PATH
Prefix inspections are only applied if the relative path inside the
installation prefix exists. In this case, for a Spack package ``foo``
installed to ``/spack/prefix/foo``, if ``foo`` installs executables to
``bin`` but no manpages in ``man``, the generated module file for ``bin`` but no libraries in ``lib``, the generated module file for
``foo`` would update ``PATH`` to contain ``/spack/prefix/foo/bin`` and
``CMAKE_PREFIX_PATH`` to contain ``/spack/prefix/foo``, but would not
update ``MANPATH``. update ``LIBRARY_PATH``.
The default list of environment variables in this config section
includes ``PATH``, ``MANPATH``, ``ACLOCAL_PATH``, ``PKG_CONFIG_PATH``
and ``CMAKE_PREFIX_PATH``, as well as ``DYLD_FALLBACK_LIBRARY_PATH``
on macOS. On Linux however, the corresponding ``LD_LIBRARY_PATH``
variable is *not* set, because it affects the behavior of
system executables too.
.. note::
In general, the ``LD_LIBRARY_PATH`` variable is not required
when using packages built with Spack, thanks to the use of RPATH.
Some packages may still need the variable, which is best handled
on a per-package basis instead of globally, as explained in
:ref:`overide-api-calls-in-package-py`.
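For instance, a package that genuinely needs the variable could handle it in
its own ``package.py`` (a sketch, not taken from the docs):

.. code-block:: python

   def setup_run_environment(self, env):
       # Only this package's module file will set LD_LIBRARY_PATH
       env.prepend_path("LD_LIBRARY_PATH", self.prefix.lib)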
There is a special case for prefix inspections relative to environment
views. If all of the following conditions hold for a module set
@@ -606,7 +589,7 @@ Filter out environment modifications
Modifications to certain environment variables in module files are there by
default, for instance because they are generated by prefix inspections.
If you want to prevent modifications to some environment variables, you can
do so by using the ``exclude_env_vars``: do so by using the environment blacklist:
.. code-block:: yaml
@@ -616,7 +599,7 @@ do so by using the ``exclude_env_vars``:
all:
filter:
# Exclude changes to any of these variables
exclude_env_vars: ['CPATH', 'LIBRARY_PATH'] environment_blacklist: ['CPATH', 'LIBRARY_PATH']
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.

View File

@@ -0,0 +1,265 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _monitoring:
==========
Monitoring
==========
You can use a `spack monitor <https://github.com/spack/spack-monitor>`_ "Spackmon"
server to store a database of your packages, builds, and associated metadata
for provenance, research, or some other kind of development. You should
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
to first create a server along with a username and token for yourself.
You can then use this guide to interact with the server.
-------------------
Analysis Monitoring
-------------------
To read about how to monitor an analysis (meaning you want to send analysis results
to a server) see :ref:`analyze_monitoring`.
---------------------
Monitoring An Install
---------------------
Since an install is typically when you build packages, we logically want
to tell spack to monitor during this step. Let's start with an example
where we want to monitor the install of hdf5. Unless you have disabled authentication
for the server, we first want to export our spack monitor token and username to the environment:
.. code-block:: console
$ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
$ export SPACKMON_USER=spacky
By default, the host for your server is expected to be at ``http://127.0.0.1``
with a prefix of ``ms1``, and if this is the case, you can simply add the
``--monitor`` flag to the install command:
.. code-block:: console
$ spack install --monitor hdf5
If you need to customize the host or the prefix, you can do that as well:
.. code-block:: console
$ spack install --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io hdf5
As a precaution, we cut out early in the spack client if you have not provided
authentication credentials. For example, if you run the command above without
exporting your username or token, you'll see:
.. code-block:: console
==> Error: You are required to export SPACKMON_TOKEN and SPACKMON_USER
This extra check is to ensure that we don't start any builds,
and then discover that you forgot to export your token. However, if
your monitoring server has authentication disabled, you can tell this to
the client to skip this step:
.. code-block:: console
$ spack install --monitor --monitor-disable-auth hdf5
If the service is not running, you'll cleanly exit early - the install will
not continue if you've asked it to monitor and there is no service.
For example, here is what you'll see if the monitoring service is not running:
.. code-block:: console
[Errno 111] Connection refused
If you want to continue builds (and stop monitoring) you can set the ``--monitor-keep-going``
flag.
.. code-block:: console
$ spack install --monitor --monitor-keep-going hdf5
This could mean that if a request fails, you only have partial or no data
added to your monitoring database. This setting will not be applied to the
first request to check if the server is running, but to subsequent requests.
If you don't have a monitor server running and you want to build, simply
don't provide the ``--monitor`` flag! Finally, if you want to provide one or
more tags to your build, you can do:
.. code-block:: console
# Add one tag, "pizza"
$ spack install --monitor --monitor-tags pizza hdf5
# Add two tags, "pizza" and "pasta"
$ spack install --monitor --monitor-tags pizza,pasta hdf5
----------------------------
Monitoring with Containerize
----------------------------
The same argument group is available to add to a containerize command.
^^^^^^
Docker
^^^^^^
To add monitoring to a Docker container recipe generation using the defaults,
and assuming a monitor server running on localhost, you would
start with a spack.yaml in your present working directory:
.. code-block:: yaml
spack:
specs:
- samtools
And then do:
.. code-block:: console
# preview first
spack containerize --monitor
# and then write to a Dockerfile
spack containerize --monitor > Dockerfile
The install command will be edited to include commands for enabling monitoring.
However, getting secrets into the container for your monitor server is something
that should be done carefully. Specifically you should:
- Never try to define secrets as ENV, ARG, or using ``--build-arg``
- Do not try to get the secret into the container via a "temporary" file that you remove (it in fact will still exist in a layer)
Instead, it's recommended to use buildkit `as explained here <https://pythonspeed.com/articles/docker-build-secrets/>`_.
You'll need to again export environment variables for your spack monitor server:
.. code-block:: console
$ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
$ export SPACKMON_USER=spacky
And then use buildkit along with your build and identifying the name of the secret:
.. code-block:: console
$ DOCKER_BUILDKIT=1 docker build --secret id=st,env=SPACKMON_TOKEN --secret id=su,env=SPACKMON_USER -t spack/container .
The secrets are expected to come from your environment, and then will be temporarily mounted and available
at ``/run/secrets/<name>``. If you forget to supply them (and authentication is required) the build
will fail. If you need to build on your host (and interact with a spack monitor at localhost) you'll
need to tell Docker to use the host network:
.. code-block:: console
$ DOCKER_BUILDKIT=1 docker build --network="host" --secret id=st,env=SPACKMON_TOKEN --secret id=su,env=SPACKMON_USER -t spack/container .
^^^^^^^^^^^
Singularity
^^^^^^^^^^^
To add monitoring to a Singularity container build, the spack.yaml needs to
be modified slightly to specify wanting a different format:
.. code-block:: yaml
spack:
specs:
- samtools
container:
format: singularity
Again, generate the recipe:
.. code-block:: console
# preview first
$ spack containerize --monitor
# then write to a Singularity recipe
$ spack containerize --monitor > Singularity
Singularity doesn't have a direct way to define secrets at build time, so we have
to do a bit of a manual command to add a file, source secrets in it, and remove it.
Since Singularity doesn't have layers like Docker, deleting a file will truly
remove it from the container and history. So let's say we have this file,
``secrets.sh``:
.. code-block:: console
# secrets.sh
export SPACKMON_USER=spack
export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
We would then generate the Singularity recipe, and add a files section,
a source of that file at the start of ``%post``, and **importantly**
a removal of the file at the end of that same section.
.. code-block::
Bootstrap: docker
From: spack/ubuntu-bionic:latest
Stage: build
%files
secrets.sh /opt/secrets.sh
%post
. /opt/secrets.sh
# spack install commands are here
...
# Don't forget to remove here!
rm /opt/secrets.sh
You can then build the container as you normally would.
.. code-block:: console
$ sudo singularity build container.sif Singularity
------------------
Monitoring Offline
------------------
In the case that you want to save monitor results to your filesystem
and then upload them later (perhaps you are in an environment where you don't
have credentials or it isn't safe to use them) you can use the ``--monitor-save-local``
flag.
.. code-block:: console
$ spack install --monitor --monitor-save-local hdf5
This will save results in a subfolder, "monitor", in your designated spack
reports folder, which defaults to ``$HOME/.spack/reports/monitor``. When
you are ready to upload them to a spack monitor server:
.. code-block:: console
$ spack monitor upload ~/.spack/reports/monitor
You can choose the root directory of results as shown above, or a specific
subdirectory. The command accepts other arguments to specify configuration
for the monitor.
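For example, to upload only the results of a single run (the timestamped
subdirectory name here is hypothetical):

.. code-block:: console

   $ spack monitor upload ~/.spack/reports/monitor/2022-07-11-14-00/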

File diff suppressed because it is too large

View File

@@ -5,9 +5,9 @@
.. _pipelines: .. _pipelines:
============ =========
CI Pipelines Pipelines
============ =========
Spack provides commands that support generating and running automated build
pipelines designed for Gitlab CI. At the highest level it works like this:
@@ -168,7 +168,7 @@ which specs are up to date and which need to be rebuilt (it's a good idea for ot
reasons as well, but those are out of scope for this discussion). In this case we
have disabled it (using ``rebuild-index: False``) because the index would only be
generated in the artifacts mirror anyway, and consequently would not be available
during subsequent pipeline runs. during subesequent pipeline runs.
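In the ``spack.yaml`` environment file this setting lives in the ``gitlab-ci``
section; a minimal sketch:

.. code-block:: yaml

   gitlab-ci:
     rebuild-index: False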
.. note::

With the addition of reproducible builds (#22887) a previously working
@@ -267,64 +267,24 @@ generated by jobs in the pipeline.
``spack ci rebuild``
^^^^^^^^^^^^^^^^^^^^^
The purpose of ``spack ci rebuild`` is straightforward: take its assigned
spec and ensure a binary of a successful build exists on the target mirror.
If the binary does not already exist, it is built from source and pushed
to the mirror. The associated stand-alone tests are optionally run against
the new build. Additionally, files for reproducing the build outside of the
CI environment are created to facilitate debugging.

If a binary for the spec does not exist on the target mirror, an install
shell script, ``install.sh``, is created and saved in the current working
directory. The script is run in a job to install the spec from source. The
resulting binary package is pushed to the mirror. If ``cdash`` is configured
for the environment, then the build results will be uploaded to the site.

Environment variables and values in the ``gitlab-ci`` section of the
``spack.yaml`` environment file provide inputs to this process. The
two main sources of environment variables are variables written into
``.gitlab-ci.yml`` by ``spack ci generate`` and the GitLab CI runtime.
Several key CI pipeline variables are described in
:ref:`ci_environment_variables`.

The purpose of the ``spack ci rebuild`` is straightforward: take its assigned
spec job, check whether the target mirror already has a binary for that spec,
and if not, build the spec from source and push the binary to the mirror. To
accomplish this in a reproducible way, the sub-command prepares a ``spack install``
command line to build a single spec in the DAG, saves that command in a
shell script, ``install.sh``, in the current working directory, and then runs
it to install the spec. The shell script is also exported as an artifact to
aid in reproducing the build outside of the CI environment.

If it was necessary to install the spec from source, ``spack ci rebuild`` will
also subsequently create a binary package for the spec and try to push it to the
mirror.

The ``spack ci rebuild`` sub-command mainly expects its "input" to come either
from environment variables or from the ``gitlab-ci`` section of the ``spack.yaml``
environment file. There are two main sources of the environment variables, some
are written into ``.gitlab-ci.yml`` by ``spack ci generate``, and some are
provided by the GitLab CI runtime.
If the ``--tests`` option is provided, stand-alone tests are performed but
only if the build was successful *and* the package does not appear in the
list of ``broken-tests-packages``. A shell script, ``test.sh``, is created
and run to perform the tests. On completion, test logs are exported as job
artifacts for review and to facilitate debugging. If `cdash` is configured,
test results are also uploaded to the site.
A snippet from an example ``spack.yaml`` file illustrating use of this
option *and* specification of a package with broken tests is given below.
The inclusion of a spec for building ``gptune`` is not shown here. Note
that ``--tests`` is passed to ``spack ci rebuild`` as part of the
``gitlab-ci`` script.
.. code-block:: yaml
gitlab-ci:
script:
- . "./share/spack/setup-env.sh"
- spack --version
- cd ${SPACK_CONCRETE_ENV_DIR}
- spack env activate --without-view .
- spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'"
- mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
- if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi
- if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
- spack -d ci rebuild --tests > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2)
broken-tests-packages:
- gptune
In this case, even if ``gptune`` is successfully built from source, the
pipeline will *not* run its stand-alone tests since the package is listed
under ``broken-tests-packages``.
Spack's cloud pipelines provide actual, up-to-date examples of the CI/CD
configuration and environment files used by Spack. You can find them
under Spack's `stacks
<https://github.com/spack/spack/tree/develop/share/spack/gitlab/cloud_pipelines/stacks>`_ repository directory.
.. _cmd-spack-ci-rebuild-index:
@@ -487,7 +447,7 @@ Note about "no-op" jobs
^^^^^^^^^^^^^^^^^^^^^^^

If no specs in an environment need to be rebuilt during a given pipeline run
(meaning all are already up to date on the mirror), a single successful job (meaning all are already up to date on the mirror), a single succesful job
(a NO-OP) is still generated to avoid an empty pipeline (which GitLab
considers to be an error). An optional ``service-job-attributes`` section
can be added to your ``spack.yaml`` where you can provide ``tags`` and
@@ -765,7 +725,7 @@ above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
On the other hand, if you're pointing to a spack repository and branch under your
control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``,
and you can instead just clone using the variables you define (``SPACK_REPO``
and ``SPACK_REF`` in the example above). and ``SPACK_REF`` in the example aboves).
.. _custom_workflow:

View File

@@ -1,12 +1,10 @@
# These dependencies should be installed using pip in order
# to build the documentation.
sphinx>=3.4,!=4.1.2,!=5.1.0 sphinx>=3.4,!=4.1.2
sphinxcontrib-programoutput
sphinx-design
sphinx-rtd-theme
python-levenshtein
# Restrict to docutils <0.17 to workaround a list rendering issue in sphinx.
# https://stackoverflow.com/questions/67542699
docutils <0.17
pygments <2.13

View File

@@ -18,10 +18,7 @@ spack:
- "py-sphinx@3.4:4.1.1,4.1.3:" - "py-sphinx@3.4:4.1.1,4.1.3:"
- py-sphinxcontrib-programoutput - py-sphinxcontrib-programoutput
- py-docutils@:0.16 - py-docutils@:0.16
- py-sphinx-design
- py-sphinx-rtd-theme - py-sphinx-rtd-theme
- py-pygments@:2.12
# VCS # VCS
- git - git
- mercurial - mercurial

View File

@@ -1,5 +1,5 @@
Name, Supported Versions, Notes, Requirement Reason
Python, 2.7/3.6-3.11, , Interpreter for Spack Python, 2.7/3.5-3.10, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
@@ -11,7 +11,6 @@ bzip2, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
lsb-release, , , Linux: identify operating system version
gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories

186
lib/spack/env/cc vendored
View File

@@ -241,28 +241,28 @@ case "$command" in
mode=cpp mode=cpp
debug_flags="-g" debug_flags="-g"
;; ;;
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|craycc) cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe)
command="$SPACK_CC" command="$SPACK_CC"
language="C" language="C"
comp="CC" comp="CC"
lang_flags=C lang_flags=C
debug_flags="-g" debug_flags="-g"
;; ;;
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC) c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++)
command="$SPACK_CXX" command="$SPACK_CXX"
language="C++" language="C++"
comp="CXX" comp="CXX"
lang_flags=CXX lang_flags=CXX
debug_flags="-g" debug_flags="-g"
;; ;;
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang|crayftn) ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang)
command="$SPACK_FC" command="$SPACK_FC"
language="Fortran 90" language="Fortran 90"
comp="FC" comp="FC"
lang_flags=F lang_flags=F
debug_flags="-g" debug_flags="-g"
;; ;;
f77|xlf|xlf_r|pgf77) f77|xlf|xlf_r|pgf77|amdflang)
command="$SPACK_F77" command="$SPACK_F77"
language="Fortran 77" language="Fortran 77"
comp="F77" comp="F77"
@@ -427,55 +427,6 @@ isystem_include_dirs_list=""
libs_list=""
other_args_list=""
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
# Same, but for -Xlinker -rpath -Xlinker /path
xlinker_expect_rpath=no
parse_Wl() {
# drop -Wl
shift
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
if system_dir "$1"; then
append system_rpath_dirs_list "$1"
else
append rpath_dirs_list "$1"
fi
wl_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
if system_dir "$arg"; then
append system_rpath_dirs_list "$arg"
else
append rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
if system_dir "$arg"; then
append system_rpath_dirs_list "$arg"
else
append rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
wl_expect_rpath=yes
;;
"$dtags_to_strip")
;;
*)
append other_args_list "-Wl,$1"
;;
esac
fi
shift
done
}
while [ $# -ne 0 ]; do
@@ -534,77 +485,88 @@ while [ $# -ne 0 ]; do
append other_args_list "-l$arg"
;;
        -Wl,*)
            IFS=,
            parse_Wl $1
            unset IFS
            ;;

        -Wl,*)
            arg="${1#-Wl,}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            case "$arg" in
                -rpath=*) rp="${arg#-rpath=}" ;;
                --rpath=*) rp="${arg#--rpath=}" ;;
                -rpath,*) rp="${arg#-rpath,}" ;;
                --rpath,*) rp="${arg#--rpath,}" ;;
                -rpath|--rpath)
                    shift; arg="$1"
                    case "$arg" in
                        -Wl,*)
                            rp="${arg#-Wl,}"
                            ;;
                        *)
                            die "-Wl,-rpath was not followed by -Wl,*"
                            ;;
                    esac
                    ;;
                "$dtags_to_strip")
                    : # We want to remove explicitly this flag
                    ;;
                *)
                    append other_args_list "-Wl,$arg"
                    ;;
            esac
            ;;
        -Xlinker,*)
            arg="${1#-Xlinker,}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            case "$arg" in
                -rpath=*) rp="${arg#-rpath=}" ;;
                --rpath=*) rp="${arg#--rpath=}" ;;
                -rpath|--rpath)
                    shift; arg="$1"
                    case "$arg" in
                        -Xlinker,*)
                            rp="${arg#-Xlinker,}"
                            ;;
                        *)
                            die "-Xlinker,-rpath was not followed by -Xlinker,*"
                            ;;
                    esac
                    ;;
                *)
                    append other_args_list "-Xlinker,$arg"
                    ;;
            esac
            ;;
        -Xlinker)
            shift
            if [ $# -eq 0 ]; then
                # -Xlinker without value: let the compiler error about it.
                append other_args_list -Xlinker
                xlinker_expect_rpath=no
                break
            elif [ "$xlinker_expect_rpath" = yes ]; then
                # Register the path of -Xlinker -rpath <other args> -Xlinker <path>
                if system_dir "$1"; then
                    append system_rpath_dirs_list "$1"
                else
                    append rpath_dirs_list "$1"
                fi
                xlinker_expect_rpath=no
            else
                case "$1" in
                    -rpath=*)
                        arg="${1#-rpath=}"
                        if system_dir "$arg"; then
                            append system_rpath_dirs_list "$arg"
                        else
                            append rpath_dirs_list "$arg"
                        fi
                        ;;
                    --rpath=*)
                        arg="${1#--rpath=}"
                        if system_dir "$arg"; then
                            append system_rpath_dirs_list "$arg"
                        else
                            append rpath_dirs_list "$arg"
                        fi
                        ;;
                    -rpath|--rpath)
                        xlinker_expect_rpath=yes
                        ;;
                    "$dtags_to_strip")
                        ;;
                    *)
                        append other_args_list -Xlinker
                        append other_args_list "$1"
                        ;;
                esac
            fi
            ;;
        "$dtags_to_strip")
            ;;
        *)
            append other_args_list "$1"
            ;;
    esac

        -Xlinker)
            if [ "$2" = "-rpath" ]; then
                if [ "$3" != "-Xlinker" ]; then
                    die "-Xlinker,-rpath was not followed by -Xlinker,*"
                fi
                shift 3;
                rp="$1"
            elif [ "$2" = "$dtags_to_strip" ]; then
                shift # We want to remove explicitly this flag
            else
                append other_args_list "$1"
            fi
            ;;
        *)
            if [ "$1" = "$dtags_to_strip" ]; then
                : # We want to remove explicitly this flag
            else
                append other_args_list "$1"
            fi
            ;;
    esac
# test rpaths against system directories in one place.
if [ -n "$rp" ]; then
if system_dir "$rp"; then
append system_rpath_dirs_list "$rp"
else
append rpath_dirs_list "$rp"
fi
fi
shift
done
# We found `-Xlinker -rpath` but no matching value `-Xlinker /path`. Just append
# `-Xlinker -rpath` again and let the compiler or linker handle the error during arg
# parsing.
if [ "$xlinker_expect_rpath" = yes ]; then
append other_args_list -Xlinker
append other_args_list -rpath
fi
# Same, but for -Wl flags.
if [ "$wl_expect_rpath" = yes ]; then
append other_args_list -Wl,-rpath
fi
#
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
# ldflags. We stick to the order that gmake puts the flags in by default.

View File

@@ -1 +0,0 @@
../../cc

View File

@@ -1 +0,0 @@
../cc

View File

@@ -1 +0,0 @@
../cc

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.0 (commit 77640e572725ad97f18e63a04857155752ace045) * Version: 0.1.4 (commit b8eea9df2b4204ff27d204452cd46f5199a0b423)
argparse
--------

View File

@@ -132,15 +132,9 @@ def sysctl(*args):
"model name": sysctl("-n", "machdep.cpu.brand_string"), "model name": sysctl("-n", "machdep.cpu.brand_string"),
} }
else: else:
model = "unknown" model = (
model_str = sysctl("-n", "machdep.cpu.brand_string").lower() "m1" if "Apple" in sysctl("-n", "machdep.cpu.brand_string") else "unknown"
if "m2" in model_str: )
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
info = { info = {
"vendor_id": "Apple", "vendor_id": "Apple",
"flags": [], "flags": [],
@@ -328,26 +322,14 @@ def compatibility_check_for_aarch64(info, target):
    features = set(info.get("Features", "").split())
    vendor = info.get("CPU implementer", "generic")

    # At the moment it's not clear how to detect compatibility with
    # a specific version of the architecture
    if target.vendor == "generic" and target.name != "aarch64":
        return False

    arch_root = TARGETS[basename]
    arch_root_and_vendor = arch_root == target.family and target.vendor in (
        vendor,
        "generic",
    )

    # On macOS it seems impossible to get all the CPU features
    # with syctl info, but for ARM we can get the exact model
    if platform.system() == "Darwin":
        model_key = info.get("model", basename)
        model = TARGETS[model_key]
        return arch_root_and_vendor and (target == model or target in model.ancestors)

    return arch_root_and_vendor and target.features.issubset(features)

    return (
        (target == arch_root or arch_root in target.ancestors)
        and target.vendor in (vendor, "generic")
        # On macOS it seems impossible to get all the CPU features with syctl info
        and (target.features.issubset(features) or platform.system() == "Darwin")
    )
@compatibility_check(architecture_family="riscv64") @compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target): def compatibility_check_for_riscv64(info, target):

View File

@@ -106,7 +106,7 @@ def __eq__(self, other):
self.name == other.name self.name == other.name
and self.vendor == other.vendor and self.vendor == other.vendor
and self.features == other.features and self.features == other.features
and self.parents == other.parents # avoid ancestors here and self.ancestors == other.ancestors
and self.compilers == other.compilers and self.compilers == other.compilers
and self.generation == other.generation and self.generation == other.generation
) )

View File

@@ -85,7 +85,7 @@
"intel": [ "intel": [
{ {
"versions": ":", "versions": ":",
"name": "pentium4", "name": "x86-64",
"flags": "-march={name} -mtune=generic" "flags": "-march={name} -mtune=generic"
} }
], ],
@@ -1099,7 +1099,8 @@
"avx512cd", "avx512cd",
"avx512vbmi", "avx512vbmi",
"avx512ifma", "avx512ifma",
"sha" "sha",
"umip"
], ],
"compilers": { "compilers": {
"gcc": [ "gcc": [
@@ -1262,6 +1263,7 @@
"avx512vbmi", "avx512vbmi",
"avx512ifma", "avx512ifma",
"sha_ni", "sha_ni",
"umip",
"clwb", "clwb",
"rdpid", "rdpid",
"gfni", "gfni",
@@ -2093,163 +2095,8 @@
] ]
} }
}, },
"armv8.1a": {
"from": ["aarch64"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "5:",
"flags": "-march=armv8.1-a -mtune=generic"
}
],
"clang": [
{
"versions": ":",
"flags": "-march=armv8.1-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.1-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.1-a -mtune=generic"
}
]
}
},
"armv8.2a": {
"from": ["armv8.1a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "6:",
"flags": "-march=armv8.2-a -mtune=generic"
}
],
"clang": [
{
"versions": ":",
"flags": "-march=armv8.2-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.2-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.2-a -mtune=generic"
}
]
}
},
"armv8.3a": {
"from": ["armv8.2a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "6:",
"flags": "-march=armv8.3-a -mtune=generic"
}
],
"clang": [
{
"versions": "6:",
"flags": "-march=armv8.3-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.3-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.3-a -mtune=generic"
}
]
}
},
"armv8.4a": {
"from": ["armv8.3a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "8:",
"flags": "-march=armv8.4-a -mtune=generic"
}
],
"clang": [
{
"versions": "8:",
"flags": "-march=armv8.4-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.4-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.4-a -mtune=generic"
}
]
}
},
"armv8.5a": {
"from": ["armv8.4a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "9:",
"flags": "-march=armv8.5-a -mtune=generic"
}
],
"clang": [
{
"versions": "11:",
"flags": "-march=armv8.5-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.5-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.5-a -mtune=generic"
}
]
}
},
"thunderx2": { "thunderx2": {
"from": ["armv8.1a"], "from": ["aarch64"],
"vendor": "Cavium", "vendor": "Cavium",
"features": [ "features": [
"fp", "fp",
@@ -2296,7 +2143,7 @@
} }
}, },
"a64fx": { "a64fx": {
"from": ["armv8.2a"], "from": ["aarch64"],
"vendor": "Fujitsu", "vendor": "Fujitsu",
"features": [ "features": [
"fp", "fp",
@@ -2364,7 +2211,7 @@
] ]
} }
}, },
"cortex_a72": { "graviton": {
"from": ["aarch64"], "from": ["aarch64"],
"vendor": "ARM", "vendor": "ARM",
"features": [ "features": [
@@ -2390,19 +2237,19 @@
}, },
{ {
"versions": "6:", "versions": "6:",
"flags" : "-mcpu=cortex-a72" "flags" : "-march=armv8-a+crc+crypto -mtune=cortex-a72"
} }
], ],
"clang" : [ "clang" : [
{ {
"versions": "3.9:", "versions": "3.9:",
"flags" : "-mcpu=cortex-a72" "flags" : "-march=armv8-a+crc+crypto"
} }
] ]
} }
}, },
"neoverse_n1": { "graviton2": {
"from": ["cortex_a72", "armv8.2a"], "from": ["aarch64"],
"vendor": "ARM", "vendor": "ARM",
"features": [ "features": [
"fp", "fp",
@@ -2451,7 +2298,7 @@
}, },
{ {
"versions": "9.0:", "versions": "9.0:",
"flags" : "-mcpu=neoverse-n1" "flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto -mtune=neoverse-n1"
} }
], ],
"clang" : [ "clang" : [
@@ -2462,10 +2309,6 @@
{ {
"versions": "5:", "versions": "5:",
"flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto" "flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto"
},
{
"versions": "10:",
"flags" : "-mcpu=neoverse-n1"
} }
], ],
"arm" : [ "arm" : [
@@ -2476,113 +2319,8 @@
] ]
} }
}, },
"neoverse_v1": {
"from": ["neoverse_n1", "armv8.4a"],
"vendor": "ARM",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"paca",
"pacg",
"dcpodp",
"svei8mm",
"svebf16",
"i8mm",
"bf16",
"dgh",
"rng"
],
"compilers" : {
"gcc": [
{
"versions": "4.8:4.8.9",
"flags": "-march=armv8-a"
},
{
"versions": "4.9:5.9",
"flags": "-march=armv8-a+crc+crypto"
},
{
"versions": "6:6.9",
"flags" : "-march=armv8.1-a"
},
{
"versions": "7:7.9",
"flags" : "-march=armv8.2-a+crypto+fp16 -mtune=cortex-a72"
},
{
"versions": "8.0:8.9",
"flags" : "-march=armv8.2-a+fp16+dotprod+crypto -mtune=cortex-a72"
},
{
"versions": "9.0:9.9",
"flags" : "-mcpu=neoverse-v1"
},
{
"versions": "10.0:",
"flags" : "-mcpu=neoverse-v1"
}
],
"clang" : [
{
"versions": "3.9:4.9",
"flags" : "-march=armv8.2-a+fp16+crc+crypto"
},
{
"versions": "5:10",
"flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto"
},
{
"versions": "11:",
"flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
},
{
"versions": "12:",
"flags" : "-mcpu=neoverse-v1"
}
],
"arm" : [
{
"versions": "20:21.9",
"flags" : "-march=armv8.2-a+sve+fp16+rcpc+dotprod+crypto"
},
{
"versions": "22:",
"flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
}
]
}
},
"m1": { "m1": {
"from": ["armv8.4a"], "from": ["aarch64"],
"vendor": "Apple", "vendor": "Apple",
"features": [ "features": [
"fp", "fp",
@@ -2647,76 +2385,6 @@
] ]
} }
}, },
"m2": {
"from": ["m1", "armv8.5a"],
"vendor": "Apple",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"flagm2",
"frint",
"ecv",
"bf16",
"i8mm",
"bti"
],
"compilers": {
"gcc": [
{
"versions": "8.0:",
"flags" : "-march=armv8.5-a -mtune=generic"
}
],
"clang" : [
{
"versions": "9.0:12.0",
"flags" : "-march=armv8.5-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
],
"apple-clang": [
{
"versions": "11.0:12.5",
"flags" : "-march=armv8.5-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=vortex"
}
]
}
},
"arm": { "arm": {
"from": [], "from": [],
"vendor": "generic", "vendor": "generic",

View File

@@ -71,8 +71,6 @@
import re
import math
import multiprocessing
import sys
import threading
import time
from contextlib import contextmanager
@@ -411,12 +409,7 @@ def parse(self, stream, context=6, jobs=None):
pool = multiprocessing.Pool(jobs) pool = multiprocessing.Pool(jobs)
try: try:
# this is a workaround for a Python bug in Pool with ctrl-C # this is a workaround for a Python bug in Pool with ctrl-C
if sys.version_info >= (3, 2): results = pool.map_async(_parse_unpack, args, 1).get(9999999)
max_timeout = threading.TIMEOUT_MAX
else:
max_timeout = 9999999
results = pool.map_async(_parse_unpack, args, 1).get(max_timeout)
errors, warnings, timings = zip(*results) errors, warnings, timings = zip(*results)
finally: finally:
pool.terminate() pool.terminate()
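The workaround in this hunk exists because a blocking Pool.map() call can ignore KeyboardInterrupt; map_async(...).get() with a finite timeout keeps the main thread interruptible, and threading.TIMEOUT_MAX (Python >= 3.2) is the largest timeout get() accepts. A standalone sketch of the pattern, with a hypothetical _square worker:

    import multiprocessing
    import threading

    def _square(x):  # hypothetical worker function
        return x * x

    if __name__ == "__main__":
        pool = multiprocessing.Pool(4)
        try:
            # a plain pool.map() here could swallow ctrl-C
            results = pool.map_async(_square, range(10), 1).get(
                threading.TIMEOUT_MAX)
        finally:
            pool.terminate()
            pool.join()
        print(results)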


@@ -29,8 +29,8 @@ class Command(object):
- optionals: list of optional arguments (list) - optionals: list of optional arguments (list)
- subcommands: list of subcommand parsers (list) - subcommands: list of subcommand parsers (list)
""" """
def __init__(self, prog, description, usage,
def __init__(self, prog, description, usage, positionals, optionals, subcommands): positionals, optionals, subcommands):
self.prog = prog self.prog = prog
self.description = description self.description = description
self.usage = usage self.usage = usage
@@ -71,15 +71,15 @@ def parse(self, parser, prog):
""" """
self.parser = parser self.parser = parser
split_prog = parser.prog.split(" ") split_prog = parser.prog.split(' ')
split_prog[-1] = prog split_prog[-1] = prog
prog = " ".join(split_prog) prog = ' '.join(split_prog)
description = parser.description description = parser.description
fmt = parser._get_formatter() fmt = parser._get_formatter()
actions = parser._actions actions = parser._actions
groups = parser._mutually_exclusive_groups groups = parser._mutually_exclusive_groups
usage = fmt._format_usage(None, actions, groups, "").strip() usage = fmt._format_usage(None, actions, groups, '').strip()
# Go through actions and split them into optionals, positionals, # Go through actions and split them into optionals, positionals,
# and subcommands # and subcommands
@@ -90,8 +90,8 @@ def parse(self, parser, prog):
if action.option_strings: if action.option_strings:
flags = action.option_strings flags = action.option_strings
dest_flags = fmt._format_action_invocation(action) dest_flags = fmt._format_action_invocation(action)
help = self._expand_help(action) if action.help else "" help = self._expand_help(action) if action.help else ''
help = help.replace("\n", " ") help = help.replace('\n', ' ')
optionals.append((flags, dest_flags, help)) optionals.append((flags, dest_flags, help))
elif isinstance(action, argparse._SubParsersAction): elif isinstance(action, argparse._SubParsersAction):
for subaction in action._choices_actions: for subaction in action._choices_actions:
@@ -100,19 +100,20 @@ def parse(self, parser, prog):
# Look for aliases of the form 'name (alias, ...)' # Look for aliases of the form 'name (alias, ...)'
if self.aliases: if self.aliases:
match = re.match(r"(.*) \((.*)\)", subaction.metavar) match = re.match(r'(.*) \((.*)\)', subaction.metavar)
if match: if match:
aliases = match.group(2).split(", ") aliases = match.group(2).split(', ')
for alias in aliases: for alias in aliases:
subparser = action._name_parser_map[alias] subparser = action._name_parser_map[alias]
subcommands.append((subparser, alias)) subcommands.append((subparser, alias))
else: else:
args = fmt._format_action_invocation(action) args = fmt._format_action_invocation(action)
help = self._expand_help(action) if action.help else "" help = self._expand_help(action) if action.help else ''
help = help.replace("\n", " ") help = help.replace('\n', ' ')
positionals.append((args, help)) positionals.append((args, help))
return Command(prog, description, usage, positionals, optionals, subcommands) return Command(
prog, description, usage, positionals, optionals, subcommands)
def format(self, cmd): def format(self, cmd):
"""Returns the string representation of a single node in the """Returns the string representation of a single node in the
@@ -160,13 +161,14 @@ def write(self, parser):
raise raise
_rst_levels = ["=", "-", "^", "~", ":", "`"] _rst_levels = ['=', '-', '^', '~', ':', '`']
class ArgparseRstWriter(ArgparseWriter): class ArgparseRstWriter(ArgparseWriter):
"""Write argparse output as rst sections.""" """Write argparse output as rst sections."""
def __init__(self, prog, out=None, aliases=False, rst_levels=_rst_levels): def __init__(self, prog, out=None, aliases=False,
rst_levels=_rst_levels):
"""Create a new ArgparseRstWriter. """Create a new ArgparseRstWriter.
Parameters: Parameters:
@@ -215,12 +217,11 @@ def begin_command(self, prog):
{1} {1}
{2} {2}
""".format( """.format(prog.replace(' ', '-'), prog,
prog.replace(" ", "-"), prog, self.rst_levels[self.level] * len(prog) self.rst_levels[self.level] * len(prog))
)
def description(self, description): def description(self, description):
return description + "\n\n" return description + '\n\n'
def usage(self, usage): def usage(self, usage):
return """\ return """\
@@ -228,39 +229,33 @@ def usage(self, usage):
{0} {0}
""".format( """.format(usage)
usage
)
def begin_positionals(self): def begin_positionals(self):
return "\n**Positional arguments**\n\n" return '\n**Positional arguments**\n\n'
def positional(self, name, help): def positional(self, name, help):
return """\ return """\
{0} {0}
{1} {1}
""".format( """.format(name, help)
name, help
)
def end_positionals(self): def end_positionals(self):
return "" return ''
def begin_optionals(self): def begin_optionals(self):
return "\n**Optional arguments**\n\n" return '\n**Optional arguments**\n\n'
def optional(self, opts, help): def optional(self, opts, help):
return """\ return """\
``{0}`` ``{0}``
{1} {1}
""".format( """.format(opts, help)
opts, help
)
def end_optionals(self): def end_optionals(self):
return "" return ''
def begin_subcommands(self, subcommands): def begin_subcommands(self, subcommands):
string = """ string = """
@@ -272,10 +267,11 @@ def begin_subcommands(self, subcommands):
""" """
for cmd, _ in subcommands: for cmd, _ in subcommands:
prog = re.sub(r"^[^ ]* ", "", cmd.prog) prog = re.sub(r'^[^ ]* ', '', cmd.prog)
string += " * :ref:`{0} <{1}>`\n".format(prog, cmd.prog.replace(" ", "-")) string += ' * :ref:`{0} <{1}>`\n'.format(
prog, cmd.prog.replace(' ', '-'))
return string + "\n" return string + '\n'
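Taken together, the writer walks an argparse parser and emits one rst section per command. A minimal usage sketch with a hypothetical demo parser (out is passed explicitly, since this diff does not show how the out=None default is handled):

    import argparse
    import sys

    parser = argparse.ArgumentParser(prog="demo")
    parser.add_argument("--jobs", help="number of parallel jobs")

    writer = ArgparseRstWriter("demo", out=sys.stdout)
    writer.write(parser)  # emits rst sections for "demo" and any subcommands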
class ArgparseCompletionWriter(ArgparseWriter): class ArgparseCompletionWriter(ArgparseWriter):
@@ -310,11 +306,9 @@ def format(self, cmd):
# Flatten lists of lists # Flatten lists of lists
optionals = [x for xx in optionals for x in xx] optionals = [x for xx in optionals for x in xx]
return ( return (self.start_function(cmd.prog) +
self.start_function(cmd.prog) self.body(positionals, optionals, subcommands) +
+ self.body(positionals, optionals, subcommands) self.end_function(cmd.prog))
+ self.end_function(cmd.prog)
)
def start_function(self, prog): def start_function(self, prog):
"""Returns the syntax needed to begin a function definition. """Returns the syntax needed to begin a function definition.
@@ -325,8 +319,8 @@ def start_function(self, prog):
Returns: Returns:
str: the function definition beginning str: the function definition beginning
""" """
name = prog.replace("-", "_").replace(" ", "_") name = prog.replace('-', '_').replace(' ', '_')
return "\n_{0}() {{".format(name) return '\n_{0}() {{'.format(name)
def end_function(self, prog=None): def end_function(self, prog=None):
"""Returns the syntax needed to end a function definition. """Returns the syntax needed to end a function definition.
@@ -337,7 +331,7 @@ def end_function(self, prog=None):
Returns: Returns:
str: the function definition ending str: the function definition ending
""" """
return "}\n" return '}\n'
def body(self, positionals, optionals, subcommands): def body(self, positionals, optionals, subcommands):
"""Returns the body of the function. """Returns the body of the function.
@@ -350,7 +344,7 @@ def body(self, positionals, optionals, subcommands):
Returns: Returns:
str: the function body str: the function body
""" """
return "" return ''
def positionals(self, positionals): def positionals(self, positionals):
"""Returns the syntax for reporting positional arguments. """Returns the syntax for reporting positional arguments.
@@ -361,7 +355,7 @@ def positionals(self, positionals):
Returns: Returns:
str: the syntax for positional arguments str: the syntax for positional arguments
""" """
return "" return ''
def optionals(self, optionals): def optionals(self, optionals):
"""Returns the syntax for reporting optional flags. """Returns the syntax for reporting optional flags.
@@ -372,7 +366,7 @@ def optionals(self, optionals):
Returns: Returns:
str: the syntax for optional flags str: the syntax for optional flags
""" """
return "" return ''
def subcommands(self, subcommands): def subcommands(self, subcommands):
"""Returns the syntax for reporting subcommands. """Returns the syntax for reporting subcommands.
@@ -383,4 +377,4 @@ def subcommands(self, subcommands):
Returns: Returns:
str: the syntax for subcommand parsers str: the syntax for subcommand parsers
""" """
return "" return ''


@@ -18,22 +18,22 @@
map = map map = map
zip = zip zip = zip
from itertools import zip_longest as zip_longest # novm # noqa: F401 from itertools import zip_longest as zip_longest # novm # noqa: F401
from urllib.parse import urlencode as urlencode # novm # noqa: F401 from urllib.parse import urlencode as urlencode # novm # noqa: F401
from urllib.request import urlopen as urlopen # novm # noqa: F401 from urllib.request import urlopen as urlopen # novm # noqa: F401
if sys.version_info >= (3, 3): if sys.version_info >= (3, 3):
from collections.abc import Hashable as Hashable # novm from collections.abc import Hashable as Hashable # novm
from collections.abc import Iterable as Iterable # novm from collections.abc import Iterable as Iterable # novm
from collections.abc import Mapping as Mapping # novm from collections.abc import Mapping as Mapping # novm
from collections.abc import MutableMapping as MutableMapping # novm from collections.abc import MutableMapping as MutableMapping # novm
from collections.abc import MutableSequence as MutableSequence # novm from collections.abc import MutableSequence as MutableSequence # novm
from collections.abc import MutableSet as MutableSet # novm from collections.abc import MutableSet as MutableSet # novm
from collections.abc import Sequence as Sequence # novm from collections.abc import Sequence as Sequence # novm
else: else:
from collections import Hashable as Hashable # noqa: F401 from collections import Hashable as Hashable # noqa: F401
from collections import Iterable as Iterable # noqa: F401 from collections import Iterable as Iterable # noqa: F401
from collections import Mapping as Mapping # noqa: F401 from collections import Mapping as Mapping # noqa: F401
from collections import MutableMapping as MutableMapping # noqa: F401 from collections import MutableMapping as MutableMapping # noqa: F401
from collections import MutableSequence as MutableSequence # noqa: F401 from collections import MutableSequence as MutableSequence # noqa: F401
from collections import MutableSet as MutableSet # noqa: F401 from collections import MutableSet as MutableSet # noqa: F401
from collections import Sequence as Sequence # noqa: F401 from collections import Sequence as Sequence # noqa: F401

File diff suppressed because it is too large


@@ -13,7 +13,7 @@
import sys import sys
import traceback import traceback
from datetime import datetime, timedelta from datetime import datetime, timedelta
from typing import Any, Callable, Iterable, List, Tuple from typing import List, Tuple
import six import six
from six import string_types from six import string_types
@@ -21,7 +21,7 @@
from llnl.util.compat import MutableMapping, MutableSequence, zip_longest from llnl.util.compat import MutableMapping, MutableSequence, zip_longest
# Ignore emacs backups when listing modules # Ignore emacs backups when listing modules
ignore_modules = [r"^\.#", "~$"] ignore_modules = [r'^\.#', '~$']
def index_by(objects, *funcs): def index_by(objects, *funcs):
@@ -91,9 +91,9 @@ def index_by(objects, *funcs):
def caller_locals(): def caller_locals():
"""This will return the locals of the *parent* of the caller. """This will return the locals of the *parent* of the caller.
This allows a function to insert variables into its caller's This allows a function to insert variables into its caller's
scope. Yes, this is some black magic, and yes it's useful scope. Yes, this is some black magic, and yes it's useful
for implementing things like depends_on and provides. for implementing things like depends_on and provides.
""" """
# Passing zero here skips line context for speed. # Passing zero here skips line context for speed.
stack = inspect.stack(0) stack = inspect.stack(0)
@@ -105,7 +105,7 @@ def caller_locals():
def get_calling_module_name(): def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the """Make sure that the caller is a class definition, and return the
enclosing module's name. enclosing module's name.
""" """
# Passing zero here skips line context for speed. # Passing zero here skips line context for speed.
stack = inspect.stack(0) stack = inspect.stack(0)
@@ -115,13 +115,12 @@ def get_calling_module_name():
finally: finally:
del stack del stack
if "__module__" not in caller_locals: if '__module__' not in caller_locals:
raise RuntimeError( raise RuntimeError("Must invoke get_calling_module_name() "
"Must invoke get_calling_module_name() " "from inside a class definition!" "from inside a class definition!")
)
module_name = caller_locals["__module__"] module_name = caller_locals['__module__']
base_name = module_name.split(".")[-1] base_name = module_name.split('.')[-1]
return base_name return base_name
@@ -129,8 +128,8 @@ def attr_required(obj, attr_name):
"""Ensure that a class has a required attribute.""" """Ensure that a class has a required attribute."""
if not hasattr(obj, attr_name): if not hasattr(obj, attr_name):
raise RequiredAttributeError( raise RequiredAttributeError(
"No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__) "No required attribute '%s' in class '%s'"
) % (attr_name, obj.__class__.__name__))
def attr_setdefault(obj, name, value): def attr_setdefault(obj, name, value):
@@ -202,35 +201,33 @@ def _memoized_function(*args, **kwargs):
# TypeError is raised when indexing into a dict if the key is unhashable. # TypeError is raised when indexing into a dict if the key is unhashable.
raise six.raise_from( raise six.raise_from(
UnhashableArguments( UnhashableArguments(
"args + kwargs '{}' was not hashable for function '{}'".format( "args + kwargs '{}' was not hashable for function '{}'"
key, func.__name__ .format(key, func.__name__),
),
), ),
e, e)
)
return _memoized_function return _memoized_function
def list_modules(directory, **kwargs): def list_modules(directory, **kwargs):
"""Lists all of the modules, excluding ``__init__.py``, in a """Lists all of the modules, excluding ``__init__.py``, in a
particular directory. Listed packages have no particular particular directory. Listed packages have no particular
order.""" order."""
list_directories = kwargs.setdefault("directories", True) list_directories = kwargs.setdefault('directories', True)
for name in os.listdir(directory): for name in os.listdir(directory):
if name == "__init__.py": if name == '__init__.py':
continue continue
path = os.path.join(directory, name) path = os.path.join(directory, name)
if list_directories and os.path.isdir(path): if list_directories and os.path.isdir(path):
init_py = os.path.join(path, "__init__.py") init_py = os.path.join(path, '__init__.py')
if os.path.isfile(init_py): if os.path.isfile(init_py):
yield name yield name
elif name.endswith(".py"): elif name.endswith('.py'):
if not any(re.search(pattern, name) for pattern in ignore_modules): if not any(re.search(pattern, name) for pattern in ignore_modules):
yield re.sub(".py$", "", name) yield re.sub('.py$', '', name)
def decorator_with_or_without_args(decorator): def decorator_with_or_without_args(decorator):
@@ -260,34 +257,41 @@ def new_dec(*args, **kwargs):
def key_ordering(cls): def key_ordering(cls):
"""Decorates a class with extra methods that implement rich comparison """Decorates a class with extra methods that implement rich comparison
operations and ``__hash__``. The decorator assumes that the class operations and ``__hash__``. The decorator assumes that the class
implements a function called ``_cmp_key()``. The rich comparison implements a function called ``_cmp_key()``. The rich comparison
operations will compare objects using this key, and the ``__hash__`` operations will compare objects using this key, and the ``__hash__``
function will return the hash of this key. function will return the hash of this key.
If a class already has ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``, If a class already has ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, or ``__ge__`` defined, this decorator will overwrite them. ``__gt__``, or ``__ge__`` defined, this decorator will overwrite them.
Raises: Raises:
TypeError: If the class does not have a ``_cmp_key`` method TypeError: If the class does not have a ``_cmp_key`` method
""" """
def setter(name, value): def setter(name, value):
value.__name__ = name value.__name__ = name
setattr(cls, name, value) setattr(cls, name, value)
if not has_method(cls, "_cmp_key"): if not has_method(cls, '_cmp_key'):
raise TypeError("'%s' doesn't define _cmp_key()." % cls.__name__) raise TypeError("'%s' doesn't define _cmp_key()." % cls.__name__)
setter("__eq__", lambda s, o: (s is o) or (o is not None and s._cmp_key() == o._cmp_key())) setter('__eq__',
setter("__lt__", lambda s, o: o is not None and s._cmp_key() < o._cmp_key()) lambda s, o:
setter("__le__", lambda s, o: o is not None and s._cmp_key() <= o._cmp_key()) (s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
setter('__lt__',
lambda s, o: o is not None and s._cmp_key() < o._cmp_key())
setter('__le__',
lambda s, o: o is not None and s._cmp_key() <= o._cmp_key())
setter("__ne__", lambda s, o: (s is not o) and (o is None or s._cmp_key() != o._cmp_key())) setter('__ne__',
setter("__gt__", lambda s, o: o is None or s._cmp_key() > o._cmp_key()) lambda s, o:
setter("__ge__", lambda s, o: o is None or s._cmp_key() >= o._cmp_key()) (s is not o) and (o is None or s._cmp_key() != o._cmp_key()))
setter('__gt__',
lambda s, o: o is None or s._cmp_key() > o._cmp_key())
setter('__ge__',
lambda s, o: o is None or s._cmp_key() >= o._cmp_key())
setter("__hash__", lambda self: hash(self._cmp_key())) setter('__hash__', lambda self: hash(self._cmp_key()))
return cls return cls
@@ -454,7 +458,8 @@ def gt(self, other):
def le(self, other): def le(self, other):
if self is other: if self is other:
return True return True
return (other is not None) and not lazy_lt(other._cmp_iter, self._cmp_iter) return (other is not None) and not lazy_lt(other._cmp_iter,
self._cmp_iter)
def ge(self, other): def ge(self, other):
if self is other: if self is other:
@@ -484,9 +489,7 @@ def add_func_to_class(name, func):
@lazy_lexicographic_ordering @lazy_lexicographic_ordering
class HashableMap(MutableMapping): class HashableMap(MutableMapping):
"""This is a hashable, comparable dictionary. Hash is performed on """This is a hashable, comparable dictionary. Hash is performed on
a tuple of the values in the dictionary.""" a tuple of the values in the dictionary."""
__slots__ = ("dict",)
def __init__(self): def __init__(self):
self.dict = {} self.dict = {}
@@ -524,7 +527,7 @@ def copy(self):
def in_function(function_name): def in_function(function_name):
"""True if the caller was called from some function with """True if the caller was called from some function with
the supplied Name, False otherwise.""" the supplied Name, False otherwise."""
stack = inspect.stack() stack = inspect.stack()
try: try:
for elt in stack[2:]: for elt in stack[2:]:
@@ -537,25 +540,24 @@ def in_function(function_name):
def check_kwargs(kwargs, fun): def check_kwargs(kwargs, fun):
"""Helper for making functions with kwargs. Checks whether the kwargs """Helper for making functions with kwargs. Checks whether the kwargs
are empty after all of them have been popped off. If they're are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid. not, raises an error describing which kwargs are invalid.
Example:: Example::
def foo(self, **kwargs): def foo(self, **kwargs):
x = kwargs.pop('x', None) x = kwargs.pop('x', None)
y = kwargs.pop('y', None) y = kwargs.pop('y', None)
z = kwargs.pop('z', None) z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo) check_kwargs(kwargs, self.foo)
# This raises a TypeError: # This raises a TypeError:
foo(w='bad kwarg') foo(w='bad kwarg')
""" """
if kwargs: if kwargs:
raise TypeError( raise TypeError(
"'%s' is an invalid keyword argument for function %s()." "'%s' is an invalid keyword argument for function %s()."
% (next(iter(kwargs)), fun.__name__) % (next(iter(kwargs)), fun.__name__))
)
def match_predicate(*args): def match_predicate(*args):
@@ -571,7 +573,6 @@ def match_predicate(*args):
* any regex in a list or tuple of regexes matches. * any regex in a list or tuple of regexes matches.
* any predicate in args matches. * any predicate in args matches.
""" """
def match(string): def match(string):
for arg in args: for arg in args:
if isinstance(arg, string_types): if isinstance(arg, string_types):
@@ -584,11 +585,9 @@ def match(string):
if arg(string): if arg(string):
return True return True
else: else:
raise ValueError( raise ValueError("args to match_predicate must be regex, "
"args to match_predicate must be regex, " "list of regexes, or callable." "list of regexes, or callable.")
)
return False return False
return match return match
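A usage sketch for match_predicate: string arguments are treated as regexes, lists and tuples as groups of regexes, and callables as predicates; the combined predicate matches if any argument does:

    matches = match_predicate(r"\.py$", ["^test_", "^bench_"],
                              lambda s: len(s) > 40)
    assert matches("setup.py")    # regex argument
    assert matches("test_lock")   # regex from the list
    assert not matches("README")  # nothing matches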
@@ -648,7 +647,7 @@ def pretty_date(time, now=None):
day_diff = diff.days day_diff = diff.days
if day_diff < 0: if day_diff < 0:
return "" return ''
if day_diff == 0: if day_diff == 0:
if second_diff < 10: if second_diff < 10:
@@ -706,40 +705,43 @@ def pretty_string_to_date(date_str, now=None):
now = now or datetime.now() now = now or datetime.now()
# datetime formats # datetime formats
pattern[re.compile(r"^\d{4}$")] = lambda x: datetime.strptime(x, "%Y") pattern[re.compile(r'^\d{4}$')] = lambda x: datetime.strptime(x, '%Y')
pattern[re.compile(r"^\d{4}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m") pattern[re.compile(r'^\d{4}-\d{2}$')] = lambda x: datetime.strptime(
pattern[re.compile(r"^\d{4}-\d{2}-\d{2}$")] = lambda x: datetime.strptime(x, "%Y-%m-%d") x, '%Y-%m'
pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$")] = lambda x: datetime.strptime(
x, "%Y-%m-%d %H:%M"
) )
pattern[re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$")] = lambda x: datetime.strptime( pattern[re.compile(r'^\d{4}-\d{2}-\d{2}$')] = lambda x: datetime.strptime(
x, "%Y-%m-%d %H:%M:%S" x, '%Y-%m-%d'
) )
pattern[re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$')] = \
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M')
pattern[re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$')] = \
lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
pretty_regex = re.compile(r"(a|\d+)\s*(year|month|week|day|hour|minute|second)s?\s*ago") pretty_regex = re.compile(
r'(a|\d+)\s*(year|month|week|day|hour|minute|second)s?\s*ago')
def _n_xxx_ago(x): def _n_xxx_ago(x):
how_many, time_period = pretty_regex.search(x).groups() how_many, time_period = pretty_regex.search(x).groups()
how_many = 1 if how_many == "a" else int(how_many) how_many = 1 if how_many == 'a' else int(how_many)
# timedelta natively supports time periods up to 'weeks'. # timedelta natively supports time periods up to 'weeks'.
# To apply month or year we convert to 30 and 365 days # To apply month or year we convert to 30 and 365 days
if time_period == "month": if time_period == 'month':
how_many *= 30 how_many *= 30
time_period = "day" time_period = 'day'
elif time_period == "year": elif time_period == 'year':
how_many *= 365 how_many *= 365
time_period = "day" time_period = 'day'
kwargs = {(time_period + "s"): how_many} kwargs = {(time_period + 's'): how_many}
return now - timedelta(**kwargs) return now - timedelta(**kwargs)
pattern[pretty_regex] = _n_xxx_ago pattern[pretty_regex] = _n_xxx_ago
# yesterday # yesterday
callback = lambda x: now - timedelta(days=1) callback = lambda x: now - timedelta(days=1)
pattern[re.compile("^yesterday$")] = callback pattern[re.compile('^yesterday$')] = callback
for regexp, parser in pattern.items(): for regexp, parser in pattern.items():
if bool(regexp.match(date_str)): if bool(regexp.match(date_str)):
@@ -749,27 +751,8 @@ def _n_xxx_ago(x):
raise ValueError(msg) raise ValueError(msg)
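Together the patterns accept ISO-style date prefixes, relative phrases, and "yesterday". A few worked examples with a hypothetical fixed now for reproducibility:

    from datetime import datetime

    now = datetime(2022, 7, 11, 14, 0)
    pretty_string_to_date("2021-03", now)       # -> datetime(2021, 3, 1, 0, 0)
    pretty_string_to_date("2 months ago", now)  # -> now - timedelta(days=60)
    pretty_string_to_date("yesterday", now)     # -> now - timedelta(days=1)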
def pretty_seconds(seconds):
"""Seconds to string with appropriate units
Arguments:
seconds (float): Number of seconds
Returns:
str: Time string with units
"""
if seconds >= 1:
value, unit = seconds, "s"
elif seconds >= 1e-3:
value, unit = seconds * 1e3, "ms"
elif seconds >= 1e-6:
value, unit = seconds * 1e6, "us"
else:
value, unit = seconds * 1e9, "ns"
return "%.3f%s" % (value, unit)
class RequiredAttributeError(ValueError): class RequiredAttributeError(ValueError):
def __init__(self, message): def __init__(self, message):
super(RequiredAttributeError, self).__init__(message) super(RequiredAttributeError, self).__init__(message)
@@ -781,7 +764,6 @@ class ObjectWrapper(object):
This class is modeled after the stackoverflow answer: This class is modeled after the stackoverflow answer:
* http://stackoverflow.com/a/1445289/771663 * http://stackoverflow.com/a/1445289/771663
""" """
def __init__(self, wrapped_object): def __init__(self, wrapped_object):
wrapped_cls = type(wrapped_object) wrapped_cls = type(wrapped_object)
wrapped_name = wrapped_cls.__name__ wrapped_name = wrapped_cls.__name__
@@ -825,7 +807,7 @@ def __getattr__(self, name):
# requested but not yet set. The final 'getattr' line here requires # requested but not yet set. The final 'getattr' line here requires
# 'instance'/'_instance' to be defined or it will enter an infinite # 'instance'/'_instance' to be defined or it will enter an infinite
# loop, so protect against that here. # loop, so protect against that here.
if name in ["_instance", "instance"]: if name in ['_instance', 'instance']:
raise AttributeError() raise AttributeError()
return getattr(self.instance, name) return getattr(self.instance, name)
@@ -855,7 +837,7 @@ def __init__(self, ref_function):
self.ref_function = ref_function self.ref_function = ref_function
def __getattr__(self, name): def __getattr__(self, name):
if name == "ref_function": if name == 'ref_function':
raise AttributeError() raise AttributeError()
return getattr(self.ref_function(), name) return getattr(self.ref_function(), name)
@@ -893,8 +875,8 @@ def load_module_from_file(module_name, module_path):
# This recipe is adapted from https://stackoverflow.com/a/67692/771663 # This recipe is adapted from https://stackoverflow.com/a/67692/771663
if sys.version_info[0] == 3 and sys.version_info[1] >= 5: if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util import importlib.util
spec = importlib.util.spec_from_file_location( # novm
spec = importlib.util.spec_from_file_location(module_name, module_path) # novm module_name, module_path)
module = importlib.util.module_from_spec(spec) # novm module = importlib.util.module_from_spec(spec) # novm
# The module object needs to exist in sys.modules before the # The module object needs to exist in sys.modules before the
# loader executes the module code. # loader executes the module code.
@@ -911,7 +893,6 @@ def load_module_from_file(module_name, module_path):
raise raise
elif sys.version_info[0] == 2: elif sys.version_info[0] == 2:
import imp import imp
module = imp.load_source(module_name, module_path) module = imp.load_source(module_name, module_path)
return module return module
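The Python 3 branch above is the standard importlib recipe from the linked answer; a self-contained sketch of just that branch:

    import importlib.util
    import sys

    def load_module_from_file(module_name, module_path):
        spec = importlib.util.spec_from_file_location(module_name, module_path)
        module = importlib.util.module_from_spec(spec)
        # register before exec so the module can import itself recursively
        sys.modules[module_name] = module
        try:
            spec.loader.exec_module(module)
        except BaseException:
            del sys.modules[module_name]
            raise
        return module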
@@ -943,10 +924,8 @@ def uniq(sequence):
def star(func): def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions""" """Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args): def _wrapper(args):
return func(*args) return func(*args)
return _wrapper return _wrapper
@@ -955,23 +934,22 @@ class Devnull(object):
See https://stackoverflow.com/a/2929954. See https://stackoverflow.com/a/2929954.
""" """
def write(self, *_): def write(self, *_):
pass pass
def elide_list(line_list, max_num=10): def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements, """Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example:: replacing intervening elements with '...'. For example::
elide_list([1,2,3,4,5,6], 4) elide_list([1,2,3,4,5,6], 4)
gives:: gives::
[1, 2, 3, '...', 6] [1, 2, 3, '...', 6]
""" """
if len(line_list) > max_num: if len(line_list) > max_num:
return line_list[: max_num - 1] + ["..."] + line_list[-1:] return line_list[:max_num - 1] + ['...'] + line_list[-1:]
else: else:
return line_list return line_list
@@ -994,40 +972,7 @@ def enum(**kwargs):
Args: Args:
**kwargs: explicit dictionary of enums **kwargs: explicit dictionary of enums
""" """
return type("Enum", (object,), kwargs) return type('Enum', (object,), kwargs)
def stable_partition(
input_iterable, # type: Iterable
predicate_fn, # type: Callable[[Any], bool]
):
# type: (...) -> Tuple[List[Any], List[Any]]
"""Partition the input iterable according to a custom predicate.
Args:
input_iterable: input iterable to be partitioned.
predicate_fn: predicate function accepting an iterable item
as argument.
Return:
Tuple of the list of elements evaluating to True, and
list of elements evaluating to False.
"""
true_items, false_items = [], []
for item in input_iterable:
if predicate_fn(item):
true_items.append(item)
continue
false_items.append(item)
return true_items, false_items
def ensure_last(lst, *elements):
"""Performs a stable partition of lst, ensuring that ``elements``
occur at the end of ``lst`` in specified order. Mutates ``lst``.
Raises ``ValueError`` if any ``elements`` are not already in ``lst``."""
for elt in elements:
lst.append(lst.pop(lst.index(elt)))
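Usage sketches for the two helpers removed here: stable_partition preserves input order within each half, and ensure_last mutates the list in place:

    evens, odds = stable_partition([1, 2, 3, 4, 5, 6], lambda x: x % 2 == 0)
    # evens == [2, 4, 6], odds == [1, 3, 5]

    lst = ["a", "b", "c", "d"]
    ensure_last(lst, "b", "a")
    # lst == ["c", "d", "b", "a"]; raises ValueError for a missing element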
class TypedMutableSequence(MutableSequence): class TypedMutableSequence(MutableSequence):
@@ -1043,7 +988,6 @@ class Foo(TypedMutableSequence):
if isinstance(l, Foo): if isinstance(l, Foo):
# do something # do something
""" """
def __init__(self, iterable): def __init__(self, iterable):
self.data = list(iterable) self.data = list(iterable)
@@ -1073,7 +1017,7 @@ class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks.""" """A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self): def __init__(self):
self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]] self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]]
def __bool__(self): def __bool__(self):
"""Whether any exceptions were handled.""" """Whether any exceptions were handled."""
@@ -1092,15 +1036,17 @@ def grouped_message(self, with_tracebacks=True):
# type: (bool) -> str # type: (bool) -> str
"""Print out an error message coalescing all the forwarded errors.""" """Print out an error message coalescing all the forwarded errors."""
each_exception_message = [ each_exception_message = [
"{0} raised {1}: {2}{3}".format( '{0} raised {1}: {2}{3}'.format(
context, context,
exc.__class__.__name__, exc.__class__.__name__,
exc, exc,
"\n{0}".format("".join(tb)) if with_tracebacks else "", '\n{0}'.format(''.join(tb)) if with_tracebacks else '',
) )
for context, exc, tb in self.exceptions for context, exc, tb in self.exceptions
] ]
return "due to the following failures:\n{0}".format("\n".join(each_exception_message)) return 'due to the following failures:\n{0}'.format(
'\n'.join(each_exception_message)
)
class GroupedExceptionForwarder(object): class GroupedExceptionForwarder(object):
@@ -1126,16 +1072,3 @@ def __exit__(self, exc_type, exc_value, tb):
# Suppress any exception from being re-raised: # Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__. # https://docs.python.org/3/reference/datamodel.html#object.__exit__.
return True return True
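A usage sketch for the pair, assuming the handler exposes a forward(context) method that returns a GroupedExceptionForwarder (that method is not visible in this hunk): each with-block records a failure instead of raising, and the caller reports them all at once:

    def fetch():  # hypothetical failing task
        raise ValueError("no mirror configured")

    def build():  # hypothetical succeeding task
        pass

    h = GroupedExceptionHandler()
    for name, task in [("fetch", fetch), ("build", build)]:
        with h.forward(name):
            task()
    if h:  # __bool__: were any exceptions recorded?
        print("tasks failed " + h.grouped_message(with_tracebacks=False))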
class classproperty(object):
"""Non-data descriptor to evaluate a class-level property. The function that performs
the evaluation is injected at creation time and take an instance (could be None) and
an owner (i.e. the class that originated the instance)
"""
def __init__(self, callback):
self.callback = callback
def __get__(self, instance, owner):
return self.callback(owner)
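A minimal usage sketch: because __get__ hands the owner class to the callback, the property can be read from the class itself as well as from instances (Package is a hypothetical example):

    class Package(object):
        @classproperty
        def fullname(cls):
            return "pkg-" + cls.__name__.lower()

    assert Package.fullname == "pkg-package"
    assert Package().fullname == "pkg-package"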


@@ -13,12 +13,12 @@
from collections import OrderedDict from collections import OrderedDict
import llnl.util.tty as tty import llnl.util.tty as tty
from llnl.util.filesystem import BaseDirectoryVisitor, mkdirp, touch, traverse_tree from llnl.util.filesystem import mkdirp, touch, traverse_tree
from llnl.util.symlink import islink, symlink from llnl.util.symlink import islink, symlink
__all__ = ["LinkTree"] __all__ = ['LinkTree']
empty_file_name = ".spack-empty" empty_file_name = '.spack-empty'
def remove_link(src, dest): def remove_link(src, dest):
@@ -38,28 +38,26 @@ class MergeConflict:
project(src_a) == project(src_b) == dst project(src_a) == project(src_b) == dst
""" """
def __init__(self, dst, src_a=None, src_b=None): def __init__(self, dst, src_a=None, src_b=None):
self.dst = dst self.dst = dst
self.src_a = src_a self.src_a = src_a
self.src_b = src_b self.src_b = src_b
class SourceMergeVisitor(BaseDirectoryVisitor): class SourceMergeVisitor(object):
""" """
Visitor that produces actions: Visitor that produces actions:
- An ordered list of directories to create in dst - An ordered list of directories to create in dst
- A list of files to link in dst - A list of files to link in dst
- A list of merge conflicts in dst/ - A list of merge conflicts in dst/
""" """
def __init__(self, ignore=None): def __init__(self, ignore=None):
self.ignore = ignore if ignore is not None else lambda f: False self.ignore = ignore if ignore is not None else lambda f: False
# When mapping <src root> to <dst root>/<projection>, we need # When mapping <src root> to <dst root>/<projection>, we need
# to prepend the <projection> bit to the relative path in the # to prepend the <projection> bit to the relative path in the
# destination dir. # destination dir.
self.projection = "" self.projection = ''
# When a file blocks another file, the conflict can sometimes # When a file blocks another file, the conflict can sometimes
# be resolved / ignored (e.g. <prefix>/LICENSE or # be resolved / ignored (e.g. <prefix>/LICENSE or
@@ -75,7 +73,7 @@ def __init__(self, ignore=None):
# so that we have a fast lookup and can run mkdir in order. # so that we have a fast lookup and can run mkdir in order.
self.directories = OrderedDict() self.directories = OrderedDict()
# Files to link. Maps dst_rel to (src_root, src_rel) # Files to link. Maps dst_rel to (src_rel, src_root)
self.files = OrderedDict() self.files = OrderedDict()
def before_visit_dir(self, root, rel_path, depth): def before_visit_dir(self, root, rel_path, depth):
@@ -90,13 +88,10 @@ def before_visit_dir(self, root, rel_path, depth):
elif proj_rel_path in self.files: elif proj_rel_path in self.files:
# Can't create a dir where a file is. # Can't create a dir where a file is.
src_a_root, src_a_relpath = self.files[proj_rel_path] src_a_root, src_a_relpath = self.files[proj_rel_path]
self.fatal_conflicts.append( self.fatal_conflicts.append(MergeConflict(
MergeConflict( dst=proj_rel_path,
dst=proj_rel_path, src_a=os.path.join(src_a_root, src_a_relpath),
src_a=os.path.join(src_a_root, src_a_relpath), src_b=os.path.join(root, rel_path)))
src_b=os.path.join(root, rel_path),
)
)
return False return False
elif proj_rel_path in self.directories: elif proj_rel_path in self.directories:
# No new directory, carry on. # No new directory, carry on.
@@ -106,6 +101,9 @@ def before_visit_dir(self, root, rel_path, depth):
self.directories[proj_rel_path] = (root, rel_path) self.directories[proj_rel_path] = (root, rel_path)
return True return True
def after_visit_dir(self, root, rel_path, depth):
pass
def before_visit_symlinked_dir(self, root, rel_path, depth): def before_visit_symlinked_dir(self, root, rel_path, depth):
""" """
Replace symlinked dirs with actual directories when possible in low depths, Replace symlinked dirs with actual directories when possible in low depths,
@@ -138,6 +136,9 @@ def before_visit_symlinked_dir(self, root, rel_path, depth):
self.visit_file(root, rel_path, depth) self.visit_file(root, rel_path, depth)
return False return False
def after_visit_symlinked_dir(self, root, rel_path, depth):
pass
def visit_file(self, root, rel_path, depth): def visit_file(self, root, rel_path, depth):
proj_rel_path = os.path.join(self.projection, rel_path) proj_rel_path = os.path.join(self.projection, rel_path)
@@ -146,59 +147,46 @@ def visit_file(self, root, rel_path, depth):
elif proj_rel_path in self.directories: elif proj_rel_path in self.directories:
# Can't create a file where a dir is; fatal error # Can't create a file where a dir is; fatal error
src_a_root, src_a_relpath = self.directories[proj_rel_path] src_a_root, src_a_relpath = self.directories[proj_rel_path]
self.fatal_conflicts.append( self.fatal_conflicts.append(MergeConflict(
MergeConflict( dst=proj_rel_path,
dst=proj_rel_path, src_a=os.path.join(src_a_root, src_a_relpath),
src_a=os.path.join(src_a_root, src_a_relpath), src_b=os.path.join(root, rel_path)))
src_b=os.path.join(root, rel_path),
)
)
elif proj_rel_path in self.files: elif proj_rel_path in self.files:
# In some cases we can resolve file-file conflicts # In some cases we can resolve file-file conflicts
src_a_root, src_a_relpath = self.files[proj_rel_path] src_a_root, src_a_relpath = self.files[proj_rel_path]
self.file_conflicts.append( self.file_conflicts.append(MergeConflict(
MergeConflict( dst=proj_rel_path,
dst=proj_rel_path, src_a=os.path.join(src_a_root, src_a_relpath),
src_a=os.path.join(src_a_root, src_a_relpath), src_b=os.path.join(root, rel_path)))
src_b=os.path.join(root, rel_path),
)
)
else: else:
# Otherwise register this file to be linked. # Otherwise register this file to be linked.
self.files[proj_rel_path] = (root, rel_path) self.files[proj_rel_path] = (root, rel_path)
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)
def set_projection(self, projection): def set_projection(self, projection):
self.projection = os.path.normpath(projection) self.projection = os.path.normpath(projection)
# Todo, is this how to check in general for empty projection? # Todo, is this how to check in general for empty projection?
if self.projection == ".": if self.projection == '.':
self.projection = "" self.projection = ''
return return
# If there is a projection, we'll also create the directories # If there is a projection, we'll also create the directories
# it consists of, and check whether that's causing conflicts. # it consists of, and check whether that's causing conflicts.
path = "" path = ''
for part in self.projection.split(os.sep): for part in self.projection.split(os.sep):
path = os.path.join(path, part) path = os.path.join(path, part)
if path not in self.files: if path not in self.files:
self.directories[path] = ("<projection>", path) self.directories[path] = ('<projection>', path)
else: else:
# Can't create a dir where a file is. # Can't create a dir where a file is.
src_a_root, src_a_relpath = self.files[path] src_a_root, src_a_relpath = self.files[path]
self.fatal_conflicts.append( self.fatal_conflicts.append(MergeConflict(
MergeConflict( dst=path,
dst=path, src_a=os.path.join(src_a_root, src_a_relpath),
src_a=os.path.join(src_a_root, src_a_relpath), src_b=os.path.join('<projection>', path)))
src_b=os.path.join("<projection>", path),
)
)
class DestinationMergeVisitor(BaseDirectoryVisitor): class DestinationMergeVisitor(object):
"""DestinatinoMergeVisitor takes a SourceMergeVisitor """DestinatinoMergeVisitor takes a SourceMergeVisitor
and: and:
@@ -212,7 +200,6 @@ class DestinationMergeVisitor(BaseDirectoryVisitor):
in the target prefix will never be merged with in the target prefix will never be merged with
directories in the sources directories. directories in the sources directories.
""" """
def __init__(self, source_merge_visitor): def __init__(self, source_merge_visitor):
self.src = source_merge_visitor self.src = source_merge_visitor
@@ -221,11 +208,10 @@ def before_visit_dir(self, root, rel_path, depth):
# and don't traverse deeper # and don't traverse deeper
if rel_path in self.src.files: if rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path] src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append( self.src.fatal_conflicts.append(MergeConflict(
MergeConflict( rel_path,
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path) os.path.join(src_a_root, src_a_relpath),
) os.path.join(root, rel_path)))
)
return False return False
# If destination dir was also a src dir, remove the mkdir # If destination dir was also a src dir, remove the mkdir
@@ -238,6 +224,9 @@ def before_visit_dir(self, root, rel_path, depth):
# don't descend into it. # don't descend into it.
return False return False
def after_visit_dir(self, root, rel_path, depth):
pass
def before_visit_symlinked_dir(self, root, rel_path, depth): def before_visit_symlinked_dir(self, root, rel_path, depth):
""" """
Symlinked directories in the destination prefix should Symlinked directories in the destination prefix should
@@ -247,44 +236,39 @@ def before_visit_symlinked_dir(self, root, rel_path, depth):
# Always conflict # Always conflict
if rel_path in self.src.directories: if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path] src_a_root, src_a_relpath = self.src.directories[rel_path]
self.src.fatal_conflicts.append( self.src.fatal_conflicts.append(MergeConflict(
MergeConflict( rel_path,
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path) os.path.join(src_a_root, src_a_relpath),
) os.path.join(root, rel_path)))
)
if rel_path in self.src.files: if rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path] src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append( self.src.fatal_conflicts.append(MergeConflict(
MergeConflict( rel_path,
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path) os.path.join(src_a_root, src_a_relpath),
) os.path.join(root, rel_path)))
)
# Never descend into symlinked target dirs. # Never descend into symlinked target dirs.
return False return False
def after_visit_symlinked_dir(self, root, rel_path, depth):
pass
def visit_file(self, root, rel_path, depth): def visit_file(self, root, rel_path, depth):
# Can't merge a file if target already exists # Can't merge a file if target already exists
if rel_path in self.src.directories: if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path] src_a_root, src_a_relpath = self.src.directories[rel_path]
self.src.fatal_conflicts.append( self.src.fatal_conflicts.append(MergeConflict(
MergeConflict( rel_path,
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path) os.path.join(src_a_root, src_a_relpath),
) os.path.join(root, rel_path)))
)
elif rel_path in self.src.files: elif rel_path in self.src.files:
src_a_root, src_a_relpath = self.src.files[rel_path] src_a_root, src_a_relpath = self.src.files[rel_path]
self.src.fatal_conflicts.append( self.src.fatal_conflicts.append(MergeConflict(
MergeConflict( rel_path,
rel_path, os.path.join(src_a_root, src_a_relpath), os.path.join(root, rel_path) os.path.join(src_a_root, src_a_relpath),
) os.path.join(root, rel_path)))
)
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)
class LinkTree(object): class LinkTree(object):
@@ -297,31 +281,30 @@ class LinkTree(object):
symlinked to, to prevent the source directory from ever being symlinked to, to prevent the source directory from ever being
modified. modified.
""" """
def __init__(self, source_root): def __init__(self, source_root):
if not os.path.exists(source_root): if not os.path.exists(source_root):
raise IOError("No such file or directory: '%s'", source_root) raise IOError("No such file or directory: '%s'", source_root)
self._root = source_root self._root = source_root
def find_conflict(self, dest_root, ignore=None, ignore_file_conflicts=False): def find_conflict(self, dest_root, ignore=None,
ignore_file_conflicts=False):
"""Returns the first file in dest that conflicts with src""" """Returns the first file in dest that conflicts with src"""
ignore = ignore or (lambda x: False) ignore = ignore or (lambda x: False)
conflicts = self.find_dir_conflicts(dest_root, ignore) conflicts = self.find_dir_conflicts(dest_root, ignore)
if not ignore_file_conflicts: if not ignore_file_conflicts:
conflicts.extend( conflicts.extend(
dst dst for src, dst
for src, dst in self.get_file_map(dest_root, ignore).items() in self.get_file_map(dest_root, ignore).items()
if os.path.exists(dst) if os.path.exists(dst))
)
if conflicts: if conflicts:
return conflicts[0] return conflicts[0]
def find_dir_conflicts(self, dest_root, ignore): def find_dir_conflicts(self, dest_root, ignore):
conflicts = [] conflicts = []
kwargs = {"follow_nonexisting": False, "ignore": ignore} kwargs = {'follow_nonexisting': False, 'ignore': ignore}
for src, dest in traverse_tree(self._root, dest_root, **kwargs): for src, dest in traverse_tree(self._root, dest_root, **kwargs):
if os.path.isdir(src): if os.path.isdir(src):
if os.path.exists(dest) and not os.path.isdir(dest): if os.path.exists(dest) and not os.path.isdir(dest):
@@ -332,7 +315,7 @@ def find_dir_conflicts(self, dest_root, ignore):
def get_file_map(self, dest_root, ignore): def get_file_map(self, dest_root, ignore):
merge_map = {} merge_map = {}
kwargs = {"follow_nonexisting": True, "ignore": ignore} kwargs = {'follow_nonexisting': True, 'ignore': ignore}
for src, dest in traverse_tree(self._root, dest_root, **kwargs): for src, dest in traverse_tree(self._root, dest_root, **kwargs):
if not os.path.isdir(src): if not os.path.isdir(src):
merge_map[src] = dest merge_map[src] = dest
@@ -354,7 +337,8 @@ def merge_directories(self, dest_root, ignore):
touch(marker) touch(marker)
def unmerge_directories(self, dest_root, ignore): def unmerge_directories(self, dest_root, ignore):
for src, dest in traverse_tree(self._root, dest_root, ignore=ignore, order="post"): for src, dest in traverse_tree(
self._root, dest_root, ignore=ignore, order='post'):
if os.path.isdir(src): if os.path.isdir(src):
if not os.path.exists(dest): if not os.path.exists(dest):
continue continue
@@ -370,7 +354,8 @@ def unmerge_directories(self, dest_root, ignore):
if os.path.exists(marker): if os.path.exists(marker):
os.remove(marker) os.remove(marker)
def merge(self, dest_root, ignore_conflicts=False, ignore=None, link=symlink, relative=False): def merge(self, dest_root, ignore_conflicts=False, ignore=None,
link=symlink, relative=False):
"""Link all files in src into dest, creating directories """Link all files in src into dest, creating directories
if necessary. if necessary.
@@ -392,8 +377,7 @@ def merge(self, dest_root, ignore_conflicts=False, ignore=None, link=symlink, re
ignore = lambda x: False ignore = lambda x: False
conflict = self.find_conflict( conflict = self.find_conflict(
dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts)
)
if conflict: if conflict:
raise SingleMergeConflictError(conflict) raise SingleMergeConflictError(conflict)
@@ -432,7 +416,8 @@ class MergeConflictError(Exception):
class SingleMergeConflictError(MergeConflictError): class SingleMergeConflictError(MergeConflictError):
def __init__(self, path): def __init__(self, path):
super(MergeConflictError, self).__init__("Package merge blocked by file: %s" % path) super(MergeConflictError, self).__init__(
"Package merge blocked by file: %s" % path)
class MergeConflictSummary(MergeConflictError): class MergeConflictSummary(MergeConflictError):
@@ -445,6 +430,5 @@ def __init__(self, conflicts):
# show the first 3 merge conflicts. # show the first 3 merge conflicts.
for conflict in conflicts[:3]: for conflict in conflicts[:3]:
msg += "\n `{0}` and `{1}` both project to `{2}`".format( msg += "\n `{0}` and `{1}` both project to `{2}`".format(
conflict.src_a, conflict.src_b, conflict.dst conflict.src_a, conflict.src_b, conflict.dst)
)
super(MergeConflictSummary, self).__init__(msg) super(MergeConflictSummary, self).__init__(msg)
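A minimal usage sketch of the LinkTree surface shown in this file, with hypothetical paths (the source prefix must exist and the destination must be writable):

    tree = LinkTree("/opt/software/pkg")
    conflict = tree.find_conflict("/opt/view")
    if conflict is None:
        tree.merge("/opt/view")  # mkdir and symlink everything into the view
    else:
        print("merge blocked by", conflict)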


@@ -12,26 +12,25 @@
from typing import Dict, Tuple # novm from typing import Dict, Tuple # novm
import llnl.util.tty as tty import llnl.util.tty as tty
from llnl.util.lang import pretty_seconds
import spack.util.string import spack.util.string
if sys.platform != "win32": if sys.platform != 'win32':
import fcntl import fcntl
__all__ = [ __all__ = [
"Lock", 'Lock',
"LockDowngradeError", 'LockDowngradeError',
"LockUpgradeError", 'LockUpgradeError',
"LockTransaction", 'LockTransaction',
"WriteTransaction", 'WriteTransaction',
"ReadTransaction", 'ReadTransaction',
"LockError", 'LockError',
"LockTimeoutError", 'LockTimeoutError',
"LockPermissionError", 'LockPermissionError',
"LockROFileError", 'LockROFileError',
"CantCreateLockError", 'CantCreateLockError'
] ]
@@ -48,7 +47,6 @@ class OpenFile(object):
the file descriptor from the file handle if needed -- or we could make this track the file descriptor from the file handle if needed -- or we could make this track
file descriptors as well in the future. file descriptors as well in the future.
""" """
def __init__(self, fh): def __init__(self, fh):
self.fh = fh self.fh = fh
self.refs = 0 self.refs = 0
@@ -94,11 +92,11 @@ def get_fh(self, path):
path (str): path to lock file we want a filehandle for path (str): path to lock file we want a filehandle for
""" """
# Open writable files as 'r+' so we can upgrade to write later # Open writable files as 'r+' so we can upgrade to write later
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), "r+" os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'
pid = os.getpid() pid = os.getpid()
open_file = None # OpenFile object, if there is one open_file = None # OpenFile object, if there is one
stat = None # stat result for the lockfile, if it exists stat = None # stat result for the lockfile, if it exists
try: try:
# see whether we've seen this inode/pid before # see whether we've seen this inode/pid before
@@ -111,7 +109,7 @@ def get_fh(self, path):
raise raise
# path does not exist -- fail if we won't be able to create it # path does not exist -- fail if we won't be able to create it
parent = os.path.dirname(path) or "." parent = os.path.dirname(path) or '.'
if not os.access(parent, os.W_OK): if not os.access(parent, os.W_OK):
raise CantCreateLockError(path) raise CantCreateLockError(path)
@@ -121,7 +119,7 @@ def get_fh(self, path):
# we know path exists but not if it's writable. If it's read-only, # we know path exists but not if it's writable. If it's read-only,
# only open the file for reading (and fail if we're trying to get # only open the file for reading (and fail if we're trying to get
# an exclusive (write) lock on it) # an exclusive (write) lock on it)
os_mode, fh_mode = os.O_RDONLY, "r" os_mode, fh_mode = os.O_RDONLY, 'r'
fd = os.open(path, os_mode) fd = os.open(path, os_mode)
fh = os.fdopen(fd, fh_mode) fh = os.fdopen(fd, fh_mode)
@@ -164,10 +162,10 @@ def release_fh(self, path):
def _attempts_str(wait_time, nattempts): def _attempts_str(wait_time, nattempts):
# Don't print anything if we succeeded on the first try # Don't print anything if we succeeded on the first try
if nattempts <= 1: if nattempts <= 1:
return "" return ''
attempts = spack.util.string.plural(nattempts, "attempt") attempts = spack.util.string.plural(nattempts, 'attempt')
return " after {} and {}".format(pretty_seconds(wait_time), attempts) return ' after {0:0.2f}s and {1}'.format(wait_time, attempts)
class LockType(object): class LockType(object):
@@ -190,7 +188,8 @@ def to_module(tid):
@staticmethod @staticmethod
def is_valid(op): def is_valid(op):
return op == LockType.READ or op == LockType.WRITE return op == LockType.READ \
or op == LockType.WRITE
class Lock(object): class Lock(object):
@@ -208,7 +207,8 @@ class Lock(object):
overlapping byte ranges in the same file). overlapping byte ranges in the same file).
""" """
def __init__(self, path, start=0, length=0, default_timeout=None, debug=False, desc=""): def __init__(self, path, start=0, length=0, default_timeout=None,
debug=False, desc=''):
"""Construct a new lock on the file at ``path``. """Construct a new lock on the file at ``path``.
By default, the lock applies to the whole file. Optionally, By default, the lock applies to the whole file. Optionally,
@@ -243,7 +243,7 @@ def __init__(self, path, start=0, length=0, default_timeout=None, debug=False, d
self.debug = debug self.debug = debug
# optional debug description # optional debug description
self.desc = " ({0})".format(desc) if desc else "" self.desc = ' ({0})'.format(desc) if desc else ''
# If the user doesn't set a default timeout, or if they choose # If the user doesn't set a default timeout, or if they choose
# None, 0, etc. then lock attempts will not time out (unless the # None, 0, etc. then lock attempts will not time out (unless the
@@ -280,17 +280,17 @@ def _poll_interval_generator(_wait_times=None):
def __repr__(self): def __repr__(self):
"""Formal representation of the lock.""" """Formal representation of the lock."""
rep = "{0}(".format(self.__class__.__name__) rep = '{0}('.format(self.__class__.__name__)
for attr, value in self.__dict__.items(): for attr, value in self.__dict__.items():
rep += "{0}={1}, ".format(attr, value.__repr__()) rep += '{0}={1}, '.format(attr, value.__repr__())
return "{0})".format(rep.strip(", ")) return '{0})'.format(rep.strip(', '))
def __str__(self): def __str__(self):
"""Readable string (with key fields) of the lock.""" """Readable string (with key fields) of the lock."""
location = "{0}[{1}:{2}]".format(self.path, self._start, self._length) location = '{0}[{1}:{2}]'.format(self.path, self._start, self._length)
timeout = "timeout={0}".format(self.default_timeout) timeout = 'timeout={0}'.format(self.default_timeout)
activity = "#reads={0}, #writes={1}".format(self._reads, self._writes) activity = '#reads={0}, #writes={1}'.format(self._reads, self._writes)
return "({0}, {1}, {2})".format(location, timeout, activity) return '({0}, {1}, {2})'.format(location, timeout, activity)
def _lock(self, op, timeout=None): def _lock(self, op, timeout=None):
"""This takes a lock using POSIX locks (``fcntl.lockf``). """This takes a lock using POSIX locks (``fcntl.lockf``).
@@ -305,7 +305,7 @@ def _lock(self, op, timeout=None):
assert LockType.is_valid(op) assert LockType.is_valid(op)
op_str = LockType.to_str(op) op_str = LockType.to_str(op)
self._log_acquiring("{0} LOCK".format(op_str)) self._log_acquiring('{0} LOCK'.format(op_str))
timeout = timeout or self.default_timeout timeout = timeout or self.default_timeout
# Create file and parent directories if they don't exist. # Create file and parent directories if they don't exist.
@@ -313,16 +313,14 @@ def _lock(self, op, timeout=None):
self._ensure_parent_directory() self._ensure_parent_directory()
self._file = file_tracker.get_fh(self.path) self._file = file_tracker.get_fh(self.path)
if LockType.to_module(op) == fcntl.LOCK_EX and self._file.mode == "r": if LockType.to_module(op) == fcntl.LOCK_EX and self._file.mode == 'r':
# Attempt to upgrade to write lock w/a read-only file. # Attempt to upgrade to write lock w/a read-only file.
# If the file were writable, we'd have opened it 'r+' # If the file were writable, we'd have opened it 'r+'
raise LockROFileError(self.path) raise LockROFileError(self.path)
self._log_debug( self._log_debug("{0} locking [{1}:{2}]: timeout {3} sec"
"{} locking [{}:{}]: timeout {}".format( .format(op_str.lower(), self._start, self._length,
op_str.lower(), self._start, self._length, pretty_seconds(timeout or 0) timeout))
)
)
poll_intervals = iter(Lock._poll_interval_generator()) poll_intervals = iter(Lock._poll_interval_generator())
start_time = time.time() start_time = time.time()
@@ -341,8 +339,8 @@ def _lock(self, op, timeout=None):
total_wait_time = time.time() - start_time total_wait_time = time.time() - start_time
return total_wait_time, num_attempts return total_wait_time, num_attempts
total_wait_time = time.time() - start_time raise LockTimeoutError("Timed out waiting for a {0} lock."
raise LockTimeoutError(op_str.lower(), self.path, total_wait_time, num_attempts) .format(op_str.lower()))
def _poll_lock(self, op): def _poll_lock(self, op):
"""Attempt to acquire the lock in a non-blocking manner. Return whether """Attempt to acquire the lock in a non-blocking manner. Return whether
@@ -351,19 +349,16 @@ def _poll_lock(self, op):
module_op = LockType.to_module(op) module_op = LockType.to_module(op)
try: try:
# Try to get the lock (will raise if not available.) # Try to get the lock (will raise if not available.)
fcntl.lockf( fcntl.lockf(self._file, module_op | fcntl.LOCK_NB,
self._file, module_op | fcntl.LOCK_NB, self._length, self._start, os.SEEK_SET self._length, self._start, os.SEEK_SET)
)
# help for debugging distributed locking # help for debugging distributed locking
if self.debug: if self.debug:
# All locks read the owner PID and host # All locks read the owner PID and host
self._read_log_debug_data() self._read_log_debug_data()
self._log_debug( self._log_debug('{0} locked {1} [{2}:{3}] (owner={4})'
"{0} locked {1} [{2}:{3}] (owner={4})".format( .format(LockType.to_str(op), self.path,
LockType.to_str(op), self.path, self._start, self._length, self.pid self._start, self._length, self.pid))
)
)
# Exclusive locks write their PID/host # Exclusive locks write their PID/host
if module_op == fcntl.LOCK_EX: if module_op == fcntl.LOCK_EX:
@@ -383,17 +378,14 @@ def _ensure_parent_directory(self):
# relative paths to lockfiles in the current directory have no parent # relative paths to lockfiles in the current directory have no parent
if not parent: if not parent:
return "." return '.'
try: try:
os.makedirs(parent) os.makedirs(parent)
except OSError as e: except OSError as e:
# os.makedirs can fail in a number of ways when the directory already exists. # makedirs can fail when directory already exists.
# With EISDIR, we know it exists, and others like EEXIST, EACCES, and EROFS if not (e.errno == errno.EEXIST and os.path.isdir(parent) or
# are fine if we ensure that the directory exists. e.errno == errno.EISDIR):
# Python 3 allows an exist_ok parameter and ignores any OSError as long as
# the directory exists.
if not (e.errno == errno.EISDIR or os.path.isdir(parent)):
raise raise
return parent return parent
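As the rewritten comment notes, Python 3 offers the same race-tolerant directory creation directly; a one-line sketch (the path is illustrative):

    import os

    # exist_ok suppresses the OSError whenever the directory already exists,
    # regardless of which process won the creation race.
    os.makedirs("/tmp/spack-example/locks", exist_ok=True)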
@@ -404,9 +396,9 @@ def _read_log_debug_data(self):
line = self._file.read() line = self._file.read()
if line: if line:
pid, host = line.strip().split(",") pid, host = line.strip().split(',')
_, _, self.pid = pid.rpartition("=") _, _, self.pid = pid.rpartition('=')
_, _, self.host = host.rpartition("=") _, _, self.host = host.rpartition('=')
self.pid = int(self.pid) self.pid = int(self.pid)
def _write_log_debug_data(self): def _write_log_debug_data(self):
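For reference, the parsing above assumes the "pid=<pid>,host=<host>" payload that exclusive locks write; a quick round-trip with illustrative values:

    line = "pid=4242,host=login1"
    pid_part, host_part = line.strip().split(",")
    pid = int(pid_part.rpartition("=")[2])    # 4242
    host = host_part.rpartition("=")[2]       # "login1"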
@@ -431,7 +423,8 @@ def _unlock(self):
be masquerading as write locks, but this removes either. be masquerading as write locks, but this removes either.
""" """
fcntl.lockf(self._file, fcntl.LOCK_UN, self._length, self._start, os.SEEK_SET) fcntl.lockf(self._file, fcntl.LOCK_UN,
self._length, self._start, os.SEEK_SET)
file_tracker.release_fh(self.path) file_tracker.release_fh(self.path)
self._file = None self._file = None
@@ -456,7 +449,7 @@ def acquire_read(self, timeout=None):
wait_time, nattempts = self._lock(LockType.READ, timeout=timeout) wait_time, nattempts = self._lock(LockType.READ, timeout=timeout)
self._reads += 1 self._reads += 1
# Log if acquired, which includes counts when verbose # Log if acquired, which includes counts when verbose
self._log_acquired("READ LOCK", wait_time, nattempts) self._log_acquired('READ LOCK', wait_time, nattempts)
return True return True
else: else:
# Increment the read count for nested lock tracking # Increment the read count for nested lock tracking
@@ -481,7 +474,7 @@ def acquire_write(self, timeout=None):
wait_time, nattempts = self._lock(LockType.WRITE, timeout=timeout) wait_time, nattempts = self._lock(LockType.WRITE, timeout=timeout)
self._writes += 1 self._writes += 1
# Log if acquired, which includes counts when verbose # Log if acquired, which includes counts when verbose
self._log_acquired("WRITE LOCK", wait_time, nattempts) self._log_acquired('WRITE LOCK', wait_time, nattempts)
# return True only if we weren't nested in a read lock. # return True only if we weren't nested in a read lock.
# TODO: we may need to return two values: whether we got # TODO: we may need to return two values: whether we got
@@ -568,7 +561,7 @@ def release_read(self, release_fn=None):
""" """
assert self._reads > 0 assert self._reads > 0
locktype = "READ LOCK" locktype = 'READ LOCK'
if self._reads == 1 and self._writes == 0: if self._reads == 1 and self._writes == 0:
self._log_releasing(locktype) self._log_releasing(locktype)
@@ -576,7 +569,7 @@ def release_read(self, release_fn=None):
release_fn = release_fn or true_fn release_fn = release_fn or true_fn
result = release_fn() result = release_fn()
self._unlock() # can raise LockError. self._unlock() # can raise LockError.
self._reads = 0 self._reads = 0
self._log_released(locktype) self._log_released(locktype)
return result return result
@@ -604,14 +597,14 @@ def release_write(self, release_fn=None):
assert self._writes > 0 assert self._writes > 0
release_fn = release_fn or true_fn release_fn = release_fn or true_fn
locktype = "WRITE LOCK" locktype = 'WRITE LOCK'
if self._writes == 1 and self._reads == 0: if self._writes == 1 and self._reads == 0:
self._log_releasing(locktype) self._log_releasing(locktype)
# we need to call release_fn before releasing the lock # we need to call release_fn before releasing the lock
result = release_fn() result = release_fn()
self._unlock() # can raise LockError. self._unlock() # can raise LockError.
self._writes = 0 self._writes = 0
self._log_released(locktype) self._log_released(locktype)
return result return result
@@ -632,55 +625,56 @@ def cleanup(self):
raise LockError("Attempting to cleanup active lock.") raise LockError("Attempting to cleanup active lock.")
def _get_counts_desc(self): def _get_counts_desc(self):
return ( return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
"(reads {0}, writes {1})".format(self._reads, self._writes) if tty.is_verbose() else "" if tty.is_verbose() else ''
)
def _log_acquired(self, locktype, wait_time, nattempts): def _log_acquired(self, locktype, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts) attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now() now = datetime.now()
desc = "Acquired at %s" % now.strftime("%H:%M:%S.%f") desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, "{0}{1}".format(desc, attempts_part))) self._log_debug(self._status_msg(locktype, '{0}{1}'
.format(desc, attempts_part)))
def _log_acquiring(self, locktype): def _log_acquiring(self, locktype):
self._log_debug(self._status_msg(locktype, "Acquiring"), level=3) self._log_debug(self._status_msg(locktype, 'Acquiring'), level=3)
def _log_debug(self, *args, **kwargs): def _log_debug(self, *args, **kwargs):
"""Output lock debug messages.""" """Output lock debug messages."""
kwargs["level"] = kwargs.get("level", 2) kwargs['level'] = kwargs.get('level', 2)
tty.debug(*args, **kwargs) tty.debug(*args, **kwargs)
def _log_downgraded(self, wait_time, nattempts): def _log_downgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts) attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now() now = datetime.now()
desc = "Downgraded at %s" % now.strftime("%H:%M:%S.%f") desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("READ LOCK", "{0}{1}".format(desc, attempts_part))) self._log_debug(self._status_msg('READ LOCK', '{0}{1}'
.format(desc, attempts_part)))
def _log_downgrading(self): def _log_downgrading(self):
self._log_debug(self._status_msg("WRITE LOCK", "Downgrading"), level=3) self._log_debug(self._status_msg('WRITE LOCK', 'Downgrading'), level=3)
def _log_released(self, locktype): def _log_released(self, locktype):
now = datetime.now() now = datetime.now()
desc = "Released at %s" % now.strftime("%H:%M:%S.%f") desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, desc)) self._log_debug(self._status_msg(locktype, desc))
def _log_releasing(self, locktype): def _log_releasing(self, locktype):
self._log_debug(self._status_msg(locktype, "Releasing"), level=3) self._log_debug(self._status_msg(locktype, 'Releasing'), level=3)
def _log_upgraded(self, wait_time, nattempts): def _log_upgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts) attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now() now = datetime.now()
desc = "Upgraded at %s" % now.strftime("%H:%M:%S.%f") desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg("WRITE LOCK", "{0}{1}".format(desc, attempts_part))) self._log_debug(self._status_msg('WRITE LOCK', '{0}{1}'.
format(desc, attempts_part)))
def _log_upgrading(self): def _log_upgrading(self):
self._log_debug(self._status_msg("READ LOCK", "Upgrading"), level=3) self._log_debug(self._status_msg('READ LOCK', 'Upgrading'), level=3)
def _status_msg(self, locktype, status): def _status_msg(self, locktype, status):
status_desc = "[{0}] {1}".format(status, self._get_counts_desc()) status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
return "{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}".format( return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
locktype, self, status_desc locktype, self, status_desc)
)
class LockTransaction(object): class LockTransaction(object):
@@ -721,7 +715,7 @@ def __init__(self, lock, acquire=None, release=None, timeout=None):
def __enter__(self): def __enter__(self):
if self._enter() and self._acquire_fn: if self._enter() and self._acquire_fn:
self._as = self._acquire_fn() self._as = self._acquire_fn()
if hasattr(self._as, "__enter__"): if hasattr(self._as, '__enter__'):
return self._as.__enter__() return self._as.__enter__()
else: else:
return self._as return self._as
@@ -733,7 +727,7 @@ def release_fn():
if self._release_fn is not None: if self._release_fn is not None:
return self._release_fn(type, value, traceback) return self._release_fn(type, value, traceback)
if self._as and hasattr(self._as, "__exit__"): if self._as and hasattr(self._as, '__exit__'):
if self._as.__exit__(type, value, traceback): if self._as.__exit__(type, value, traceback):
suppress = True suppress = True
@@ -745,7 +739,6 @@ def release_fn():
class ReadTransaction(LockTransaction): class ReadTransaction(LockTransaction):
"""LockTransaction context manager that does a read and releases it.""" """LockTransaction context manager that does a read and releases it."""
def _enter(self): def _enter(self):
return self._lock.acquire_read(self._timeout) return self._lock.acquire_read(self._timeout)
@@ -755,7 +748,6 @@ def _exit(self, release_fn):
class WriteTransaction(LockTransaction): class WriteTransaction(LockTransaction):
"""LockTransaction context manager that does a write and releases it.""" """LockTransaction context manager that does a write and releases it."""
def _enter(self): def _enter(self):
return self._lock.acquire_write(self._timeout) return self._lock.acquire_write(self._timeout)
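A hedged usage sketch of the two transaction helpers; the lock path and the reload/flush/query callbacks are hypothetical, but the keyword arguments match the constructor shown above:

    lock = Lock("/tmp/index.lock")

    # Shared access: many readers may hold the byte range at once.
    with ReadTransaction(lock, acquire=reload_index, timeout=10):
        query_index()

    # Exclusive access; per __exit__ above, the release callback receives
    # the (type, value, traceback) exception triple before the lock drops.
    with WriteTransaction(lock, acquire=reload_index, release=flush_index):
        update_index()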
@@ -769,7 +761,6 @@ class LockError(Exception):
class LockDowngradeError(LockError): class LockDowngradeError(LockError):
"""Raised when unable to downgrade from a write to a read lock.""" """Raised when unable to downgrade from a write to a read lock."""
def __init__(self, path): def __init__(self, path):
msg = "Cannot downgrade lock from write to read on file: %s" % path msg = "Cannot downgrade lock from write to read on file: %s" % path
super(LockDowngradeError, self).__init__(msg) super(LockDowngradeError, self).__init__(msg)
@@ -782,22 +773,9 @@ class LockLimitError(LockError):
class LockTimeoutError(LockError): class LockTimeoutError(LockError):
"""Raised when an attempt to acquire a lock times out.""" """Raised when an attempt to acquire a lock times out."""
def __init__(self, lock_type, path, time, attempts):
fmt = "Timed out waiting for a {} lock after {}.\n Made {} {} on file: {}"
super(LockTimeoutError, self).__init__(
fmt.format(
lock_type,
pretty_seconds(time),
attempts,
"attempt" if attempts == 1 else "attempts",
path,
)
)
class LockUpgradeError(LockError): class LockUpgradeError(LockError):
"""Raised when unable to upgrade from a read to a write lock.""" """Raised when unable to upgrade from a read to a write lock."""
def __init__(self, path): def __init__(self, path):
msg = "Cannot upgrade lock from read to write on file: %s" % path msg = "Cannot upgrade lock from read to write on file: %s" % path
super(LockUpgradeError, self).__init__(msg) super(LockUpgradeError, self).__init__(msg)
@@ -809,7 +787,6 @@ class LockPermissionError(LockError):
class LockROFileError(LockPermissionError): class LockROFileError(LockPermissionError):
"""Tried to take an exclusive lock on a read-only file.""" """Tried to take an exclusive lock on a read-only file."""
def __init__(self, path): def __init__(self, path):
msg = "Can't take write lock on read-only file: %s" % path msg = "Can't take write lock on read-only file: %s" % path
super(LockROFileError, self).__init__(msg) super(LockROFileError, self).__init__(msg)
@@ -817,7 +794,6 @@ def __init__(self, path):
class CantCreateLockError(LockPermissionError): class CantCreateLockError(LockPermissionError):
"""Attempt to create a lock in an unwritable location.""" """Attempt to create a lock in an unwritable location."""
def __init__(self, path): def __init__(self, path):
msg = "cannot create lock '%s': " % path msg = "cannot create lock '%s': " % path
msg += "file does not exist and location is not writable" msg += "file does not exist and location is not writable"

View File

@@ -10,7 +10,7 @@
""" """
from multiprocessing import Semaphore, Value from multiprocessing import Semaphore, Value
__all__ = ["Barrier"] __all__ = ['Barrier']
class Barrier: class Barrier:
@@ -24,7 +24,7 @@ class Barrier:
def __init__(self, n, timeout=None): def __init__(self, n, timeout=None):
self.n = n self.n = n
self.to = timeout self.to = timeout
self.count = Value("i", 0) self.count = Value('i', 0)
self.mutex = Semaphore(1) self.mutex = Semaphore(1)
self.turnstile1 = Semaphore(0) self.turnstile1 = Semaphore(0)
self.turnstile2 = Semaphore(1) self.turnstile2 = Semaphore(1)
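The counter and two turnstiles are the classic reusable two-phase barrier. A sketch of the wait logic this state supports (the actual method body falls outside this hunk, so treat the shape as an assumption):

    def wait(self):
        # Phase 1: the n-th arrival locks turnstile2 and opens turnstile1.
        with self.mutex:
            self.count.value += 1
            if self.count.value == self.n:
                self.turnstile2.acquire()
                self.turnstile1.release()
        self.turnstile1.acquire(timeout=self.to)   # False return = timed out
        self.turnstile1.release()

        # Phase 2: the last process out re-arms the barrier for reuse.
        with self.mutex:
            self.count.value -= 1
            if self.count.value == 0:
                self.turnstile1.acquire()
                self.turnstile2.release()
        self.turnstile2.acquire(timeout=self.to)
        self.turnstile2.release()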

View File

@@ -11,7 +11,7 @@
from llnl.util import lang from llnl.util import lang
is_windows = _platform == "win32" is_windows = _platform == 'win32'
if is_windows: if is_windows:
from win32file import CreateHardLink from win32file import CreateHardLink
@@ -47,7 +47,7 @@ def _win32_junction(path, link):
# os.symlink will fail if link exists, emulate the behavior here # os.symlink will fail if link exists, emulate the behavior here
if exists(link): if exists(link):
raise OSError(errno.EEXIST, "File exists: %s -> %s" % (link, path)) raise OSError(errno.EEXIST, 'File exists: %s -> %s' % (link, path))
if not os.path.isabs(path): if not os.path.isabs(path):
parent = os.path.join(link, os.pardir) parent = os.path.join(link, os.pardir)
@@ -61,14 +61,13 @@ def _win32_junction(path, link):
def _win32_can_symlink(): def _win32_can_symlink():
tempdir = tempfile.mkdtemp() tempdir = tempfile.mkdtemp()
dpath = join(tempdir, "dpath") dpath = join(tempdir, 'dpath')
fpath = join(tempdir, "fpath.txt") fpath = join(tempdir, 'fpath.txt')
dlink = join(tempdir, "dlink") dlink = join(tempdir, 'dlink')
flink = join(tempdir, "flink.txt") flink = join(tempdir, 'flink.txt')
import llnl.util.filesystem as fs import llnl.util.filesystem as fs
fs.touchp(fpath) fs.touchp(fpath)
try: try:
@@ -107,6 +106,7 @@ def _win32_is_junction(path):
FILE_ATTRIBUTE_REPARSE_POINT = 0x400 FILE_ATTRIBUTE_REPARSE_POINT = 0x400
res = GetFileAttributes(path) res = GetFileAttributes(path)
return res != INVALID_FILE_ATTRIBUTES and bool(res & FILE_ATTRIBUTE_REPARSE_POINT) return res != INVALID_FILE_ATTRIBUTES and \
bool(res & FILE_ATTRIBUTE_REPARSE_POINT)
return False return False
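The junction test reduces to a bit test against the attribute mask; an illustrative evaluation with made-up attribute values:

    FILE_ATTRIBUTE_REPARSE_POINT = 0x400
    INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF

    res = 0x410  # e.g. FILE_ATTRIBUTE_DIRECTORY (0x10) | reparse bit
    is_junction = (res != INVALID_FILE_ATTRIBUTES
                   and bool(res & FILE_ATTRIBUTE_REPARSE_POINT))  # True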

View File

@@ -54,7 +54,7 @@ def is_stacktrace():
def set_debug(level=0): def set_debug(level=0):
global _debug global _debug
assert level >= 0, "Debug level must be a non-negative value" assert level >= 0, 'Debug level must be a non-negative value'
_debug = level _debug = level
@@ -110,7 +110,10 @@ def output_filter(filter_fn):
class SuppressOutput: class SuppressOutput:
"""Class for disabling output in a scope using 'with' keyword""" """Class for disabling output in a scope using 'with' keyword"""
def __init__(self, msg_enabled=True, warn_enabled=True, error_enabled=True): def __init__(self,
msg_enabled=True,
warn_enabled=True,
error_enabled=True):
self._msg_enabled_initial = _msg_enabled self._msg_enabled_initial = _msg_enabled
self._warn_enabled_initial = _warn_enabled self._warn_enabled_initial = _warn_enabled
@@ -161,10 +164,11 @@ def get_timestamp(force=False):
"""Get a string timestamp""" """Get a string timestamp"""
if _debug or _timestamp or force: if _debug or _timestamp or force:
# Note inclusion of the PID is useful for parallel builds. # Note inclusion of the PID is useful for parallel builds.
pid = ", {0}".format(os.getpid()) if show_pid() else "" pid = ', {0}'.format(os.getpid()) if show_pid() else ''
return "[{0}{1}] ".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid) return '[{0}{1}] '.format(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
else: else:
return "" return ''
def msg(message, *args, **kwargs): def msg(message, *args, **kwargs):
@@ -174,14 +178,26 @@ def msg(message, *args, **kwargs):
if isinstance(message, Exception): if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message)) message = "%s: %s" % (message.__class__.__name__, str(message))
newline = kwargs.get("newline", True) newline = kwargs.get('newline', True)
st_text = "" st_text = ""
if _stacktrace: if _stacktrace:
st_text = process_stacktrace(2) st_text = process_stacktrace(2)
if newline: if newline:
cprint("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message)))) cprint(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
else: else:
cwrite("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message)))) cwrite(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
for arg in args: for arg in args:
print(indent + _output_filter(six.text_type(arg))) print(indent + _output_filter(six.text_type(arg)))
@@ -190,19 +206,23 @@ def info(message, *args, **kwargs):
if isinstance(message, Exception): if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message)) message = "%s: %s" % (message.__class__.__name__, str(message))
format = kwargs.get("format", "*b") format = kwargs.get('format', '*b')
stream = kwargs.get("stream", sys.stdout) stream = kwargs.get('stream', sys.stdout)
wrap = kwargs.get("wrap", False) wrap = kwargs.get('wrap', False)
break_long_words = kwargs.get("break_long_words", False) break_long_words = kwargs.get('break_long_words', False)
st_countback = kwargs.get("countback", 3) st_countback = kwargs.get('countback', 3)
st_text = "" st_text = ""
if _stacktrace: if _stacktrace:
st_text = process_stacktrace(st_countback) st_text = process_stacktrace(st_countback)
cprint( cprint(
"@%s{%s==>} %s%s" "@%s{%s==>} %s%s" % (
% (format, st_text, get_timestamp(), cescape(_output_filter(six.text_type(message)))), format,
stream=stream, st_text,
get_timestamp(),
cescape(_output_filter(six.text_type(message)))
),
stream=stream
) )
for arg in args: for arg in args:
if wrap: if wrap:
@@ -210,25 +230,27 @@ def info(message, *args, **kwargs):
_output_filter(six.text_type(arg)), _output_filter(six.text_type(arg)),
initial_indent=indent, initial_indent=indent,
subsequent_indent=indent, subsequent_indent=indent,
break_long_words=break_long_words, break_long_words=break_long_words
) )
for line in lines: for line in lines:
stream.write(line + "\n") stream.write(line + '\n')
else: else:
stream.write(indent + _output_filter(six.text_type(arg)) + "\n") stream.write(
indent + _output_filter(six.text_type(arg)) + '\n'
)
def verbose(message, *args, **kwargs): def verbose(message, *args, **kwargs):
if _verbose: if _verbose:
kwargs.setdefault("format", "c") kwargs.setdefault('format', 'c')
info(message, *args, **kwargs) info(message, *args, **kwargs)
def debug(message, *args, **kwargs): def debug(message, *args, **kwargs):
level = kwargs.get("level", 1) level = kwargs.get('level', 1)
if is_debug(level): if is_debug(level):
kwargs.setdefault("format", "g") kwargs.setdefault('format', 'g')
kwargs.setdefault("stream", sys.stderr) kwargs.setdefault('stream', sys.stderr)
info(message, *args, **kwargs) info(message, *args, **kwargs)
@@ -236,8 +258,8 @@ def error(message, *args, **kwargs):
if not error_enabled(): if not error_enabled():
return return
kwargs.setdefault("format", "*r") kwargs.setdefault('format', '*r')
kwargs.setdefault("stream", sys.stderr) kwargs.setdefault('stream', sys.stderr)
info("Error: " + six.text_type(message), *args, **kwargs) info("Error: " + six.text_type(message), *args, **kwargs)
@@ -245,27 +267,27 @@ def warn(message, *args, **kwargs):
if not warn_enabled(): if not warn_enabled():
return return
kwargs.setdefault("format", "*Y") kwargs.setdefault('format', '*Y')
kwargs.setdefault("stream", sys.stderr) kwargs.setdefault('stream', sys.stderr)
info("Warning: " + six.text_type(message), *args, **kwargs) info("Warning: " + six.text_type(message), *args, **kwargs)
def die(message, *args, **kwargs): def die(message, *args, **kwargs):
kwargs.setdefault("countback", 4) kwargs.setdefault('countback', 4)
error(message, *args, **kwargs) error(message, *args, **kwargs)
sys.exit(1) sys.exit(1)
def get_number(prompt, **kwargs): def get_number(prompt, **kwargs):
default = kwargs.get("default", None) default = kwargs.get('default', None)
abort = kwargs.get("abort", None) abort = kwargs.get('abort', None)
if default is not None and abort is not None: if default is not None and abort is not None:
prompt += " (default is %s, %s to abort) " % (default, abort) prompt += ' (default is %s, %s to abort) ' % (default, abort)
elif default is not None: elif default is not None:
prompt += " (default is %s) " % default prompt += ' (default is %s) ' % default
elif abort is not None: elif abort is not None:
prompt += " (%s to abort) " % abort prompt += ' (%s to abort) ' % abort
number = None number = None
while number is None: while number is None:
@@ -288,16 +310,17 @@ def get_number(prompt, **kwargs):
def get_yes_or_no(prompt, **kwargs): def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get("default", None) default_value = kwargs.get('default', None)
if default_value is None: if default_value is None:
prompt += " [y/n] " prompt += ' [y/n] '
elif default_value is True: elif default_value is True:
prompt += " [Y/n] " prompt += ' [Y/n] '
elif default_value is False: elif default_value is False:
prompt += " [y/N] " prompt += ' [y/N] '
else: else:
raise ValueError("default for get_yes_no() must be True, False, or None.") raise ValueError(
"default for get_yes_no() must be True, False, or None.")
result = None result = None
while result is None: while result is None:
@@ -308,9 +331,9 @@ def get_yes_or_no(prompt, **kwargs):
if result is None: if result is None:
print("Please enter yes or no.") print("Please enter yes or no.")
else: else:
if ans == "y" or ans == "yes": if ans == 'y' or ans == 'yes':
result = True result = True
elif ans == "n" or ans == "no": elif ans == 'n' or ans == 'no':
result = False result = False
return result return result
@@ -322,12 +345,12 @@ def hline(label=None, **kwargs):
char (str): Char to draw the line with. Default '-' char (str): Char to draw the line with. Default '-'
max_width (int): Maximum width of the line. Default is 64 chars. max_width (int): Maximum width of the line. Default is 64 chars.
""" """
char = kwargs.pop("char", "-") char = kwargs.pop('char', '-')
max_width = kwargs.pop("max_width", 64) max_width = kwargs.pop('max_width', 64)
if kwargs: if kwargs:
raise TypeError( raise TypeError(
"'%s' is an invalid keyword argument for this function." % next(kwargs.iterkeys()) "'%s' is an invalid keyword argument for this function."
) % next(kwargs.iterkeys()))
rows, cols = terminal_size() rows, cols = terminal_size()
if not cols: if not cols:
@@ -351,14 +374,13 @@ def hline(label=None, **kwargs):
def terminal_size(): def terminal_size():
"""Gets the dimensions of the console: (rows, cols).""" """Gets the dimensions of the console: (rows, cols)."""
if _platform != "win32": if _platform != "win32":
def ioctl_gwinsz(fd): def ioctl_gwinsz(fd):
try: try:
rc = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234")) rc = struct.unpack('hh', fcntl.ioctl(
fd, termios.TIOCGWINSZ, '1234'))
except BaseException: except BaseException:
return return
return rc return rc
rc = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) rc = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not rc: if not rc:
try: try:
@@ -368,14 +390,12 @@ def ioctl_gwinsz(fd):
except BaseException: except BaseException:
pass pass
if not rc: if not rc:
rc = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80)) rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(rc[0]), int(rc[1]) return int(rc[0]), int(rc[1])
else: else:
if sys.version_info[0] < 3: if sys.version_info[0] < 3:
raise RuntimeError( raise RuntimeError("Terminal size not obtainable on Windows with a\
"Terminal size not obtainable on Windows with a\ Python version older than 3")
Python version older than 3" rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
)
rc = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80))
return int(rc[0]), int(rc[1]) return int(rc[0]), int(rc[1])
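A standalone sketch of the unix probe above: TIOCGWINSZ fills a winsize struct whose first two unsigned shorts are rows and columns (this only works when stdout is a real tty):

    import fcntl
    import struct
    import sys
    import termios

    packed = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, b"\x00" * 4)
    rows, cols = struct.unpack("hh", packed)

On Python 3, shutil.get_terminal_size() wraps the same query portably, falling back to the LINES/COLUMNS environment variables much like the code above.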

View File

@@ -18,27 +18,29 @@
class ColumnConfig: class ColumnConfig:
def __init__(self, cols): def __init__(self, cols):
self.cols = cols self.cols = cols
self.line_length = 0 self.line_length = 0
self.valid = True self.valid = True
self.widths = [0] * cols # does not include ansi colors self.widths = [0] * cols # does not include ansi colors
def __repr__(self): def __repr__(self):
attrs = [(a, getattr(self, a)) for a in dir(self) if not a.startswith("__")] attrs = [(a, getattr(self, a))
for a in dir(self) if not a.startswith("__")]
return "<Config: %s>" % ", ".join("%s: %r" % a for a in attrs) return "<Config: %s>" % ", ".join("%s: %r" % a for a in attrs)
def config_variable_cols(elts, console_width, padding, cols=0): def config_variable_cols(elts, console_width, padding, cols=0):
"""Variable-width column fitting algorithm. """Variable-width column fitting algorithm.
This function determines the most columns that can fit in the This function determines the most columns that can fit in the
screen width. Unlike uniform fitting, where all columns take screen width. Unlike uniform fitting, where all columns take
the width of the longest element in the list, each column takes the width of the longest element in the list, each column takes
the width of its own longest element. This packs elements more the width of its own longest element. This packs elements more
efficiently on screen. efficiently on screen.
If cols is nonzero, force the layout to use exactly that many columns. If cols is nonzero, force the layout to use exactly that many columns.
""" """
if cols < 0: if cols < 0:
raise ValueError("cols must be non-negative.") raise ValueError("cols must be non-negative.")
@@ -62,8 +64,8 @@ def config_variable_cols(elts, console_width, padding, cols=0):
if conf.widths[col] < (length + p): if conf.widths[col] < (length + p):
conf.line_length += length + p - conf.widths[col] conf.line_length += length + p - conf.widths[col]
conf.widths[col] = length + p conf.widths[col] = length + p
conf.valid = conf.line_length < console_width conf.valid = (conf.line_length < console_width)
try: try:
config = next(conf for conf in reversed(configs) if conf.valid) config = next(conf for conf in reversed(configs) if conf.valid)
@@ -79,9 +81,9 @@ def config_variable_cols(elts, console_width, padding, cols=0):
def config_uniform_cols(elts, console_width, padding, cols=0): def config_uniform_cols(elts, console_width, padding, cols=0):
"""Uniform-width column fitting algorithm. """Uniform-width column fitting algorithm.
Determines the longest element in the list, and determines how Determines the longest element in the list, and determines how
many columns of that width will fit on screen. Returns a many columns of that width will fit on screen. Returns a
corresponding column config. corresponding column config.
""" """
if cols < 0: if cols < 0:
raise ValueError("cols must be non-negative.") raise ValueError("cols must be non-negative.")
@@ -120,18 +122,18 @@ def colify(elts, **options):
and fit less data on the screen and fit less data on the screen
""" """
# Get keyword arguments or set defaults # Get keyword arguments or set defaults
cols = options.pop("cols", 0) cols = options.pop("cols", 0)
output = options.pop("output", sys.stdout) output = options.pop("output", sys.stdout)
indent = options.pop("indent", 0) indent = options.pop("indent", 0)
padding = options.pop("padding", 2) padding = options.pop("padding", 2)
tty = options.pop("tty", None) tty = options.pop('tty', None)
method = options.pop("method", "variable") method = options.pop("method", "variable")
console_cols = options.pop("width", None) console_cols = options.pop("width", None)
if options: if options:
raise TypeError( raise TypeError(
"'%s' is an invalid keyword argument for this function." % next(options.iterkeys()) "'%s' is an invalid keyword argument for this function."
) % next(options.iterkeys()))
# elts needs to be an array of strings so we can count the elements # elts needs to be an array of strings so we can count the elements
elts = [text_type(elt) for elt in elts] elts = [text_type(elt) for elt in elts]
@@ -139,10 +141,10 @@ def colify(elts, **options):
return (0, ()) return (0, ())
# environment size is of the form "<rows>x<cols>" # environment size is of the form "<rows>x<cols>"
env_size = os.environ.get("COLIFY_SIZE") env_size = os.environ.get('COLIFY_SIZE')
if env_size: if env_size:
try: try:
r, c = env_size.split("x") r, c = env_size.split('x')
console_rows, console_cols = int(r), int(c) console_rows, console_cols = int(r), int(c)
tty = True tty = True
except BaseException: except BaseException:
@@ -178,7 +180,7 @@ def colify(elts, **options):
elt = col * rows + row elt = col * rows + row
width = config.widths[col] + cextra(elts[elt]) width = config.widths[col] + cextra(elts[elt])
if col < cols - 1: if col < cols - 1:
fmt = "%%-%ds" % width fmt = '%%-%ds' % width
output.write(fmt % elts[elt]) output.write(fmt % elts[elt])
else: else:
# Don't pad the rightmost column (spaces can wrap on # Don't pad the rightmost column (spaces can wrap on
@@ -196,15 +198,15 @@ def colify(elts, **options):
def colify_table(table, **options): def colify_table(table, **options):
"""Version of ``colify()`` for data expressed in rows, (list of lists). """Version of ``colify()`` for data expressed in rows, (list of lists).
Same as regular colify but: Same as regular colify but:
1. This takes a list of lists, where each sub-list must be the 1. This takes a list of lists, where each sub-list must be the
same length, and each is interpreted as a row in a table. same length, and each is interpreted as a row in a table.
Regular colify displays a sequential list of values in columns. Regular colify displays a sequential list of values in columns.
2. Regular colify will always print with 1 column when the output 2. Regular colify will always print with 1 column when the output
is not a tty. This will always print with the same dimensions as is not a tty. This will always print with the same dimensions as
the table argument. the table argument.
""" """
if table is None: if table is None:
@@ -219,20 +221,20 @@ def transpose():
for row in table: for row in table:
yield row[i] yield row[i]
if "cols" in options: if 'cols' in options:
raise ValueError("Cannot override columsn in colify_table.") raise ValueError("Cannot override columsn in colify_table.")
options["cols"] = columns options['cols'] = columns
# don't reduce to 1 column for non-tty # don't reduce to 1 column for non-tty
options["tty"] = True options['tty'] = True
colify(transpose(), **options) colify(transpose(), **options)
def colified(elts, **options): def colified(elts, **options):
"""Invokes the ``colify()`` function but returns the result as a string """Invokes the ``colify()`` function but returns the result as a string
instead of writing it to an output string.""" instead of writing it to an output string."""
sio = StringIO() sio = StringIO()
options["output"] = sio options['output'] = sio
colify(elts, **options) colify(elts, **options)
return sio.getvalue() return sio.getvalue()
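A hypothetical use of the string-returning variant above (the element list is illustrative; cols and indent are among the options colify consumes):

    from llnl.util.tty.colify import colified

    names = ["boost", "cmake", "hdf5", "mpich", "openmpi", "zlib"]
    print(colified(names, cols=3, indent=2))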

View File

@@ -76,33 +76,29 @@ def __init__(self, message):
# Text styles for ansi codes # Text styles for ansi codes
styles = {"*": "1", "_": "4", None: "0"} # bold # underline # plain styles = {'*': '1', # bold
'_': '4', # underline
None: '0'} # plain
# Dim and bright ansi colors # Dim and bright ansi colors
colors = { colors = {'k': 30, 'K': 90, # black
"k": 30, 'r': 31, 'R': 91, # red
"K": 90, # black 'g': 32, 'G': 92, # green
"r": 31, 'y': 33, 'Y': 93, # yellow
"R": 91, # red 'b': 34, 'B': 94, # blue
"g": 32, 'm': 35, 'M': 95, # magenta
"G": 92, # green 'c': 36, 'C': 96, # cyan
"y": 33, 'w': 37, 'W': 97} # white
"Y": 93, # yellow
"b": 34,
"B": 94, # blue
"m": 35,
"M": 95, # magenta
"c": 36,
"C": 96, # cyan
"w": 37,
"W": 97,
} # white
# Regex to be used for color formatting # Regex to be used for color formatting
color_re = r"@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)" color_re = r'@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)'
# Mapping from color arguments to values for tty.set_color # Mapping from color arguments to values for tty.set_color
color_when_values = {"always": True, "auto": None, "never": False} color_when_values = {
'always': True,
'auto': None,
'never': False
}
# Force color; None: Only color if stdout is a tty # Force color; None: Only color if stdout is a tty
# True: Always colorize output, False: Never colorize output # True: Always colorize output, False: Never colorize output
@@ -118,7 +114,7 @@ def _color_when_value(when):
if when in color_when_values: if when in color_when_values:
return color_when_values[when] return color_when_values[when]
elif when not in color_when_values.values(): elif when not in color_when_values.values():
raise ValueError("Invalid color setting: %s" % when) raise ValueError('Invalid color setting: %s' % when)
return when return when
@@ -150,19 +146,16 @@ def color_when(value):
class match_to_ansi(object): class match_to_ansi(object):
def __init__(self, color=True, enclose=False):
def __init__(self, color=True):
self.color = _color_when_value(color) self.color = _color_when_value(color)
self.enclose = enclose
def escape(self, s): def escape(self, s):
"""Returns a TTY escape sequence for a color""" """Returns a TTY escape sequence for a color"""
if self.color: if self.color:
if self.enclose: return "\033[%sm" % s
return r"\[\033[%sm\]" % s
else:
return "\033[%sm" % s
else: else:
return "" return ''
def __call__(self, match): def __call__(self, match):
"""Convert a match object generated by ``color_re`` into an ansi """Convert a match object generated by ``color_re`` into an ansi
@@ -171,22 +164,22 @@ def __call__(self, match):
style, color, text = match.groups() style, color, text = match.groups()
m = match.group(0) m = match.group(0)
if m == "@@": if m == '@@':
return "@" return '@'
elif m == "@.": elif m == '@.':
return self.escape(0) return self.escape(0)
elif m == "@": elif m == '@':
raise ColorParseError("Incomplete color format: '%s' in %s" % (m, match.string)) raise ColorParseError("Incomplete color format: '%s' in %s"
% (m, match.string))
string = styles[style] string = styles[style]
if color: if color:
if color not in colors: if color not in colors:
raise ColorParseError( raise ColorParseError("Invalid color specifier: '%s' in '%s'"
"Invalid color specifier: '%s' in '%s'" % (color, match.string) % (color, match.string))
) string += ';' + str(colors[color])
string += ";" + str(colors[color])
colored_text = "" colored_text = ''
if text: if text:
colored_text = text + self.escape(0) colored_text = text + self.escape(0)
@@ -205,31 +198,29 @@ def colorize(string, **kwargs):
Keyword Arguments: Keyword Arguments:
color (bool): If False, output will be plain text without control color (bool): If False, output will be plain text without control
codes, for output to non-console devices. codes, for output to non-console devices.
enclose (bool): If True, enclose ansi color sequences with
square brackets to prevent misestimation of terminal width.
""" """
color = _color_when_value(kwargs.get("color", get_color_when())) color = _color_when_value(kwargs.get('color', get_color_when()))
string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose")), string) string = re.sub(color_re, match_to_ansi(color), string)
string = string.replace("}}", "}") string = string.replace('}}', '}')
return string return string
def clen(string): def clen(string):
"""Return the length of a string, excluding ansi color sequences.""" """Return the length of a string, excluding ansi color sequences."""
return len(re.sub(r"\033[^m]*m", "", string)) return len(re.sub(r'\033[^m]*m', '', string))
def cextra(string): def cextra(string):
"""Length of extra color characters in a string""" """Length of extra color characters in a string"""
return len("".join(re.findall(r"\033[^m]*m", string))) return len(''.join(re.findall(r'\033[^m]*m', string)))
def cwrite(string, stream=None, color=None): def cwrite(string, stream=None, color=None):
"""Replace all color expressions in string with ANSI control """Replace all color expressions in string with ANSI control
codes and write the result to the stream. If color is codes and write the result to the stream. If color is
False, this will write plain text with no color. If True, False, this will write plain text with no color. If True,
then it will always write colored output. If not supplied, then it will always write colored output. If not supplied,
then it will be set based on stream.isatty(). then it will be set based on stream.isatty().
""" """
stream = sys.stdout if stream is None else stream stream = sys.stdout if stream is None else stream
if color is None: if color is None:
@@ -260,19 +251,20 @@ def cescape(string):
(str): the string with color codes escaped (str): the string with color codes escaped
""" """
string = six.text_type(string) string = six.text_type(string)
string = string.replace("@", "@@") string = string.replace('@', '@@')
string = string.replace("}", "}}") string = string.replace('}', '}}')
return string return string
class ColorStream(object): class ColorStream(object):
def __init__(self, stream, color=None): def __init__(self, stream, color=None):
self._stream = stream self._stream = stream
self._color = color self._color = color
def write(self, string, **kwargs): def write(self, string, **kwargs):
raw = kwargs.get("raw", False) raw = kwargs.get('raw', False)
raw_write = getattr(self._stream, "write") raw_write = getattr(self._stream, 'write')
color = self._color color = self._color
if self._color is None: if self._color is None:
@@ -283,6 +275,6 @@ def write(self, string, **kwargs):
raw_write(colorize(string, color=color)) raw_write(colorize(string, color=color))
def writelines(self, sequence, **kwargs): def writelines(self, sequence, **kwargs):
raw = kwargs.get("raw", False) raw = kwargs.get('raw', False)
for string in sequence: for string in sequence:
self.write(string, self.color, raw=raw) self.write(string, self.color, raw=raw)

View File

@@ -31,22 +31,21 @@
termios = None # type: Optional[ModuleType] termios = None # type: Optional[ModuleType]
try: try:
import termios as term_mod import termios as term_mod
termios = term_mod termios = term_mod
except ImportError: except ImportError:
pass pass
# Use this to strip escape sequences # Use this to strip escape sequences
_escape = re.compile(r"\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07") _escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07')
# control characters for enabling/disabling echo # control characters for enabling/disabling echo
# #
# We use control characters to ensure that echo enable/disable are inline # We use control characters to ensure that echo enable/disable are inline
# with the other output. We always follow these with a newline to ensure # with the other output. We always follow these with a newline to ensure
# one per line; the following newline is ignored in output. # one per line; the following newline is ignored in output.
xon, xoff = "\x11\n", "\x13\n" xon, xoff = '\x11\n', '\x13\n'
control = re.compile("(\x11\n|\x13\n)") control = re.compile('(\x11\n|\x13\n)')
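A quick demonstration of how these markers survive the pipe and are later counted and stripped (mirroring the control.subn call in the writer daemon further down):

    line = xon + "==> make install\n"        # xon marks 'echo forced on'
    clean, num_controls = control.subn("", line)
    # clean == "==> make install\n"; a nonzero num_controls is what lets
    # the daemon flip its force_echo state for the marked region.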
@contextmanager @contextmanager
@@ -60,13 +59,17 @@ def ignore_signal(signum):
def _is_background_tty(stream): def _is_background_tty(stream):
"""True if the stream is a tty and calling process is in the background.""" """True if the stream is a tty and calling process is in the background.
return stream.isatty() and os.getpgrp() != os.tcgetpgrp(stream.fileno()) """
return (
stream.isatty() and
os.getpgrp() != os.tcgetpgrp(stream.fileno())
)
def _strip(line): def _strip(line):
"""Strip color and control characters from a line.""" """Strip color and control characters from a line."""
return _escape.sub("", line) return _escape.sub('', line)
class keyboard_input(object): class keyboard_input(object):
@@ -144,7 +147,6 @@ class keyboard_input(object):
a TTY, ``keyboard_input`` has no effect. a TTY, ``keyboard_input`` has no effect.
""" """
def __init__(self, stream): def __init__(self, stream):
"""Create a context manager that will enable keyboard input on stream. """Create a context manager that will enable keyboard input on stream.
@@ -202,7 +204,7 @@ def check_fg_bg(self):
bg = self._is_background() bg = self._is_background()
# restore sanity if flags are amiss -- see diagram in class docs # restore sanity if flags are amiss -- see diagram in class docs
if not bg and any(flags): # fg, but input not enabled if not bg and any(flags): # fg, but input not enabled
self._enable_keyboard_input() self._enable_keyboard_input()
elif bg and not all(flags): # bg, but input enabled elif bg and not all(flags): # bg, but input enabled
self._restore_default_terminal_settings() self._restore_default_terminal_settings()
@@ -226,7 +228,8 @@ def __enter__(self):
# Install a signal handler to disable/enable keyboard input # Install a signal handler to disable/enable keyboard input
# when the process moves between foreground and background. # when the process moves between foreground and background.
self.old_handlers[signal.SIGTSTP] = signal.signal(signal.SIGTSTP, self._tstp_handler) self.old_handlers[signal.SIGTSTP] = signal.signal(
signal.SIGTSTP, self._tstp_handler)
# add an atexit handler to ensure the terminal is restored # add an atexit handler to ensure the terminal is restored
atexit.register(self._restore_default_terminal_settings) atexit.register(self._restore_default_terminal_settings)
@@ -255,7 +258,6 @@ class Unbuffered(object):
This is implemented by forcing a flush after each write. This is implemented by forcing a flush after each write.
""" """
def __init__(self, stream): def __init__(self, stream):
self.stream = stream self.stream = stream
@@ -300,7 +302,6 @@ class FileWrapper(object):
yet), or neither. When unwrapped, it returns an open file (or file-like) yet), or neither. When unwrapped, it returns an open file (or file-like)
object. object.
""" """
def __init__(self, file_like): def __init__(self, file_like):
# This records whether the file-like object returned by "unwrap" is # This records whether the file-like object returned by "unwrap" is
# purely in-memory. In that case a subprocess will need to explicitly # purely in-memory. In that case a subprocess will need to explicitly
@@ -324,9 +325,9 @@ def unwrap(self):
if self.open: if self.open:
if self.file_like: if self.file_like:
if sys.version_info < (3,): if sys.version_info < (3,):
self.file = open(self.file_like, "w") self.file = open(self.file_like, 'w')
else: else:
self.file = open(self.file_like, "w", encoding="utf-8") # novm self.file = open(self.file_like, 'w', encoding='utf-8') # novm
else: else:
self.file = StringIO() self.file = StringIO()
return self.file return self.file
@@ -342,9 +343,8 @@ def close(self):
class MultiProcessFd(object): class MultiProcessFd(object):
"""Return an object which stores a file descriptor and can be passed as an """Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess.""" the file descriptor is available in the subprocess."""
def __init__(self, fd): def __init__(self, fd):
self._connection = None self._connection = None
self._fd = None self._fd = None
@@ -434,7 +434,7 @@ def log_output(*args, **kwargs):
This method is actually a factory serving a per platform This method is actually a factory serving a per platform
(unix vs windows) log_output class (unix vs windows) log_output class
""" """
if sys.platform == "win32": if sys.platform == 'win32':
return winlog(*args, **kwargs) return winlog(*args, **kwargs)
else: else:
return nixlog(*args, **kwargs) return nixlog(*args, **kwargs)
@@ -454,9 +454,8 @@ class nixlog(object):
work within test frameworks like nose and pytest. work within test frameworks like nose and pytest.
""" """
def __init__( def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
self, file_like=None, echo=False, debug=0, buffer=False, env=None, filter_fn=None env=None, filter_fn=None):
):
"""Create a new output log context manager. """Create a new output log context manager.
Args: Args:
@@ -525,7 +524,8 @@ def __enter__(self):
raise RuntimeError("Can't re-enter the same log_output!") raise RuntimeError("Can't re-enter the same log_output!")
if self.file_like is None: if self.file_like is None:
raise RuntimeError("file argument must be set by either __init__ or __call__") raise RuntimeError(
"file argument must be set by either __init__ or __call__")
# set up a stream for the daemon to write to # set up a stream for the daemon to write to
self.log_file = FileWrapper(self.file_like) self.log_file = FileWrapper(self.file_like)
@@ -555,7 +555,9 @@ def __enter__(self):
input_multiprocess_fd = None input_multiprocess_fd = None
try: try:
if sys.stdin.isatty(): if sys.stdin.isatty():
input_multiprocess_fd = MultiProcessFd(os.dup(sys.stdin.fileno())) input_multiprocess_fd = MultiProcessFd(
os.dup(sys.stdin.fileno())
)
except BaseException: except BaseException:
# just don't forward input if this fails # just don't forward input if this fails
pass pass
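For orientation, a hypothetical use of the resulting context manager (the file name is illustrative): everything written to stdout/stderr inside the block is forwarded to the daemon, logged, and only echoed when requested:

    from llnl.util.tty.log import log_output

    with log_output("build.out", echo=False) as logger:
        print("checking for gcc... yes")      # captured in build.out only
        with logger.force_echo():
            print("==> important progress")   # also echoed to the user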
@@ -564,14 +566,9 @@ def __enter__(self):
self.process = multiprocessing.Process( self.process = multiprocessing.Process(
target=_writer_daemon, target=_writer_daemon,
args=( args=(
input_multiprocess_fd, input_multiprocess_fd, read_multiprocess_fd, write_fd,
read_multiprocess_fd, self.echo, self.log_file, child_pipe, self.filter_fn
write_fd, )
self.echo,
self.log_file,
child_pipe,
self.filter_fn,
),
) )
self.process.daemon = True # must set before start() self.process.daemon = True # must set before start()
self.process.start() self.process.start()
@@ -612,7 +609,7 @@ def __enter__(self):
self._saved_stderr = sys.stderr self._saved_stderr = sys.stderr
# create a file object for the pipe; redirect to it. # create a file object for the pipe; redirect to it.
pipe_fd_out = os.fdopen(write_fd, "w") pipe_fd_out = os.fdopen(write_fd, 'w')
sys.stdout = pipe_fd_out sys.stdout = pipe_fd_out
sys.stderr = pipe_fd_out sys.stderr = pipe_fd_out
@@ -677,7 +674,8 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def force_echo(self): def force_echo(self):
"""Context manager to force local echo, even if echo is off.""" """Context manager to force local echo, even if echo is off."""
if not self._active: if not self._active:
raise RuntimeError("Can't call force_echo() outside log_output region!") raise RuntimeError(
"Can't call force_echo() outside log_output region!")
# This uses the xon/xoff to highlight regions to be echoed in the # This uses the xon/xoff to highlight regions to be echoed in the
# output. We use these control characters rather than, say, a # output. We use these control characters rather than, say, a
@@ -693,26 +691,25 @@ def force_echo(self):
class StreamWrapper: class StreamWrapper:
"""Wrapper class to handle redirection of io streams""" """ Wrapper class to handle redirection of io streams """
def __init__(self, sys_attr): def __init__(self, sys_attr):
self.sys_attr = sys_attr self.sys_attr = sys_attr
self.saved_stream = None self.saved_stream = None
if sys.platform.startswith("win32"): if sys.platform.startswith('win32'):
if sys.version_info < (3, 5): if sys.version_info < (3, 5):
libc = ctypes.CDLL(ctypes.util.find_library("c")) libc = ctypes.CDLL(ctypes.util.find_library('c'))
else: else:
if hasattr(sys, "gettotalrefcount"): # debug build if hasattr(sys, 'gettotalrefcount'): # debug build
libc = ctypes.CDLL("ucrtbased") libc = ctypes.CDLL('ucrtbased')
else: else:
libc = ctypes.CDLL("api-ms-win-crt-stdio-l1-1-0") libc = ctypes.CDLL('api-ms-win-crt-stdio-l1-1-0')
kernel32 = ctypes.WinDLL("kernel32") kernel32 = ctypes.WinDLL('kernel32')
# https://docs.microsoft.com/en-us/windows/console/getstdhandle # https://docs.microsoft.com/en-us/windows/console/getstdhandle
if self.sys_attr == "stdout": if self.sys_attr == 'stdout':
STD_HANDLE = -11 STD_HANDLE = -11
elif self.sys_attr == "stderr": elif self.sys_attr == 'stderr':
STD_HANDLE = -12 STD_HANDLE = -12
else: else:
raise KeyError(self.sys_attr) raise KeyError(self.sys_attr)
@@ -731,7 +728,7 @@ def __init__(self, sys_attr):
def redirect_stream(self, to_fd): def redirect_stream(self, to_fd):
"""Redirect stdout to the given file descriptor.""" """Redirect stdout to the given file descriptor."""
# Flush the C-level buffer stream # Flush the C-level buffer stream
if sys.platform.startswith("win32"): if sys.platform.startswith('win32'):
self.libc.fflush(None) self.libc.fflush(None)
else: else:
self.libc.fflush(self.c_stream) self.libc.fflush(self.c_stream)
@@ -742,13 +739,13 @@ def redirect_stream(self, to_fd):
# Make orig_stream_fd point to the same file as to_fd # Make orig_stream_fd point to the same file as to_fd
os.dup2(to_fd, self.orig_stream_fd) os.dup2(to_fd, self.orig_stream_fd)
# Set sys_stream to a new stream that points to the redirected fd # Set sys_stream to a new stream that points to the redirected fd
new_buffer = open(self.orig_stream_fd, "wb") new_buffer = open(self.orig_stream_fd, 'wb')
new_stream = io.TextIOWrapper(new_buffer) new_stream = io.TextIOWrapper(new_buffer)
setattr(sys, self.sys_attr, new_stream) setattr(sys, self.sys_attr, new_stream)
self.sys_stream = getattr(sys, self.sys_attr) self.sys_stream = getattr(sys, self.sys_attr)
def flush(self): def flush(self):
if sys.platform.startswith("win32"): if sys.platform.startswith('win32'):
self.libc.fflush(None) self.libc.fflush(None)
else: else:
self.libc.fflush(self.c_stream) self.libc.fflush(self.c_stream)
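The wrapper above captures writes made below Python's io layer by re-pointing the process-level file descriptor; a minimal sketch of the same dup2 dance without the class:

    import os
    import sys

    saved_fd = os.dup(sys.stdout.fileno())        # remember the real stdout
    with open("capture.log", "w") as log:
        os.dup2(log.fileno(), sys.stdout.fileno())
        os.write(1, b"C-level write\n")           # lands in capture.log
    os.dup2(saved_fd, sys.stdout.fileno())        # restore the terminal
    os.close(saved_fd)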
@@ -771,16 +768,14 @@ class winlog(object):
Does not support the use of 'v' toggling as nixlog does. Does not support the use of 'v' toggling as nixlog does.
""" """
def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
def __init__( env=None, filter_fn=None):
self, file_like=None, echo=False, debug=0, buffer=False, env=None, filter_fn=None
):
self.env = env self.env = env
self.debug = debug self.debug = debug
self.echo = echo self.echo = echo
self.logfile = file_like self.logfile = file_like
self.stdout = StreamWrapper("stdout") self.stdout = StreamWrapper('stdout')
self.stderr = StreamWrapper("stderr") self.stderr = StreamWrapper('stderr')
self._active = False self._active = False
self._ioflag = False self._ioflag = False
self.old_stdout = sys.stdout self.old_stdout = sys.stdout
@@ -791,7 +786,8 @@ def __enter__(self):
raise RuntimeError("Can't re-enter the same log_output!") raise RuntimeError("Can't re-enter the same log_output!")
if self.logfile is None: if self.logfile is None:
raise RuntimeError("file argument must be set by __init__ ") raise RuntimeError(
"file argument must be set by __init__ ")
# Open both write and reading on logfile # Open both write and reading on logfile
if type(self.logfile) == StringIO: if type(self.logfile) == StringIO:
@@ -800,8 +796,8 @@ def __enter__(self):
sys.stdout = self.logfile sys.stdout = self.logfile
sys.stderr = self.logfile sys.stderr = self.logfile
else: else:
self.writer = open(self.logfile, mode="wb+") self.writer = open(self.logfile, mode='wb+')
self.reader = open(self.logfile, mode="rb+") self.reader = open(self.logfile, mode='rb+')
# Dup stdout so we can still write to it after redirection # Dup stdout so we can still write to it after redirection
self.echo_writer = open(os.dup(sys.stdout.fileno()), "w") self.echo_writer = open(os.dup(sys.stdout.fileno()), "w")
@@ -815,7 +811,7 @@ def background_reader(reader, echo_writer, _kill):
# if echo: write line to user # if echo: write line to user
try: try:
while True: while True:
is_killed = _kill.wait(0.1) is_killed = _kill.wait(.1)
# Flush buffered build output to file # Flush buffered build output to file
# stdout/err fds refer to log file # stdout/err fds refer to log file
self.stderr.flush() self.stderr.flush()
@@ -823,7 +819,7 @@ def background_reader(reader, echo_writer, _kill):
line = reader.readline() line = reader.readline()
if self.echo and line: if self.echo and line:
echo_writer.write("{0}".format(line.decode())) echo_writer.write('{0}'.format(line.decode()))
echo_writer.flush() echo_writer.flush()
if is_killed: if is_killed:
@@ -833,9 +829,8 @@ def background_reader(reader, echo_writer, _kill):
self._active = True self._active = True
with replace_environment(self.env): with replace_environment(self.env):
self._thread = Thread( self._thread = Thread(target=background_reader,
target=background_reader, args=(self.reader, self.echo_writer, self._kill) args=(self.reader, self.echo_writer, self._kill))
)
self._thread.start() self._thread.start()
return self return self
@@ -859,19 +854,13 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def force_echo(self): def force_echo(self):
"""Context manager to force local echo, even if echo is off.""" """Context manager to force local echo, even if echo is off."""
if not self._active: if not self._active:
raise RuntimeError("Can't call force_echo() outside log_output region!") raise RuntimeError(
"Can't call force_echo() outside log_output region!")
yield yield
def _writer_daemon( def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
stdin_multiprocess_fd, log_file_wrapper, control_pipe, filter_fn):
read_multiprocess_fd,
write_fd,
echo,
log_file_wrapper,
control_pipe,
filter_fn,
):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``. """Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both The daemon receives output from the parent process and writes it both
@@ -924,16 +913,16 @@ def _writer_daemon(
# write_fd to terminate the reading loop, so we close the file descriptor # write_fd to terminate the reading loop, so we close the file descriptor
# here. Forking is the process spawning method everywhere except Mac OS # here. Forking is the process spawning method everywhere except Mac OS
# for Python >= 3.8 and on Windows # for Python >= 3.8 and on Windows
if sys.version_info < (3, 8) or sys.platform != "darwin": if sys.version_info < (3, 8) or sys.platform != 'darwin':
os.close(write_fd) os.close(write_fd)
# Use line buffering (3rd param = 1) since Python 3 has a bug # Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O. # that prevents unbuffered text I/O.
if sys.version_info < (3,): if sys.version_info < (3,):
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1) in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
else: else:
# Python 3.x before 3.7 does not open with UTF-8 encoding by default # Python 3.x before 3.7 does not open with UTF-8 encoding by default
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8") in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1, encoding='utf-8')
if stdin_multiprocess_fd: if stdin_multiprocess_fd:
stdin = os.fdopen(stdin_multiprocess_fd.fd) stdin = os.fdopen(stdin_multiprocess_fd.fd)
@@ -942,7 +931,7 @@ def _writer_daemon(
     # list of streams to select from
     istreams = [in_pipe, stdin] if stdin else [in_pipe]
-    force_echo = False  # parent can force echo for certain output
+    force_echo = False      # parent can force echo for certain output
 
     log_file = log_file_wrapper.unwrap()
@@ -965,7 +954,7 @@ def _writer_daemon(
             # check and the read, so we ignore SIGTTIN here.
             with ignore_signal(signal.SIGTTIN):
                 try:
-                    if stdin.read(1) == "v":
+                    if stdin.read(1) == 'v':
                         echo = not echo
                 except IOError as e:
                     # If SIGTTIN is ignored, the system gives EIO
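
The 'v' branch above is how Spack lets a user toggle live build output while an install runs. A rough standalone sketch of the polling logic, assuming the terminal is already in cbreak/raw mode so read(1) returns without waiting for a newline, and using a plain except in place of Spack's ignore_signal helper:

    import errno
    import select
    import sys

    def poll_echo_toggle(stdin, echo, timeout=0.1):
        # select() tells us whether a key is pending, so read(1) never blocks
        readable, _, _ = select.select([stdin], [], [], timeout)
        if stdin in readable:
            try:
                if stdin.read(1) == "v":
                    echo = not echo
            except IOError as e:
                # a backgrounded reader gets EIO when SIGTTIN is ignored
                if e.errno != errno.EIO:
                    raise
        return echo
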
@@ -983,14 +972,14 @@ def _writer_daemon(
                 line = _retry(in_pipe.readline)()
             except UnicodeDecodeError:
                 # installs like --test=root gpgme produce non-UTF8 logs
-                line = "<line lost: output was not encoded as UTF-8>\n"
+                line = '<line lost: output was not encoded as UTF-8>\n'
 
             if not line:
                 return
 
             line_count += 1
 
             # find control characters and strip them.
-            clean_line, num_controls = control.subn("", line)
+            clean_line, num_controls = control.subn('', line)
 
             # Echo to stdout if requested or forced.
             if echo or force_echo:
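
The control pattern used by subn above is defined elsewhere in the file and not shown in this diff; the regex below is a plausible stand-in that strips ANSI CSI escape sequences and C0 control codes other than tab and newline:

    import re

    # illustrative only: CSI escape sequences, then stray C0 controls
    control = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]|[\x00-\x08\x0b-\x1f\x7f]")

    line = "\x1b[1;32mBuilding...\x1b[0m\n"
    clean_line, num_controls = control.subn("", line)
    print(repr(clean_line), num_controls)   # 'Building...\n' 2
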
@@ -1054,7 +1043,6 @@ def _retry(function):
     relevant for this file.
     """
-
     def wrapped(*args, **kwargs):
         while True:
             try:
@@ -1067,7 +1055,6 @@ def wrapped(*args, **kwargs):
                 if e.args[0] == errno.EINTR:
                     continue
                 raise
-
     return wrapped
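
Pieced together from the two hunks above, the whole _retry helper is only a few lines; the body of the try block is inferred here, so treat this as a reconstruction rather than a verbatim copy. It exists because, before PEP 475 (Python 3.5), a signal arriving mid-call surfaced as IOError with EINTR instead of the call being restarted:

    import errno

    def _retry(function):
        # Retry only when the error is an interrupted system call;
        # any other IOError propagates to the caller.
        def wrapped(*args, **kwargs):
            while True:
                try:
                    return function(*args, **kwargs)
                except IOError as e:
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
        return wrapped
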
@@ -30,7 +30,6 @@
 termios = None
 try:
     import termios as term_mod
-
     termios = term_mod
 except ImportError:
     pass
@@ -43,8 +42,8 @@ class ProcessController(object):
     minion) similar to the way a shell would, by sending signals and I/O.
     """
 
-    def __init__(self, pid, controller_fd, timeout=1, sleep_time=1e-1, debug=False):
+    def __init__(self, pid, controller_fd,
+                 timeout=1, sleep_time=1e-1, debug=False):
         """Create a controller to manipulate the process with id ``pid``
 
         Args:
@@ -85,19 +84,18 @@ def get_canon_echo_attrs(self):
     def horizontal_line(self, name):
         """Labled horizontal line for debugging."""
         if self.debug:
-            sys.stderr.write("------------------------------------------- %s\n" % name)
+            sys.stderr.write(
+                "------------------------------------------- %s\n" % name
+            )
 
     def status(self):
         """Print debug message with status info for the minion."""
         if self.debug:
             canon, echo = self.get_canon_echo_attrs()
-            sys.stderr.write(
-                "canon: %s, echo: %s\n"
-                % (
-                    "on" if canon else "off",
-                    "on" if echo else "off",
-                )
-            )
+            sys.stderr.write("canon: %s, echo: %s\n" % (
+                "on" if canon else "off",
+                "on" if echo else "off",
+            ))
             sys.stderr.write("input: %s\n" % self.input_on())
             sys.stderr.write("bg: %s\n" % self.background())
             sys.stderr.write("\n")
@@ -139,7 +137,7 @@ def write(self, byte_string):
 
     def wait(self, condition):
         start = time.time()
-        while ((time.time() - start) < self.timeout) and not condition():
+        while (((time.time() - start) < self.timeout) and not condition()):
             time.sleep(1e-2)
         assert condition()
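
wait() is a classic poll-until-timeout helper; the trailing assert turns a timeout into a loud failure rather than a silent return. A standalone equivalent (a free function instead of a method, names invented):

    import time

    def wait_for(condition, timeout=1.0, poll_interval=1e-2):
        # Sleep in short increments until the predicate holds or time
        # runs out; asserting afterwards makes a timeout fail loudly.
        start = time.time()
        while (time.time() - start) < timeout and not condition():
            time.sleep(poll_interval)
        assert condition()

    wait_for(lambda: True)   # returns immediately
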
@@ -221,15 +219,14 @@ def minion_function(**kwargs)
     |_________________________________________________________|
 
     """
-
     def __init__(self, controller_function, minion_function):
         self.proc = None
         self.controller_function = controller_function
         self.minion_function = minion_function
 
         # these can be optionally set to change defaults
-        self.controller_timeout = 3
-        self.sleep_time = 0.1
+        self.controller_timeout = 1
+        self.sleep_time = 0
def start(self, **kwargs): def start(self, **kwargs):
"""Start the controller and minion processes. """Start the controller and minion processes.
@@ -245,12 +242,8 @@ def start(self, **kwargs):
""" """
self.proc = multiprocessing.Process( self.proc = multiprocessing.Process(
target=PseudoShell._set_up_and_run_controller_function, target=PseudoShell._set_up_and_run_controller_function,
args=( args=(self.controller_function, self.minion_function,
self.controller_function, self.controller_timeout, self.sleep_time),
self.minion_function,
self.controller_timeout,
self.sleep_time,
),
kwargs=kwargs, kwargs=kwargs,
) )
self.proc.start() self.proc.start()
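
start() forwards the fixed positional parameters through args= and the caller's options through kwargs=, so the child's entry point receives them as ordinary arguments. A minimal demonstration of that forwarding pattern with invented names:

    import multiprocessing

    def entry(controller, minion, timeout, sleep_time, **kwargs):
        print(controller, minion, timeout, sleep_time, kwargs.get("debug"))

    if __name__ == "__main__":
        proc = multiprocessing.Process(
            target=entry,
            args=("ctl", "min", 1, 0.1),   # fixed positional parameters
            kwargs={"debug": True},        # user options passed through
        )
        proc.start()
        proc.join()
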
@@ -262,8 +255,7 @@ def join(self):
     @staticmethod
     def _set_up_and_run_minion_function(
-        tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs
-    ):
+            tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
         """Minion process wrapper for PseudoShell.
 
         Handles the mechanics of setting up a PTY, then calls
@@ -281,7 +273,8 @@ def _set_up_and_run_minion_function(
         os.close(stdin_fd)
 
         if kwargs.get("debug"):
-            sys.stderr.write("minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
+            sys.stderr.write(
+                "minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
 
         # tell the parent that we're really running
         if kwargs.get("debug"):
@@ -295,15 +288,15 @@ def _set_up_and_run_minion_function(
     @staticmethod
     def _set_up_and_run_controller_function(
-        controller_function, minion_function, controller_timeout, sleep_time, **kwargs
-    ):
+            controller_function, minion_function, controller_timeout,
+            sleep_time, **kwargs):
         """Set up a pty, spawn a minion process, execute controller_function.
 
         Handles the mechanics of setting up a PTY, then calls
         ``controller_function``.
 
         """
         os.setsid()  # new session; this process is the controller
         controller_fd, minion_fd = os.openpty()
         pty_name = os.ttyname(minion_fd)
@@ -312,10 +305,11 @@ def _set_up_and_run_controller_function(
         pty_fd = os.open(pty_name, os.O_RDWR)
         os.close(pty_fd)
 
-        ready = multiprocessing.Value("i", False)
+        ready = multiprocessing.Value('i', False)
         minion_process = multiprocessing.Process(
             target=PseudoShell._set_up_and_run_minion_function,
-            args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(), ready, minion_function),
+            args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
+                  ready, minion_function),
             kwargs=kwargs,
         )
         minion_process.start()
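
os.openpty() hands back a (controller, minion) pair of file descriptors; the minion end looks like a real terminal to whoever opens it, which is the whole trick behind PseudoShell. A tiny POSIX-only round trip showing the mechanics (the device name varies by system):

    import os

    controller_fd, minion_fd = os.openpty()
    print(os.ttyname(minion_fd))          # e.g. /dev/pts/3 (varies)

    # bytes written to the controller end arrive as terminal input
    os.write(controller_fd, b"hello\n")
    print(os.read(minion_fd, 1024))       # b'hello\n'

    os.close(minion_fd)
    os.close(controller_fd)
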
@@ -335,7 +329,8 @@ def _set_up_and_run_controller_function(
             minion_pgid = os.getpgid(minion_process.pid)
             sys.stderr.write("minion pid: %d\n" % minion_process.pid)
             sys.stderr.write("minion pgid: %d\n" % minion_pgid)
-            sys.stderr.write("minion sid: %d\n" % os.getsid(minion_process.pid))
+            sys.stderr.write(
+                "minion sid: %d\n" % os.getsid(minion_process.pid))
             sys.stderr.write("\n")
             sys.stderr.flush()
 
         # set up controller to ignore SIGTSTP, like a shell
@@ -344,8 +339,7 @@ def _set_up_and_run_controller_function(
         # call the controller function once the minion is ready
         try:
             controller = ProcessController(
-                minion_process.pid, controller_fd, debug=kwargs.get("debug")
-            )
+                minion_process.pid, controller_fd, debug=kwargs.get("debug"))
             controller.timeout = controller_timeout
             controller.sleep_time = sleep_time
             error = controller_function(minion_process, controller, **kwargs)
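
With the minion's pid and the controller end of the pty in hand, "controlling the process like a shell" mostly reduces to job-control signals and writes to the pty. A hedged sketch of those primitives; the helpers below are illustrative and not ProcessController's API:

    import os
    import signal

    def stop(pid):
        os.kill(pid, signal.SIGTSTP)    # suspend, as Ctrl-Z would

    def resume(pid):
        os.kill(pid, signal.SIGCONT)    # resume a stopped process

    def type_into(pty_controller_fd, text):
        # keystrokes for the minion: written to the controller end,
        # delivered by the pty as terminal input
        os.write(pty_controller_fd, text.encode("utf-8"))
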
Some files were not shown because too many files have changed in this diff.