Compare commits


3 Commits

Author        SHA1        Message                                                                 Date
Todd Gamblin  3ccc744ac8  WIP                                                                     2022-09-04 15:39:44 -07:00
Todd Gamblin  e040940833  WIP                                                                     2022-09-02 14:01:06 -07:00
Todd Gamblin  0fc85c32c0  bugfix: spack find should show specs that differ only by package hash  2022-08-26 17:08:58 -07:00
2661 changed files with 31669 additions and 53012 deletions

View File

@@ -29,7 +29,6 @@ max-line-length = 99
#
per-file-ignores =
var/spack/repos/*/package.py:F403,F405,F821
*-ci-package.py:F403,F405,F821
# exclude things we usually do not want linting for.
# These still get linted when passed explicitly, as when spack flake8 passes

View File

@@ -1,62 +0,0 @@
name: "\U0001F4A5 Tests error"
description: Some package in Spack had stand-alone tests that didn't pass
title: "Testing issue: "
labels: [test-error]
body:
- type: textarea
id: reproduce
attributes:
label: Steps to reproduce the failure(s) or link(s) to test output(s)
description: |
Fill in the test output from the exact spec that is having stand-alone test failures. Links to test outputs (e.g., CDash) can also be provided.
value: |
```console
$ spack spec -I <spec>
...
```
- type: textarea
id: error
attributes:
label: Error message
description: |
Please post the error message from spack inside the `<details>` tag below:
value: |
<details><summary>Error message</summary><pre>
...
</pre></details>
validations:
required: true
- type: textarea
id: information
attributes:
label: Information on your system or the test runner
description: Please include the output of `spack debug report` for your system.
validations:
required: true
- type: markdown
attributes:
value: |
If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well.
- type: textarea
id: additional_information
attributes:
label: Additional information
description: |
Please upload test logs or any additional information about the problem.
- type: markdown
attributes:
value: |
Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and **@mention** them here if they exist.
- type: checkboxes
id: checks
attributes:
label: General information
options:
- label: I have reported the version of Spack/Python/Platform/Runner
required: true
- label: I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers
required: true
- label: I have uploaded any available logs
required: true
- label: I have searched the issues of this repo and believe this is not a duplicate
required: true

View File

@@ -1,44 +0,0 @@
name: audit
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
python_version:
required: true
type: string
concurrency:
group: audit-${{inputs.python_version}}-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Run audits on all the packages in the built-in repository
package-audits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml]
- name: Package audits (with coverage)
if: ${{ inputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ inputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits

View File

@@ -1,7 +0,0 @@
#!/bin/bash
set -ex
source share/spack/setup-env.sh
$PYTHON bin/spack bootstrap disable spack-install
$PYTHON bin/spack -d solve zlib
tree $BOOTSTRAP/store
exit 0

View File

@@ -3,19 +3,33 @@ name: Bootstrapping
on:
# This Workflow can be triggered manually
workflow_dispatch:
workflow_call:
pull_request:
branches:
- develop
- releases/**
paths-ignore:
# Don't run if we only modified packages in the
# built-in repository or documentation
- 'var/spack/repos/builtin/**'
- '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
- '!var/spack/repos/builtin/packages/clingo/**'
- '!var/spack/repos/builtin/packages/python/**'
- '!var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/docs/**'
schedule:
# nightly at 2:16 AM
- cron: '16 2 * * *'
concurrency:
group: bootstrap-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
fedora-clingo-sources:
runs-on: ubuntu-latest
container: "fedora:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -24,9 +38,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
@@ -37,13 +49,13 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack bootstrap untrust github-actions-v0.2
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -51,6 +63,7 @@ jobs:
ubuntu-clingo-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -62,9 +75,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
@@ -75,13 +86,13 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack bootstrap untrust github-actions-v0.2
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -89,6 +100,7 @@ jobs:
ubuntu-clingo-binaries-and-patchelf:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -99,9 +111,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
@@ -112,6 +122,7 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
@@ -123,6 +134,7 @@ jobs:
opensuse-clingo-sources:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -133,38 +145,36 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup repo
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack bootstrap untrust github-actions-v0.2
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
macos-clingo-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
export PATH=/usr/local/opt/bison@2.7/bin:$PATH
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack bootstrap untrust github-actions-v0.2
spack external find --not-buildable cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -173,70 +183,53 @@ jobs:
runs-on: ${{ matrix.macos-version }}
strategy:
matrix:
python-version: ['3.6', '3.7', '3.8', '3.9', '3.10']
macos-version: ['macos-11', 'macos-12']
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: ${{ matrix.python-version }}
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
# NOTE: test all pythons that exist, not all do on 12
done
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-clingo-binaries:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10']
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
fetch-depth: 0
python-version: ${{ matrix.python-version }}
- name: Setup repo
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.6' '3.7' '3.8' '3.9' '3.10' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
if (($not_found)) ; then
echo Required python version $ver not found in runner!
exit 1
fi
done
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -247,9 +240,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
@@ -260,18 +251,20 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable spack-install
spack bootstrap untrust spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
ubuntu-gnupg-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -283,9 +276,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
with:
fetch-depth: 0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
@@ -296,19 +287,20 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack bootstrap untrust github-actions-v0.2
spack -d gpg list
tree ~/.spack/bootstrap/store/
macos-gnupg-binaries:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -316,16 +308,17 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack bootstrap disable spack-install
spack bootstrap untrust spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
macos-gnupg-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -333,13 +326,12 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.3
spack bootstrap untrust github-actions-v0.2
spack -d gpg list
tree ~/.spack/bootstrap/store/

View File

@@ -13,14 +13,14 @@ on:
paths:
- '.github/workflows/build-containers.yml'
- 'share/spack/docker/*'
- 'share/spack/templates/container/*'
- 'share/templates/container/*'
- 'lib/spack/spack/container/*'
# Let's also build & tag Spack containers on releases.
release:
types: [published]
concurrency:
group: build_containers-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
@@ -50,7 +50,7 @@ jobs:
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -80,19 +80,19 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with:
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # @v1
uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # @v1
uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -100,13 +100,13 @@ jobs:
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # @v2
uses: docker/build-push-action@c84f38281176d4c9cdb1626ffafcd6b3911b5d94 # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}

View File

@@ -1,86 +0,0 @@
name: ci
on:
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency:
group: ci-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
prechecks:
needs: [ changes ]
uses: ./.github/workflows/valid-style.yml
with:
with_coverage: ${{ needs.changes.outputs.core }}
all-prechecks:
needs: [ prechecks ]
runs-on: ubuntu-latest
steps:
- name: Success
run: "true"
# Check which files have been updated by the PR
changes:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
bootstrap: ${{ steps.filter.outputs.bootstrap }}
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
# Don't run if we only modified packages in the
# built-in repository or documentation
filters: |
bootstrap:
- 'var/spack/repos/builtin/packages/clingo-bootstrap/**'
- 'var/spack/repos/builtin/packages/clingo/**'
- 'var/spack/repos/builtin/packages/python/**'
- 'var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/**'
- 'share/spack/**'
- '.github/workflows/bootstrap.yml'
- '.github/workflows/ci.yaml'
core:
- './!(var/**)/**'
packages:
- 'var/**'
# Some links for easier reference:
#
# "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
# job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
# setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
#
bootstrap:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/bootstrap.yml
unit-tests:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/unit_tests.yaml
windows:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks ]
uses: ./.github/workflows/windows_python.yml
all:
needs: [ windows, unit-tests, bootstrap ]
runs-on: ubuntu-latest
steps:
- name: Success
run: "true"

View File

@@ -6,10 +6,6 @@ git config --global user.email "spack@example.com"
git config --global user.name "Test User"
git config --global core.longpaths true
# See https://github.com/git/git/security/advisories/GHSA-3wp6-j8xr-qw85 (CVE-2022-39253)
# This is needed to let some fixture in our unit-test suite run
git config --global protocol.file.allow always
if ($(git branch --show-current) -ne "develop")
{
git branch develop origin/develop

View File

@@ -2,10 +2,6 @@
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
# See https://github.com/git/git/security/advisories/GHSA-3wp6-j8xr-qw85 (CVE-2022-39253)
# This is needed to let some fixture in our unit-test suite run
git config --global protocol.file.allow always
# create a local pr base branch
if [[ -n $GITHUB_BASE_REF ]]; then
git fetch origin "${GITHUB_BASE_REF}:${GITHUB_BASE_REF}"

View File

@@ -1,56 +1,118 @@
name: unit tests
name: linux tests
on:
workflow_dispatch:
workflow_call:
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency:
group: unit_tests-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
# Run unit tests with different configurations on linux
ubuntu:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest]
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
concretizer: ['clingo']
on_develop:
- ${{ github.ref == 'refs/heads/develop' }}
include:
- python-version: '3.11'
os: ubuntu-latest
concretizer: original
on_develop: ${{ github.ref == 'refs/heads/develop' }}
- python-version: '3.6'
os: ubuntu-20.04
concretizer: clingo
on_develop: ${{ github.ref == 'refs/heads/develop' }}
exclude:
- python-version: '3.7'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.8'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.9'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.10'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
# Validate that the code can be run on all the Python versions
# supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
with:
python-version: '3.10'
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport argparse --violations --backport typing -t=2.7- -t=3.6- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
with:
python-version: '3.10'
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools types-six
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run style tests
run: |
share/spack/qa/run-style-tests
# Check which files have been updated by the PR
changes:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@b2feaf19c27470162a626bd6fa8438ae5b263721
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
filters: |
core:
- './!(var/**)/**'
packages:
- 'var/**'
# Some links for easier reference:
#
# "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
# job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
# setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
#
- id: coverage
# Run the subsequent jobs with coverage if core has been modified,
# regardless of whether this is a pull request or a push to a branch
run: |
echo Core changes: ${{ steps.filter.outputs.core }}
echo Event name: ${{ github.event_name }}
if [ "${{ steps.filter.outputs.core }}" == "true" ]
then
echo "::set-output name=with_coverage::true"
else
echo "::set-output name=with_coverage::false"
fi
# Run unit tests with different configurations on linux
unittests:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10']
concretizer: ['clingo']
include:
- python-version: 2.7
concretizer: original
- python-version: 3.9
concretizer: original
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -59,11 +121,19 @@ jobs:
# Needed for unit tests
sudo apt-get -y install \
coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
cmake bison libbison-dev kcov
patchelf cmake bison libbison-dev kcov
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov[toml] pytest-xdist pytest-cov
pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black"
pip install --upgrade pip six setuptools pytest codecov "coverage[toml]<=6.2"
# ensure style checks are not skipped in unit tests for python >= 3.6
# note that true/false (i.e., 1/0) are opposite in conditions in python and bash
if python -c 'import sys; sys.exit(not sys.version_info >= (3, 6))'; then
pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click==8.0.4" "black<=21.12b0"
fi
- name: Pin pathlib for Python 2.7
if: ${{ matrix.python-version == 2.7 }}
run: |
pip install -U pathlib2==2.3.6
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
@@ -75,31 +145,41 @@ jobs:
SPACK_PYTHON: python
run: |
. share/spack/setup-env.sh
spack bootstrap disable spack-install
spack bootstrap now
spack bootstrap untrust spack-install
spack -v solve zlib
- name: Run unit tests
- name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
SPACK_PYTHON: python
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
SPACK_TEST_PARALLEL: 2
COVERAGE: true
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
coverage combine
coverage xml
- name: Run unit tests (reduced suite without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
SPACK_PYTHON: python
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,${{ matrix.concretizer }}
# Test shell integration
shell:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
with:
python-version: '3.11'
python-version: '3.10'
- name: Install System packages
run: |
sudo apt-get -y update
@@ -107,25 +187,33 @@ jobs:
sudo apt-get install -y coreutils kcov csh zsh tcsh fish dash bash
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml] pytest-xdist
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run shell tests
- name: Run shell tests (without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
share/spack/qa/run-shell-tests
- name: Run shell tests (with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: shelltests,linux
# Test RHEL8 UBI with platform Python. This job is run
# only on PRs modifying core Spack
rhel8-platform-python:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
container: registry.access.redhat.com/ubi8/ubi
steps:
- name: Install dependencies
@@ -133,7 +221,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -145,71 +233,127 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack -d bootstrap now --dev
spack -d solve zlib
spack unit-test -k 'not cvs and not svn and not hg' -x --verbose
# Test for the clingo based solver (using clingo-cffi)
clingo-cffi:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
with:
python-version: '3.11'
python-version: '3.10'
- name: Install System packages
run: |
sudo apt-get -y update
sudo apt-get -y install coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build kcov
# Needed for unit tests
sudo apt-get -y install \
coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
patchelf kcov
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml] pytest-cov clingo pytest-xdist
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2 clingo
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
COVERAGE: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # @v2.1.0
coverage combine
coverage xml
- name: Run unit tests (reduced suite without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,clingo
# Run unit tests on MacOS
macos:
build:
needs: [ validate, style, changes ]
runs-on: macos-latest
strategy:
matrix:
python-version: ["3.10"]
python-version: [3.8]
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools
pip install --upgrade pytest codecov coverage[toml] pytest-xdist pytest-cov
pip install --upgrade pytest codecov coverage[toml]==6.2
- name: Setup Homebrew packages
run: |
brew install dash fish gcc gnupg2 kcov
- name: Run unit tests
env:
SPACK_TEST_SOLVER: clingo
SPACK_TEST_PARALLEL: 4
run: |
git --version
. .github/workflows/setup_git.sh
. share/spack/setup-env.sh
$(which spack) bootstrap disable spack-install
$(which spack) bootstrap untrust spack-install
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
then
coverage run $(which spack) unit-test -x
coverage combine
coverage xml
# Delete the symlink going from ./lib/spack/docs/_spack_root back to
# the initial directory, since it causes ELOOP errors with codecov/actions@2
rm lib/spack/docs/_spack_root
else
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
fi
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
files: ./coverage.xml
flags: unittests,macos
# Run audits on all the packages in the built-in repository
package-audits:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 # @v2
with:
python-version: '3.10'
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools pytest codecov coverage[toml]==6.2
- name: Package audits (with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits

View File

@@ -1,60 +0,0 @@
name: style
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
concurrency:
group: style-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Validate that the code can be run on all the Python versions
# supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # @v2
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python packages
run: |
python3 -m pip install --upgrade pip six setuptools types-six black mypy isort clingo flake8
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run style tests
run: |
share/spack/qa/run-style-tests
audit:
uses: ./.github/workflows/audit.yaml
with:
with_coverage: ${{ inputs.with_coverage }}
python_version: '3.11'

View File

@@ -1,10 +1,17 @@
name: windows
name: windows tests
on:
workflow_call:
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
concurrency:
group: windows-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
defaults:
@@ -12,64 +19,91 @@ defaults:
shell:
powershell Invoke-Expression -Command ".\share\spack\qa\windows_test_setup.ps1"; {0}
jobs:
unit-tests:
validate:
runs-on: windows-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python Packages
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade vermin
- name: vermin (Spack's Core)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.6-' -v spack/lib/spack/spack/ spack/lib/spack/llnl/ spack/bin/
- name: vermin (Repositories)
run: vermin --backport argparse --backport typing -t='2.7-' -t='3.6-' -v spack/var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov pytest-cov clingo
python -m pip install --upgrade pip six setuptools flake8 "isort>=4.3.5" "mypy>=0.800" "click==8.0.4" "black<=21.12b0" pywin32 types-python-dateutil
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Run style tests
run: |
spack style
- name: Verify license headers
run: |
python spack\bin\spack license verify
unittest:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Unit Test
run: |
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
cd spack
dir
spack unit-test -x --verbose --cov --cov-config=pyproject.toml --ignore=lib/spack/spack/test/cmd
coverage combine -a
coverage xml
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
with:
flags: unittests,windows
unit-tests-cmd:
spack unit-test --verbose --ignore=lib/spack/spack/test/cmd
unittest-cmd:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage pytest-cov clingo
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
- name: Command Unit Test
run: |
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
cd spack
spack unit-test -x --verbose --cov --cov-config=pyproject.toml lib/spack/spack/test/cmd
coverage combine -a
coverage xml
- uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70
with:
flags: unittests,windows
build-abseil:
spack unit-test lib/spack/spack/test/cmd --verbose
buildtest:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
@@ -81,8 +115,9 @@ jobs:
echo F|xcopy .\spack\share\spack\qa\configuration\windows_config.yaml $env:USERPROFILE\.spack\windows\config.yaml
spack external find cmake
spack external find ninja
spack -d install abseil-cpp
make-installer:
spack install abseil-cpp
generate-installer-test:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- name: Disable Windows Symlinks
@@ -90,15 +125,15 @@ jobs:
git config --global core.symlinks false
shell:
powershell
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Add Light and Candle to Path
run: |
$env:WIX >> $GITHUB_PATH
@@ -109,27 +144,27 @@ jobs:
echo "installer_root=$((pwd).Path)" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
env:
ProgressPreference: SilentlyContinue
- uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with:
name: Windows Spack Installer Bundle
path: ${{ env.installer_root }}\pkg\Spack.exe
- uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with:
name: Windows Spack Installer
path: ${{ env.installer_root}}\pkg\Spack.msi
execute-installer:
needs: make-installer
needs: generate-installer-test
runs-on: windows-latest
defaults:
run:
shell: pwsh
steps:
- uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984
- uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5
with:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six pywin32 setuptools
python -m pip install --upgrade pip six pywin32 setuptools codecov coverage
- name: Setup installer directory
run: |
mkdir -p spack_installer

View File

@@ -1,284 +1,16 @@
# v0.19.0 (2022-11-11)
`v0.19.0` is a major feature release.
## Major features in this release
1. **Package requirements**
Spack's traditional [package preferences](
https://spack.readthedocs.io/en/latest/build_settings.html#package-preferences)
are soft, but we've added hard requirements to `packages.yaml` and `spack.yaml`
(#32528, #32369). Package requirements use the same syntax as specs:
```yaml
packages:
libfabric:
require: "@1.13.2"
mpich:
require:
- one_of: ["+cuda", "+rocm"]
```
More details in [the docs](
https://spack.readthedocs.io/en/latest/build_settings.html#package-requirements).
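With the `packages.yaml` above in place, concretization honors the requirements; a
minimal console sketch (output elided):
```console
$ spack spec libfabric   # concretizes to libfabric@1.13.2
$ spack spec mpich       # concretizes with exactly one of +cuda / +rocm
```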
2. **Environment UI Improvements**
* Fewer surprising modifications to `spack.yaml` (#33711):
* `spack install` in an environment will no longer add to the `specs:` list; you'll
need to either use `spack add <spec>` or `spack install --add <spec>`.
* Similarly, `spack uninstall` will not remove from your environment's `specs:`
list; you'll need to use `spack remove` or `spack uninstall --remove`.
This will make it easier to manage an environment, as there is clear separation
between the stack to be installed (`spack.yaml`/`spack.lock`) and which parts of
it should be installed (`spack install` / `spack uninstall`); see the console sketch after this item.
* `concretizer:unify:true` is now the default mode for new environments (#31787)
We see more users creating `unify:true` environments now. Users who need
`unify:false` can add it to their environment to get the old behavior. This will
concretize every spec in the environment independently.
* Include environment configuration from URLs (#29026, [docs](
https://spack.readthedocs.io/en/latest/environments.html#included-configurations))
You can now include configuration in your environment directly from a URL:
```yaml
spack:
include:
- https://github.com/path/to/raw/config/compilers.yaml
```
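A minimal console sketch of the explicit add/remove workflow from the first bullet
above, inside an active environment (`mypkg` is a placeholder spec):
```console
$ spack add mypkg                 # record mypkg in the environment's specs: list
$ spack install --add mypkg       # or install it and add it in one step
$ spack remove mypkg              # drop mypkg from specs: (does not uninstall)
$ spack uninstall --remove mypkg  # uninstall it and drop it from specs:
```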
4. **Multiple Build Systems**
An increasing number of packages in the ecosystem need the ability to support
multiple build systems (#30738, [docs](
https://spack.readthedocs.io/en/latest/packaging_guide.html#multiple-build-systems)),
either across versions, across platforms, or within the same version of the software.
This has been hard to support through multiple inheritance, as methods from different
build system superclasses would conflict. `package.py` files can now define separate
builder classes with installation logic for different build systems, e.g.:
```python
class ArpackNg(CMakePackage, AutotoolsPackage):
build_system(
conditional("cmake", when="@0.64:"),
conditional("autotools", when="@:0.63"),
default="cmake",
)
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def cmake_args(self):
pass
class AutotoolsBuilder(spack.build_systems.autotools.AutotoolsBuilder):
def configure_args(self):
pass
```
5. **Compiler and variant propagation**
Currently, compiler flags and variants are inconsistent: compiler flags set for a
package are inherited by its dependencies, while variants are not. We should have
these be consistent by allowing for inheritance to be enabled or disabled for both
variants and compiler flags.
Example syntax:
- `package ++variant`:
enabled variant that will be propagated to dependencies
- `package +variant`:
enabled variant that will NOT be propagated to dependencies
- `package ~~variant`:
disabled variant that will be propagated to dependencies
- `package ~variant`:
disabled variant that will NOT be propagated to dependencies
- `package cflags==-g`:
`cflags` will be propagated to dependencies
- `package cflags=-g`:
`cflags` will NOT be propagated to dependencies
Syntax for non-boolean variants is similar to compiler flags. More in the docs for
[variants](
https://spack.readthedocs.io/en/latest/basic_usage.html#variants) and [compiler flags](
https://spack.readthedocs.io/en/latest/basic_usage.html#compiler-flags).
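A brief console sketch of the propagation syntax above, using a hypothetical package
`mypkg` with a `cuda` variant:
```console
$ spack spec mypkg ++cuda      # +cuda is propagated to mypkg's dependencies
$ spack spec mypkg +cuda       # +cuda applies to mypkg only
$ spack spec mypkg cflags==-g  # cflags=-g is propagated to dependencies
```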
6. **Enhancements to git version specifiers**
* `v0.18.0` added the ability to use git commits as versions. You can now use the
`git.` prefix to specify git tags or branches as versions. All of these are valid git
versions in `v0.19` (#31200):
```console
foo@abcdef1234abcdef1234abcdef1234abcdef1234 # raw commit
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234 # commit with git prefix
foo@git.develop # the develop branch
foo@git.0.19 # use the 0.19 tag
```
* `v0.19` also gives you more control over how Spack interprets git versions, in case
Spack cannot detect the version from the git repository. You can suffix a git
version with `=<version>` to force Spack to concretize it as a particular version
(#30998, #31914, #32257):
```console
# use mybranch, but treat it as version 3.2 for version comparison
foo@git.mybranch=3.2
# use the given commit, but treat it as develop for version comparison
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234=develop
```
More in [the docs](
https://spack.readthedocs.io/en/latest/basic_usage.html#version-specifier)
7. **Changes to Cray EX Support**
Cray machines have historically had their own "platform" within Spack, because we
needed to go through the module system to leverage compilers and MPI installations on
these machines. The Cray EX programming environment now provides standalone `craycc`
executables and proper `mpicc` wrappers, so Spack can treat EX machines like Linux
with extra packages (#29392).
We expect this to greatly reduce bugs, as external packages and compilers can now be
used by prefix instead of through modules. We will also no longer be subject to
reproducibility issues when modules change from Cray PE release to release and from
site to site. This also simplifies dealing with the underlying Linux OS on Cray
systems, as Spack will properly model the machine's OS as either SuSE or RHEL.
8. **Improvements to tests and testing in CI**
* `spack ci generate --tests` will generate a `.gitlab-ci.yml` file that not only does
builds but also runs tests for built packages (#27877). Public GitHub pipelines now
also run tests in CI.
* `spack test run --explicit` will only run tests for packages that are explicitly
installed, instead of all packages.
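A short console sketch of the two commands named above (a configured GitLab CI
pipeline is assumed for the first):
```console
$ spack ci generate --tests   # emit a .gitlab-ci.yml that builds and tests packages
$ spack test run --explicit   # run tests only for explicitly installed packages
```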
9. **Experimental binding link model**
You can add a new option to `config.yaml` to make Spack embed absolute paths to
needed shared libraries in ELF executables and shared libraries on Linux (#31948, [docs](
https://spack.readthedocs.io/en/latest/config_yaml.html#shared-linking-bind)):
```yaml
config:
shared_linking:
type: rpath
bind: true
```
This can improve launch time at scale for parallel applications, and it can make
installations less susceptible to environment variables like `LD_LIBRARY_PATH`,
especially when dealing with external libraries that use `RUNPATH`. You can think of
this as a faster, even higher-precedence version of `RPATH`.
## Other new features of note
* `spack spec` prints dependencies more legibly. Dependencies in the output now appear
at the *earliest* level of indentation possible (#33406)
* You can override `package.py` attributes like `url` directly in `packages.yaml`; a sketch follows this list
(#33275, [docs](
https://spack.readthedocs.io/en/latest/build_settings.html#assigning-package-attributes))
* There are a number of new architecture-related format strings you can use in Spack
configuration files to specify paths (#29810, [docs](
https://spack.readthedocs.io/en/latest/configuration.html#config-file-variables))
* Spack now supports bootstrapping Clingo on Windows (#33400)
* There is now support for an `RPATH`-like library model on Windows (#31930)
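A sketch of the `packages.yaml` attribute override mentioned in the list above; the
package name and URL are illustrative, and the exact schema is in the linked docs:
```yaml
packages:
  mpich:
    package_attributes:
      # hypothetical override of the url attribute defined in package.py
      url: https://example.com/mirror/mpich-4.0.tar.gz
```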
## Performance Improvements
* Major performance improvements for installation from binary caches (#27610, #33628,
#33636, #33608, #33590, #33496)
* Test suite can now be parallelized using `xdist` (used in GitHub Actions) (#32361)
* Reduce lock contention for parallel builds in environments (#31643)
## New binary caches and stacks
* We now build nearly all of E4S with `oneapi` in our buildcache (#31781, #31804,
#31803, #31840, #31991, #32117, #32107, #32239)
* Added 3 new machine learning-centric stacks to binary cache: `x86_64_v3`, CUDA, ROCm
(#31592, #33463)
## Removals and Deprecations
* Support for Python 3.5 is dropped (#31908). Only Python 2.7 and 3.6+ are officially
supported.
* This is the last Spack release that will support Python 2 (#32615). Spack `v0.19`
will emit a deprecation warning if you run it with Python 2, and Python 2 support will
soon be removed from the `develop` branch.
* `LD_LIBRARY_PATH` is no longer set by default by `spack load` or module loads.
Setting `LD_LIBRARY_PATH` in Spack environments/modules can cause binaries from
outside of Spack to crash, and Spack's own builds use `RPATH` and do not need
`LD_LIBRARY_PATH` set in order to run. If you still want the old behavior, you
can run these commands to configure Spack to set `LD_LIBRARY_PATH`:
```console
spack config add modules:prefix_inspections:lib64:[LD_LIBRARY_PATH]
spack config add modules:prefix_inspections:lib:[LD_LIBRARY_PATH]
```
* The `spack:concretization:[together|separately]` option has been removed after being
deprecated in `v0.18`. Use `concretizer:unify:[true|false]`.
* `config:module_roots` is no longer supported after being deprecated in `v0.18`. Use
configuration in module sets instead (#28659, [docs](
https://spack.readthedocs.io/en/latest/module_file_support.html)).
* `spack activate` and `spack deactivate` are no longer supported, having been
deprecated in `v0.18`. Use an environment with a view instead of
activating/deactivating ([docs](
https://spack.readthedocs.io/en/latest/environments.html#configuration-in-spack-yaml)).
* The old YAML format for buildcaches is now deprecated (#33707). If you are using an
old buildcache with YAML metadata you will need to regenerate it with JSON metadata.
* `spack bootstrap trust` and `spack bootstrap untrust` are deprecated in favor of
`spack bootstrap enable` and `spack bootstrap disable` and will be removed in `v0.20`.
(#33600)
* The `graviton2` architecture has been renamed to `neoverse_n1`, and `graviton3`
is now `neoverse_v1`. Buildcaches using the old architecture names will need to be rebuilt.
* The terms `blacklist` and `whitelist` have been replaced with `include` and `exclude`
in all configuration files (#31569). You can use `spack config update` to
automatically fix your configuration files.
## Notable Bugfixes
* Permission setting on installation now handles effective uid properly (#19980)
* `buildable:true` for an MPI implementation now overrides `buildable:false` for `mpi` (#18269)
* Improved error messages when attempting to use an unconfigured compiler (#32084)
* Do not punish explicitly requested compiler mismatches in the solver (#30074)
* `spack stage`: add missing --fresh and --reuse (#31626)
* Fixes for adding build system executables like `cmake` to package scope (#31739)
* Bugfix for binary relocation with aliased strings produced by newer `binutils` (#32253)
## Spack community stats
* 6,751 total packages, 335 new since `v0.18.0`
* 141 new Python packages
* 89 new R packages
* 303 people contributed to this release
* 287 committers to packages
* 57 committers to core
# v0.18.1 (2022-07-19)
### Spack Bugfixes
* Fix several bugs related to bootstrapping (#30834,#31042,#31180)
* Fix a regression that was causing spec hashes to differ between
Python 2 and Python 3 (#31092)
* Fixed compiler flags for oneAPI and DPC++ (#30856)
* Fixed several issues related to concretization (#31142,#31153,#31170,#31226)
* Improved support for Cray manifest file and `spack external find` (#31144,#31201,#31173,#31186)
* Assign a version to openSUSE Tumbleweed according to the GLIBC version
in the system (#19895)
* Improved Dockerfile generation for `spack containerize` (#29741,#31321)
* Fixed a few bugs related to concurrent execution of commands (#31509,#31493,#31477)
### Package updates
* WarpX: add v22.06, fixed libs property (#30866,#31102)

View File

@@ -62,7 +62,6 @@ Resources:
* **Slack workspace**: [spackpm.slack.com](https://spackpm.slack.com).
To get an invitation, visit [slack.spack.io](https://slack.spack.io).
* [**Github Discussions**](https://github.com/spack/spack/discussions): not just for discussions, also Q&A.
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
`@mention` us!

View File

@@ -10,8 +10,8 @@ For more on Spack's release structure, see
| Version | Supported |
| ------- | ------------------ |
| develop | :white_check_mark: |
| 0.19.x | :white_check_mark: |
| 0.18.x | :white_check_mark: |
| 0.17.x | :white_check_mark: |
| 0.16.x | :white_check_mark: |
## Reporting a Vulnerability

View File

@@ -31,11 +31,13 @@ import os
import os.path
import sys
min_python3 = (3, 6)
min_python3 = (3, 5)
if sys.version_info[:2] < min_python3:
if sys.version_info[:2] < (2, 7) or (
sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < min_python3
):
v_info = sys.version_info[:3]
msg = "Spack requires Python %d.%d or higher " % min_python3
msg = "Spack requires Python 2.7 or %d.%d or higher " % min_python3
msg += "You are running spack with Python %d.%d.%d." % v_info
sys.exit(msg)
@@ -47,8 +49,52 @@ spack_prefix = os.path.dirname(os.path.dirname(spack_file))
spack_lib_path = os.path.join(spack_prefix, "lib", "spack")
sys.path.insert(0, spack_lib_path)
from spack_installable.main import main # noqa: E402
# Add external libs
spack_external_libs = os.path.join(spack_lib_path, "external")
if sys.version_info[:2] <= (2, 7):
sys.path.insert(0, os.path.join(spack_external_libs, "py2"))
sys.path.insert(0, spack_external_libs)
# Here we delete ruamel.yaml in case it has been already imported from site
# (see #9206 for a broader description of the issue).
#
# Briefly: ruamel.yaml produces a .pth file when installed with pip that
# makes the site installed package the preferred one, even though sys.path
# is modified to point to another version of ruamel.yaml.
if "ruamel.yaml" in sys.modules:
del sys.modules["ruamel.yaml"]
if "ruamel" in sys.modules:
del sys.modules["ruamel"]
# The following code is here to avoid failures when updating
# the develop version, due to spurious argparse.pyc files remaining
# in the libs/spack/external directory, see:
# https://github.com/spack/spack/pull/25376
# TODO: Remove in v0.18.0 or later
try:
import argparse
except ImportError:
argparse_pyc = os.path.join(spack_external_libs, "argparse.pyc")
if not os.path.exists(argparse_pyc):
raise
try:
os.remove(argparse_pyc)
import argparse # noqa: F401
except Exception:
msg = (
"The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. "
"Either delete it manually or ask some administrator to "
"delete it for you."
)
print(msg.format(argparse_pyc))
sys.exit(1)
import spack.main # noqa: E402
# Once we've set up the system path, run the spack main method
if __name__ == "__main__":
sys.exit(main())
sys.exit(spack.main.main())

View File

@@ -1,95 +0,0 @@
#!/bin/bash
set -euo pipefail
[[ -n "${TMPCONFIG_DEBUG:=}" ]] && set -x
DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
mkdir -p "${XDG_RUNTIME_DIR:=/tmp}/spack-tests"
export TMPDIR="${XDG_RUNTIME_DIR}"
export TMP_DIR="$(mktemp -d -t spack-test-XXXXX)"
clean_up() {
[[ -n "$TMPCONFIG_DEBUG" ]] && printf "cleaning up: $TMP_DIR\n"
rm -rf "$TMP_DIR"
}
trap clean_up EXIT
trap clean_up ERR
[[ -n "$TMPCONFIG_DEBUG" ]] && printf "Redirecting TMP_DIR and spack directories to $TMP_DIR\n"
export BOOTSTRAP="${SPACK_USER_CACHE_PATH:=$HOME/.spack}/bootstrap"
export SPACK_USER_CACHE_PATH="$TMP_DIR/user_cache"
mkdir -p "$SPACK_USER_CACHE_PATH"
private_bootstrap="$SPACK_USER_CACHE_PATH/bootstrap"
use_spack=''
use_bwrap=''
# argument handling
while (($# >= 1)) ; do
case "$1" in
-b) # privatize bootstrap too, useful for CI but not always cheap
shift
export BOOTSTRAP="$private_bootstrap"
;;
-B) # use specified bootstrap dir
export BOOTSTRAP="$2"
shift 2
;;
-s) # run spack directly with remaining args
shift
use_spack=1
;;
--contain=bwrap)
if bwrap --help 2>&1 > /dev/null ; then
use_bwrap=1
else
echo Bubblewrap containment requested, but no bwrap command found
exit 1
fi
shift
;;
--)
shift
break
;;
*)
break
;;
esac
done
typeset -a CMD
if [[ -n "$use_spack" ]] ; then
CMD=("$DIR/spack" "$@")
else
CMD=("$@")
fi
mkdir -p "$BOOTSTRAP"
export SPACK_SYSTEM_CONFIG_PATH="$TMP_DIR/sys_conf"
export SPACK_USER_CONFIG_PATH="$TMP_DIR/user_conf"
mkdir -p "$SPACK_USER_CONFIG_PATH"
cat >"$SPACK_USER_CONFIG_PATH/config.yaml" <<EOF
config:
install_tree:
root: $TMP_DIR/install
misc_cache: $$user_cache_path/cache
source_cache: $$user_cache_path/source
EOF
cat >"$SPACK_USER_CONFIG_PATH/bootstrap.yaml" <<EOF
bootstrap:
root: $BOOTSTRAP
EOF
if [[ -n "$use_bwrap" ]] ; then
CMD=(
bwrap
--dev-bind / /
--ro-bind "$DIR/.." "$DIR/.." # do not touch spack root
--ro-bind $HOME/.spack $HOME/.spack # do not touch user config/cache dir
--bind "$TMP_DIR" "$TMP_DIR"
--bind "$BOOTSTRAP" "$BOOTSTRAP"
--die-with-parent
"${CMD[@]}"
)
fi
(( ${TMPCONFIG_DEBUG:=0} > 1)) && echo "Running: ${CMD[@]}"
"${CMD[@]}"

View File

@@ -9,15 +9,14 @@ bootstrap:
# may not be able to bootstrap all the software that Spack needs,
# depending on its type.
sources:
- name: 'github-actions-v0.4'
metadata: $spack/share/spack/bootstrap/github-actions-v0.4
- name: 'github-actions-v0.3'
metadata: $spack/share/spack/bootstrap/github-actions-v0.3
- name: 'github-actions-v0.2'
metadata: $spack/share/spack/bootstrap/github-actions-v0.2
- name: 'github-actions-v0.1'
metadata: $spack/share/spack/bootstrap/github-actions-v0.1
- name: 'spack-install'
metadata: $spack/share/spack/bootstrap/spack-install
trusted:
# By default we trust bootstrapping from sources and from binaries
# produced on Github via the workflow
github-actions-v0.4: true
github-actions-v0.3: true
github-actions-v0.2: true
spack-install: true

View File

@@ -33,4 +33,4 @@ concretizer:
# environments can always be activated. When "false" perform concretization separately
# on each root spec, allowing different versions and variants of the same package in
# an environment.
unify: true
unify: false

View File

@@ -19,7 +19,7 @@ config:
install_tree:
root: $spack/opt/spack
projections:
all: "{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}"
all: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}"
# install_tree can include an optional padded length (int or boolean)
# default is False (do not pad)
# if padded_length is True, Spack will pad as close to the system max path
@@ -187,20 +187,10 @@ config:
package_lock_timeout: null
# Control how shared libraries are located at runtime on Linux. See the
# the Spack documentation for details.
shared_linking:
# Spack automatically embeds runtime search paths in ELF binaries for their
# dependencies. Their type can either be "rpath" or "runpath". For glibc, rpath is
# inherited and has precedence over LD_LIBRARY_PATH; runpath is not inherited
# and of lower precedence. DO NOT MIX these within the same install tree.
type: rpath
# (Experimental) Embed absolute paths of dependent libraries directly in ELF
# binaries to avoid runtime search. This can improve startup time of
# executables with many dependencies, in particular on slow filesystems.
bind: false
# Control whether Spack embeds RPATH or RUNPATH attributes in ELF binaries.
# Has no effect on macOS. DO NOT MIX these within the same install tree.
# See the Spack documentation for details.
shared_linking: 'rpath'
# Set to 'false' to allow installation on filesystems that don't allow the setgid bit
@@ -211,11 +201,3 @@ config:
# building and installing packages. This gives information about Spack's
# current progress as well as the current and total number of packages.
terminal_title: false
# Number of seconds a buildcache's index.json is cached locally before probing
# for updates, within a single Spack invocation. Defaults to 10 minutes.
binary_index_ttl: 600
flags:
# Whether to keep -Werror flags active in package builds.
keep_werror: 'none'

View File

@@ -27,8 +27,7 @@ packages:
fuse: [libfuse]
gl: [glx, osmesa]
glu: [mesa-glu, openglu]
golang: [go, gcc]
go-external-or-gccgo-bootstrap: [go-bootstrap, gcc]
golang: [gcc]
iconv: [libiconv]
ipp: [intel-ipp]
java: [openjdk, jdk, ibm-java]

View File

@@ -1,5 +1,5 @@
config:
locks: false
concretizer: clingo
concretizer: original
build_stage::
- '$spack/.staging'

lib/spack/docs/_spack_root Symbolic link
View File

@@ -0,0 +1 @@
../../..

View File

@@ -85,7 +85,7 @@ All packages whose names or descriptions contain documentation:
To get more information on a particular package from `spack list`, use
`spack info`. Just supply the name of a package:
.. command-output:: spack info --all mpich
.. command-output:: spack info mpich
Most of the information is self-explanatory. The *safe versions* are
versions that Spack knows the checksum for, and it will use the
@@ -998,15 +998,11 @@ More formally, a spec consists of the following pieces:
* ``%`` Optional compiler specifier, with an optional compiler version
(``gcc`` or ``gcc@4.7.3``)
* ``+`` or ``-`` or ``~`` Optional variant specifiers (``+debug``,
``-qt``, or ``~qt``) for boolean variants. Use ``++`` or ``--`` or
``~~`` to propagate variants through the dependencies (``++debug``,
``--qt``, or ``~~qt``).
``-qt``, or ``~qt``) for boolean variants
* ``name=<value>`` Optional variant specifiers that are not restricted to
boolean variants. Use ``name==<value>`` to propagate variant through the
dependencies.
boolean variants
* ``name=<value>`` Optional compiler flag specifiers. Valid flag names are
``cflags``, ``cxxflags``, ``fflags``, ``cppflags``, ``ldflags``, and ``ldlibs``.
Use ``name==<value>`` to propagate compiler flags through the dependencies.
* ``target=<value> os=<value>`` Optional architecture specifier
(``target=haswell os=CNL10``)
* ``^`` Dependency specs (``^callpath@1.1``)
@@ -1114,21 +1110,21 @@ set of arbitrary versions, such as ``@1.0,1.5,1.7`` (``1.0``, ``1.5``,
or ``1.7``). When you supply such a specifier to ``spack install``,
it constrains the set of versions that Spack will install.
For packages with a ``git`` attribute, ``git`` references
may be specified instead of a numerical version i.e. branches, tags
and commits. Spack will stage and build based off the ``git``
reference provided. Acceptable syntaxes for this are:
.. code-block:: sh
# branches and tags
foo@git.develop # use the develop branch
foo@git.0.19 # use the 0.19 tag
# commit hashes
foo@abcdef1234abcdef1234abcdef1234abcdef1234 # 40 character hashes are automatically treated as git commits
foo@git.abcdef1234abcdef1234abcdef1234abcdef1234
Spack versions from git reference either have an associated version supplied by the user,
or infer a relationship to known versions from the structure of the git repository. If an
associated version is supplied by the user, Spack treats the git version as equivalent to that
@@ -1230,23 +1226,6 @@ variants using the backwards compatibility syntax and uses only ``~``
for disabled boolean variants. The ``-`` and spaces on the command
line are provided for convenience and legibility.
Spack allows variants to propagate their value to the package's
dependency by using ``++``, ``--``, and ``~~`` for boolean variants.
For example, for a ``debug`` variant:
.. code-block:: sh
mpileaks ++debug # enabled debug will be propagated to dependencies
mpileaks +debug # only mpileaks will have debug enabled
To propagate the value of non-boolean variants Spack uses ``name==value``.
For example, for the ``stackstart`` variant:
.. code-block:: sh
mpileaks stackstart==4 # variant will be propagated to dependencies
mpileaks stackstart=4 # only mpileaks will have this variant value
^^^^^^^^^^^^^^
Compiler Flags
^^^^^^^^^^^^^^
@@ -1254,15 +1233,10 @@ Compiler Flags
Compiler flags are specified using the same syntax as non-boolean variants,
but fulfill a different purpose. While the function of a variant is set by
the package, compiler flags are used by the compiler wrappers to inject
flags into the compile line of the build. Additionally, compiler flags can
be inherited by dependencies by using ``==``.
``spack install libdwarf cppflags=="-g"`` will install both libdwarf and
libelf with the ``-g`` flag injected into their compile line.
.. note::
versions of spack prior to 0.19.0 will propagate compiler flags using
the ``=`` syntax.
flags into the compile line of the build. Additionally, compiler flags are
inherited by dependencies. ``spack install libdwarf cppflags="-g"`` will
install both libdwarf and libelf with the ``-g`` flag injected into their
compile line.
Notice that the value of the compiler flags must be quoted if it
contains any spaces. Any of ``cppflags=-O3``, ``cppflags="-O3"``,
@@ -1464,7 +1438,7 @@ built.
You can see what virtual packages a particular package provides by
getting info on it:
.. command-output:: spack info --virtuals mpich
.. command-output:: spack info mpich
Spack is unique in that its virtual packages can be versioned, just
like regular packages. A particular version of a package may provide
@@ -1672,13 +1646,9 @@ own install prefix. However, certain packages are typically installed
`Python <https://www.python.org>`_ packages are typically installed in the
``$prefix/lib/python-2.7/site-packages`` directory.
In Spack, installation prefixes are immutable, so this type of installation
is not directly supported. However, it is possible to create views that
allow you to merge install prefixes of multiple packages into a single new prefix.
Views are a convenient way to get a more traditional filesystem structure.
Using *extensions*, you can ensure that Python packages always share the
same prefix in the view as Python itself. Suppose you have
Python installed like so:
Spack has support for this type of installation as well. In Spack,
a package that can live inside the prefix of another package is called
an *extension*. Suppose you have Python installed like so:
.. code-block:: console
@@ -1716,6 +1686,8 @@ You can find extensions for your Python installation like this:
py-ipython@2.3.1 py-pygments@2.0.1 py-setuptools@11.3.1
py-matplotlib@1.4.2 py-pyparsing@2.0.3 py-six@1.9.0
==> None activated.
The extensions are a subset of what's returned by ``spack list``, and
they are packages like any other. They are installed into their own
prefixes, and you can see this with ``spack find --paths``:
@@ -1743,72 +1715,32 @@ directly when you run ``python``:
ImportError: No module named numpy
>>>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using Extensions in Environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^
Using Extensions
^^^^^^^^^^^^^^^^
The recommended way of working with extensions such as ``py-numpy``
above is through :ref:`Environments <environments>`. For example,
the following creates an environment in the current working directory
with a filesystem view in the ``./view`` directory:
.. code-block:: console
$ spack env create --with-view view --dir .
$ spack -e . add py-numpy
$ spack -e . concretize
$ spack -e . install
We recommend environments for two reasons. Firstly, environments
can be activated (requires :ref:`shell-support`):
.. code-block:: console
$ spack env activate .
which sets all the right environment variables such as ``PATH`` and
``PYTHONPATH``. This ensures that
.. code-block:: console
$ python
>>> import numpy
works. Secondly, even without shell support, the view ensures
that Python can locate its extensions:
.. code-block:: console
$ ./view/bin/python
>>> import numpy
See :ref:`environments` for a more in-depth description of Spack
environments and customizations to views.
^^^^^^^^^^^^^^^^^^^^
Using ``spack load``
^^^^^^^^^^^^^^^^^^^^
A more traditional way of using Spack and extensions is ``spack load``
(requires :ref:`shell-support`). This will add the extension to ``PYTHONPATH``
in your current shell, and Python itself will be available in the ``PATH``:
There are four ways to get ``numpy`` working in Python. The first is
to use :ref:`shell-support`. You can simply ``load`` the extension,
and it will be added to the ``PYTHONPATH`` in your current shell:
.. code-block:: console
$ spack load python
$ spack load py-numpy
$ python
>>> import numpy
Now ``import numpy`` will succeed for as long as you keep your current
session open.
The loaded packages can be checked using ``spack find --loaded``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Loading Extensions via Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Apart from ``spack env activate`` and ``spack load``, you can load numpy
through your environment modules (using ``environment-modules`` or
``lmod``). This will also add the extension to the ``PYTHONPATH`` in
your current shell.
Instead of using Spack's environment modification capabilities through
the ``spack load`` command, you can load numpy through your
environment modules (using ``environment-modules`` or ``lmod``). This
will also add the extension to the ``PYTHONPATH`` in your current
shell.
.. code-block:: console
@@ -1818,6 +1750,130 @@ If you do not know the name of the specific numpy module you wish to
load, you can use the ``spack module tcl|lmod loads`` command to get
the name of the module from the Spack spec.
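For instance (a hypothetical sketch; the generated module name depends on
your site's naming scheme and the spec hash):

.. code-block:: console

   $ spack module tcl loads py-numpy
   # py-numpy@1.9.1%gcc@4.4.7 arch=linux-debian7-x86_64-66733244
   module load py-numpy-1.9.1-gcc-4.4.7-66733244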
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Activating Extensions in a View
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Another way to use extensions is to create a view, which merges the
python installation along with the extensions into a single prefix.
See :ref:`configuring_environment_views` for a more in-depth description
of views.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Activating Extensions Globally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
As an alternative to creating a merged prefix with Python and its extensions,
and prior to support for views, Spack has provided a means to install the
extension into the Spack installation prefix for the extendee. This has
typically been useful since extendable packages typically search their own
installation path for addons by default.
Global activations are performed with the ``spack activate`` command:
.. _cmd-spack-activate:
^^^^^^^^^^^^^^^^^^
``spack activate``
^^^^^^^^^^^^^^^^^^
.. code-block:: console
$ spack activate py-numpy
==> Activated extension py-setuptools@11.3.1%gcc@4.4.7 arch=linux-debian7-x86_64-3c74eb69 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-nose@1.3.4%gcc@4.4.7 arch=linux-debian7-x86_64-5f70f816 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-numpy@1.9.1%gcc@4.4.7 arch=linux-debian7-x86_64-66733244 for python@2.7.8%gcc@4.4.7.
Several things have happened here. The user requested that
``py-numpy`` be activated in the ``python`` installation it was built
with. Spack knows that ``py-numpy`` depends on ``py-nose`` and
``py-setuptools``, so it activated those packages first. Finally,
once all dependencies were activated in the ``python`` installation,
``py-numpy`` was activated as well.
If we run ``spack extensions`` again, we now see the three new
packages listed as activated:
.. code-block:: console
$ spack extensions python
==> python@2.7.8%gcc@4.4.7 arch=linux-debian7-x86_64-703c7a96
==> 36 extensions:
geos py-ipython py-pexpect py-pyside py-sip
py-basemap py-libxml2 py-pil py-pytz py-six
py-biopython py-mako py-pmw py-rpy2 py-sympy
py-cython py-matplotlib py-pychecker py-scientificpython py-virtualenv
py-dateutil py-mpi4py py-pygments py-scikit-learn
py-epydoc py-mx py-pylint py-scipy
py-gnuplot py-nose py-pyparsing py-setuptools
py-h5py py-numpy py-pyqt py-shiboken
==> 12 installed:
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-dateutil@2.4.0 py-nose@1.3.4 py-pyside@1.2.2
py-dateutil@2.4.0 py-numpy@1.9.1 py-pytz@2014.10
py-ipython@2.3.1 py-pygments@2.0.1 py-setuptools@11.3.1
py-matplotlib@1.4.2 py-pyparsing@2.0.3 py-six@1.9.0
==> 3 currently activated:
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-nose@1.3.4 py-numpy@1.9.1 py-setuptools@11.3.1
Now, when a user runs python, ``numpy`` will be available for import
*without* the user having to explicitly load it. ``python@2.7.8`` now
acts like a system Python installation with ``numpy`` installed inside
of it.
Spack accomplishes this by symbolically linking the *entire* prefix of
the ``py-numpy`` package into the prefix of the ``python`` package. To the
python interpreter, it looks like ``numpy`` is installed in the
``site-packages`` directory.
The only limitation of global activation is that you can only have a *single*
version of an extension activated at a time. This is because multiple
versions of the same extension would conflict if symbolically linked
into the same prefix. Users who want a different version of a package
can still get it by using environment modules or views, but they will have to
explicitly load their preferred version.
^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack activate --force``
^^^^^^^^^^^^^^^^^^^^^^^^^^
If, for some reason, you want to activate a package *without* its
dependencies, you can use ``spack activate --force``:
.. code-block:: console
$ spack activate --force py-numpy
==> Activated extension py-numpy@1.9.1%gcc@4.4.7 arch=linux-debian7-x86_64-66733244 for python@2.7.8%gcc@4.4.7.
.. _cmd-spack-deactivate:
^^^^^^^^^^^^^^^^^^^^
``spack deactivate``
^^^^^^^^^^^^^^^^^^^^
We've seen how activating an extension can be used to set up a default
version of a Python module. Obviously, you may want to change that at
some point. ``spack deactivate`` is the command for this. There are
several variants:
* ``spack deactivate <extension>`` will deactivate a single
extension. If another activated extension depends on this one,
Spack will warn you and exit with an error.
* ``spack deactivate --force <extension>`` deactivates an extension
regardless of packages that depend on it.
* ``spack deactivate --all <extension>`` deactivates an extension and
all of its dependencies. Use ``--force`` to disregard dependents.
* ``spack deactivate --all <extendee>`` deactivates *all* activated
extensions of a package. For example, to deactivate *all* python
extensions, use:
.. code-block:: console
$ spack deactivate --all python
-----------------------
Filesystem requirements
-----------------------

View File

@@ -15,13 +15,15 @@ is an entire command dedicated to the management of every aspect of bootstrappin
.. command-output:: spack bootstrap --help
Spack is configured to bootstrap its dependencies lazily by default; i.e. the first time they are needed and
can't be found. You can readily check if any prerequisite for using Spack is missing by running:
The first thing to know to understand bootstrapping in Spack is that each of
Spack's dependencies is bootstrapped lazily; i.e. the first time it is needed and
can't be found. You can readily check if any prerequisite for using Spack
is missing by running:
.. code-block:: console
% spack bootstrap status
Spack v0.19.0 - python@3.8
Spack v0.17.1 - python@3.8
[FAIL] Core Functionalities
[B] MISSING "clingo": required to concretize specs
@@ -46,21 +48,6 @@ they can be bootstrapped. Running a command that concretizes a spec, like:
triggers the bootstrapping of clingo from pre-built binaries as expected.
Users can also bootstrap all the dependencies needed by Spack in a single command, which
might be useful to setup containers or other similar environments:
.. code-block:: console
$ spack bootstrap now
==> Bootstrapping clingo from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-shqedxgvjnhiwdcdrvjhbd73jaevv7wt.spec.json
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64/gcc-10.2.1/clingo-bootstrap-spack/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-shqedxgvjnhiwdcdrvjhbd73jaevv7wt.spack
==> Installing "clingo-bootstrap@spack%gcc@10.2.1~docs~ipo+python+static_libstdcpp build_type=Release arch=linux-centos7-x86_64" from a buildcache
==> Bootstrapping patchelf from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.15.0-htk62k7efo2z22kh6kmhaselru7bfkuc.spec.json
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64/gcc-10.2.1/patchelf-0.15.0/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.15.0-htk62k7efo2z22kh6kmhaselru7bfkuc.spack
==> Installing "patchelf@0.15.0%gcc@10.2.1 ldflags="-static-libstdc++ -static-libgcc" arch=linux-centos7-x86_64" from a buildcache
-----------------------
The Bootstrapping store
-----------------------
@@ -120,19 +107,19 @@ If need be, you can disable bootstrapping altogether by running:
in which case it's your responsibility to ensure Spack runs in an
environment where all its prerequisites are installed. You can
also configure Spack to skip certain bootstrapping methods by disabling
them specifically:
also configure Spack to skip certain bootstrapping methods by *untrusting*
them. For instance:
.. code-block:: console
% spack bootstrap disable github-actions
==> "github-actions" is now disabled and will not be used for bootstrapping
% spack bootstrap untrust github-actions
==> "github-actions" is now untrusted and will not be used for bootstrapping
tells Spack to skip trying to bootstrap from binaries. To add the "github-actions" method back you can:
.. code-block:: console
% spack bootstrap enable github-actions
% spack bootstrap trust github-actions
There is also an option to reset the bootstrapping configuration to Spack's defaults:

View File

@@ -49,8 +49,9 @@ packages rather than building its own packages. This may be desirable
if machines ship with system packages, such as a customized MPI
that should be used instead of Spack building its own MPI.
External packages are configured through the ``packages.yaml`` file.
Here's an example of an external configuration:
External packages are configured through the ``packages.yaml`` file found
in a Spack installation's ``etc/spack/`` or a user's ``~/.spack/``
directory. Here's an example of an external configuration:
.. code-block:: yaml
@@ -96,14 +97,11 @@ Each package version and compiler listed in an external should
have entries in Spack's packages and compiler configuration, even
though the package and compiler may not ever be built.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prevent packages from being built from sources
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding an external spec in ``packages.yaml`` allows Spack to use an external location,
but it does not prevent Spack from building packages from sources. In the above example,
Spack might choose for many valid reasons to start building and linking with the
latest version of OpenMPI rather than continue using the pre-installed OpenMPI versions.
The packages configuration can tell Spack to use an external location
for certain package versions, but it does not restrict Spack to using
external packages. In the above example, since newer versions of OpenMPI
are available, Spack will choose to start building and linking with the
latest version rather than continue using the pre-installed OpenMPI versions.
To prevent this, the ``packages.yaml`` configuration also allows packages
to be flagged as non-buildable. The previous example could be modified to
@@ -123,15 +121,9 @@ be:
buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
its own version of OpenMPI from sources, and it will instead always rely on a pre-built
OpenMPI.
.. note::
If ``concretizer:reuse`` is on (see :ref:`concretizer-options` for more information on that flag)
pre-built specs include specs already available from a local store, an upstream store, a registered
buildcache or specs marked as externals in ``packages.yaml``. If ``concretizer:reuse`` is off, only
external specs in ``packages.yaml`` are included in the list of pre-built specs.
its own version of OpenMPI, and it will instead always rely on a pre-built
OpenMPI. Similar to ``paths``, ``buildable`` is specified as a property under
a package name.
If an external module is specified as not buildable, then Spack will load the
external module into the build environment which can be used for linking.
@@ -140,10 +132,6 @@ The ``buildable`` does not need to be paired with external packages.
It could also be used alone to forbid packages that may be
buggy or otherwise undesirable.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Non-buildable virtual packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Virtual packages in Spack can also be specified as not buildable, and
external implementations can be provided. In the example above,
OpenMPI is configured as not buildable, but Spack will often prefer
@@ -165,37 +153,21 @@ but more conveniently:
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
In cases where the concretizer is configured to reuse specs, and other ``mpi`` providers
(available via stores or buildcaches) are not wanted, Spack can be configured to require
specs matching only the available externals:
Implementations can also be listed immediately under the virtual they provide:
.. code-block:: yaml
packages:
mpi:
buildable: False
require:
- one_of: [
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64",
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug",
"openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
]
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
mpich@3.3 %clang@9.0.0 arch=linux-debian7-x86_64: /opt/mpich-3.3-intel
This configuration prevents any spec using MPI and originating from stores or buildcaches to be reused,
unless it matches the requirements under ``packages:mpi:require``. For more information on requirements see
:ref:`package-requirements`.
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
.. _cmd-spack-external-find:
@@ -222,6 +194,11 @@ Specific limitations include:
* Packages are not discoverable by default: For a package to be
discoverable with ``spack external find``, it needs to add special
logic. See :ref:`here <make-package-findable>` for more details.
* The current implementation only collects and examines executable files,
so it is typically only useful for build/run dependencies (in some cases
if a library package also provides an executable, it may be possible to
extract a meaningful Spec by running the executable - for example the
compiler wrappers in MPI implementations).
* The logic does not search through module files, it can only detect
packages with executables defined in ``PATH``; you can help Spack locate
externals which use module files by loading any associated modules for
@@ -302,31 +279,88 @@ microarchitectures considered during the solve are constrained to be compatible
host Spack is currently running on. For instance, if this option is set to ``true``, a
user cannot concretize for ``target=icelake`` while running on a Haswell node.
.. _package-preferences:
-------------------
Package Preferences
-------------------
Spack can be configured to prefer certain compilers, package
versions, dependencies, and variants during concretization.
The preferred configuration can be controlled via the
``~/.spack/packages.yaml`` file for user configurations, or the
``etc/spack/packages.yaml`` site configuration.
Here's an example ``packages.yaml`` file that sets preferred packages:
.. code-block:: yaml
packages:
opencv:
compiler: [gcc@4.9]
variants: +debug
gperftools:
version: [2.2, 2.4, 2.3]
all:
compiler: [gcc@4.4.7, 'gcc@4.6:', intel, clang, pgi]
target: [sandybridge]
providers:
mpi: [mvapich2, mpich, openmpi]
At a high level, this example is specifying how packages should be
concretized. The opencv package should prefer using GCC 4.9 and
be built with debug options. The gperftools package should prefer version
2.2 over 2.4. Every package on the system should prefer mvapich2 for
its MPI and GCC 4.4.7 (except for opencv, which overrides this by preferring GCC 4.9).
These options are used to fill in implicit defaults. Any of them can be overwritten
on the command line if explicitly requested.
Each ``packages.yaml`` file begins with the string ``packages:`` and
package names are specified on the next level. The special string ``all``
applies settings to *all* packages. Underneath each package name is one
or more components: ``compiler``, ``variants``, ``version``,
``providers``, and ``target``. Each component has an ordered list of
spec ``constraints``, with earlier entries in the list being preferred
over later entries.
Sometimes a package installation may have constraints that forbid
the first concretization rule, in which case Spack will use the first
legal concretization rule. Going back to the example, if a user
requests gperftools 2.3 or later, then Spack will install version 2.4
as the 2.4 version of gperftools is preferred over 2.3.
An explicit concretization rule in the preferred section will always
take preference over unlisted concretizations. In the above example,
xlc isn't listed in the compiler list. Every listed compiler from
gcc to pgi will thus be preferred over the xlc compiler.
The syntax for the ``provider`` section differs slightly from other
concretization rules. A provider lists a value that packages may
``depend_on`` (e.g, MPI) and a list of rules for fulfilling that
dependency.
.. _package-requirements:
--------------------
Package Requirements
--------------------
Spack can be configured to always use certain compilers, package
versions, and variants during concretization through package
requirements.
You can use the configuration to force the concretizer to choose
specific properties for packages when building them. Like preferences,
these are only applied when the package is required by some other
request (e.g. if the package is needed as a dependency of a
request to ``spack install``).
Package requirements are useful when you find yourself repeatedly
specifying the same constraints on the command line, and wish that
Spack respects these constraints whether you mention them explicitly
or not. Another use case is specifying constraints that should apply
to all root specs in an environment, without having to repeat the
constraint everywhere.
An example of where this is useful is if you have a package that
is normally built as a dependency but only under certain circumstances
(e.g. only when a variant on a dependent is active): you can make
sure that it always builds the way you want it to; this distinguishes
package configuration requirements from constraints that you add to
``spack install`` or to environments (in those cases, the associated
packages are always built).
Apart from that, requirements config is more flexible than constraints
on the command line, because it can specify constraints on packages
*when they occur* as a dependency. In contrast, on the command line it
is not possible to specify constraints on dependencies while also keeping
those dependencies optional.
The package requirements configuration is specified in ``packages.yaml``
keyed by package name:
The following is an example of how to enforce package properties in
``packages.yaml``:
.. code-block:: yaml
@@ -335,7 +369,7 @@ keyed by package name:
require: "@1.13.2"
openmpi:
require:
- any_of: ["~cuda", "%gcc"]
- any_of: ["~cuda", "gcc"]
mpich:
require:
- one_of: ["+cuda", "+rocm"]
@@ -362,16 +396,6 @@ choose between a set of options using ``any_of`` or ``one_of``:
``mpich`` already includes a conflict, so this is redundant but
still demonstrates the concept).
.. note::
For ``any_of`` and ``one_of``, the order of specs indicates a
preference: items that appear earlier in the list are preferred
(note that these preferences can be ignored in favor of others).
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting default requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can also set default requirements for all packages under ``all``
like this:
@@ -395,96 +419,16 @@ under ``all`` are disregarded. For example, with a configuration like this:
cmake:
require: '%gcc'
Spack requires ``cmake`` to use ``gcc`` and all other nodes (including ``cmake``
dependencies) to use ``clang``.
Spack requires ``cmake`` to use ``gcc`` and all other nodes (including cmake dependencies)
to use ``clang``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting requirements on virtual specs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Other notes about ``requires``:
A requirement on a virtual spec applies whenever that virtual is present in the DAG.
This can be useful for fixing which virtual provider you want to use:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
With the configuration above the only allowed ``mpi`` provider is ``mvapich2 %gcc``.
Requirements on the virtual spec and on the specific provider are both applied, if
present. For instance with a configuration like:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
mvapich2:
require: '~cuda'
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
.. _package-preferences:
-------------------
Package Preferences
-------------------
In some cases package requirements can be too strong, and package
preferences are the better option. Package preferences do not impose
constraints on packages for particular versions or variants values,
they rather only set defaults -- the concretizer is free to change
them if it must due to other constraints. Also note that package
preferences are of lower priority than reuse of already installed
packages.
Here's an example ``packages.yaml`` file that sets preferred packages:
.. code-block:: yaml
packages:
opencv:
compiler: [gcc@4.9]
variants: +debug
gperftools:
version: [2.2, 2.4, 2.3]
all:
compiler: [gcc@4.4.7, 'gcc@4.6:', intel, clang, pgi]
target: [sandybridge]
providers:
mpi: [mvapich2, mpich, openmpi]
At a high level, this example is specifying how packages are preferably
concretized. The opencv package should prefer using GCC 4.9 and
be built with debug options. The gperftools package should prefer version
2.2 over 2.4. Every package on the system should prefer mvapich2 for
its MPI and GCC 4.4.7 (except for opencv, which overrides this by preferring GCC 4.9).
These options are used to fill in implicit defaults. Any of them can be overwritten
on the command line if explicitly requested.
Package preferences accept the follow keys or components under
the specific package (or ``all``) section: ``compiler``, ``variants``,
``version``, ``providers``, and ``target``. Each component has an
ordered list of spec ``constraints``, with earlier entries in the
list being preferred over later entries.
Sometimes a package installation may have constraints that forbid
the first concretization rule, in which case Spack will use the first
legal concretization rule. Going back to the example, if a user
requests gperftools 2.3 or later, then Spack will install version 2.4
as the 2.4 version of gperftools is preferred over 2.3.
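As a quick way to check which version a preference actually selects (a
hypothetical sketch; output is abridged and will vary by platform and
compiler):

.. code-block:: console

   $ spack spec gperftools@2.3:
   ...
   gperftools@2.4%gcc@4.4.7 arch=linux-debian7-x86_64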
An explicit concretization rule in the preferred section will always
take preference over unlisted concretizations. In the above example,
xlc isn't listed in the compiler list. Every listed compiler from
gcc to pgi will thus be preferred over the xlc compiler.
The syntax for the ``provider`` section differs slightly from other
concretization rules. A provider lists a value that packages may
``depends_on`` (e.g, MPI) and a list of rules for fulfilling that
dependency.
* You cannot specify requirements for virtual packages (e.g. you can
specify requirements for ``openmpi`` but not ``mpi``).
* For ``any_of`` and ``one_of``, the order of specs indicates a
preference: items that appear earlier in the list are preferred
(note that these preferences can be ignored in favor of others).
.. _package_permissions:
@@ -534,25 +478,3 @@ directories inside the install prefix. This will ensure that even
manually placed files within the install prefix are owned by the
assigned group. If no group is assigned, Spack will allow the OS
default behavior to go as expected.
----------------------------
Assigning Package Attributes
----------------------------
You can assign class-level attributes in the configuration:
.. code-block:: yaml
packages:
mpileaks:
# Override existing attributes
url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
# ... or add new ones
x: 1
Attributes set this way will be accessible to any method executed
in the package.py file (e.g. the ``install()`` method). Values for these
attributes may be any value parseable by yaml.
These can only be applied to specific packages, not "all" or
virtual packages.
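As a minimal sketch of how such attributes surface in a recipe (the
``Mpileaks`` class body here is made up for illustration):

.. code-block:: python

   class Mpileaks(AutotoolsPackage):
       def install(self, spec, prefix):
           # Both attributes come from packages.yaml and override any
           # class-level defaults defined in package.py.
           print(self.url)  # http://www.somewhereelse.com/mpileaks-1.0.tar.gz
           print(self.x)    # 1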

View File

@@ -65,6 +65,7 @@ on these ideas for each distinct build system that Spack supports:
build_systems/custompackage
build_systems/inteloneapipackage
build_systems/intelpackage
build_systems/multiplepackage
build_systems/rocmpackage
build_systems/sourceforgepackage

View File

@@ -5,9 +5,9 @@
.. _autotoolspackage:
---------
Autotools
---------
----------------
AutotoolsPackage
----------------
Autotools is a GNU build system that provides a build-script generator.
By running the platform-independent ``./configure`` script that comes
@@ -17,7 +17,7 @@ with the package, you can generate a platform-dependent Makefile.
Phases
^^^^^^
The ``AutotoolsBuilder`` and ``AutotoolsPackage`` base classes come with the following phases:
The ``AutotoolsPackage`` base class comes with the following phases:
#. ``autoreconf`` - generate the configure script
#. ``configure`` - generate the Makefiles

View File

@@ -5,9 +5,9 @@
.. _bundlepackage:
------
Bundle
------
-------------
BundlePackage
-------------
``BundlePackage`` represents a set of packages that are expected to work well
together, such as a collection of commonly used software libraries. The

View File

@@ -5,9 +5,9 @@
.. _cachedcmakepackage:
-----------
CachedCMake
-----------
------------------
CachedCMakePackage
------------------
The CachedCMakePackage base class is used for CMake-based workflows
that create a CMake cache file prior to running ``cmake``. This is

View File

@@ -5,9 +5,9 @@
.. _cmakepackage:
-----
CMake
-----
------------
CMakePackage
------------
Like Autotools, CMake is a widely-used build-script generator. Designed
by Kitware, CMake is the most popular build system for new C, C++, and
@@ -21,7 +21,7 @@ whereas Autotools is Unix-only.
Phases
^^^^^^
The ``CMakeBuilder`` and ``CMakePackage`` base classes come with the following phases:
The ``CMakePackage`` base class comes with the following phases:
#. ``cmake`` - generate the Makefile
#. ``build`` - build the package
@@ -130,8 +130,8 @@ Adding flags to cmake
To add additional flags to the ``cmake`` call, simply override the
``cmake_args`` function. The following example defines values for the flags
``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with
and without the :meth:`~spack.build_systems.cmake.CMakeBuilder.define` and
:meth:`~spack.build_systems.cmake.CMakeBuilder.define_from_variant` helper functions:
and without the :meth:`~spack.build_systems.cmake.CMakePackage.define` and
:meth:`~spack.build_systems.cmake.CMakePackage.define_from_variant` helper functions:
.. code-block:: python
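   # Illustrative sketch (the example body is cut off in this diff); the
   # flag names below are the ones mentioned in the paragraph above.
   def cmake_args(self):
       args = [
           "-DWHATEVER:STRING=somevalue",                    # plain -D string
           self.define("ENABLE_BROKEN_FEATURE", False),     # boolean flag, set to OFF
           self.define_from_variant("DETECT_HDF5", "hdf5"),  # follows the 'hdf5' variant
           self.define_from_variant("THREADS"),              # follows the 'threads' variant
       ]
       return args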

View File

@@ -5,9 +5,9 @@
.. _cudapackage:
----
Cuda
----
-----------
CudaPackage
-----------
Different from other packages, ``CudaPackage`` does not represent a build system.
Instead its goal is to simplify and unify usage of ``CUDA`` in other packages by providing a `mixin-class <https://en.wikipedia.org/wiki/Mixin>`_.
@@ -80,7 +80,7 @@ standard CUDA compiler flags.
**cuda_flags**
This built-in static method returns a list of command line flags
for the chosen ``cuda_arch`` value(s). The flags are intended to
be passed to the CUDA compiler driver (i.e., ``nvcc``).
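A package might consume these flags roughly like this (a hypothetical
sketch; ``MyApp`` and its CMake cache variable are made up):

.. code-block:: python

   class MyApp(CMakePackage, CudaPackage):
       def cmake_args(self):
           args = []
           if "+cuda" in self.spec:
               arch_list = self.spec.variants["cuda_arch"].value
               # cuda_flags() expands e.g. ("70",) into the matching
               # nvcc --generate-code options.
               flags = " ".join(self.cuda_flags(arch_list))
               args.append(self.define("CMAKE_CUDA_FLAGS", flags))
           return args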

View File

@@ -6,9 +6,9 @@
.. _inteloneapipackage:
===========
IntelOneapi
===========
====================
IntelOneapiPackage
====================
.. contents::
@@ -32,11 +32,11 @@ oneAPI packages or use::
For more information on a specific package, do::
spack info --all <package-name>
spack info <package-name>
Intel no longer releases new versions of Parallel Studio, which can be
used in Spack via the :ref:`intelpackage`. All of its components can
now be found in oneAPI.
Examples
========

View File

@@ -5,9 +5,9 @@
.. _intelpackage:
-----
Intel
-----
------------
IntelPackage
------------
.. contents::

View File

@@ -5,11 +5,11 @@
.. _luapackage:
---
Lua
---
------------
LuaPackage
------------
The ``Lua`` build-system is a helper for the common case of Lua packages that provide
LuaPackage is a helper for the common case of Lua packages that provide
a rockspec file. This is not meant to take a rock archive, but to build
a source archive or repository that provides a rockspec, which should cover
most lua packages. In the case a Lua package builds by Make rather than
@@ -19,7 +19,7 @@ luarocks, prefer MakefilePackage.
Phases
^^^^^^
The ``LuaBuilder`` and ``LuaPackage`` base classes come with the following phases:
The ``LuaPackage`` base class comes with the following phases:
#. ``unpack`` - if using a rock, unpacks the rock and moves into the source directory
#. ``preprocess`` - adjust sources or rockspec to fix build

View File

@@ -5,9 +5,9 @@
.. _makefilepackage:
--------
Makefile
--------
---------------
MakefilePackage
---------------
The most primitive build system a package can use is a plain Makefile.
Makefiles are simple to write for small projects, but they usually
@@ -18,7 +18,7 @@ variables.
Phases
^^^^^^
The ``MakefileBuilder`` and ``MakefilePackage`` base classes come with 3 phases:
The ``MakefilePackage`` base class comes with 3 phases:
#. ``edit`` - edit the Makefile
#. ``build`` - build the project

View File

@@ -5,9 +5,9 @@
.. _mavenpackage:
-----
Maven
-----
------------
MavenPackage
------------
Apache Maven is a general-purpose build system that does not rely
on Makefiles to build software. It is designed for building and
managing any Java-based project.
Phases
^^^^^^
The ``MavenBuilder`` and ``MavenPackage`` base classes come with the following phases:
The ``MavenPackage`` base class comes with the following phases:
#. ``build`` - compile code and package into a JAR file
#. ``install`` - copy to installation prefix

View File

@@ -5,9 +5,9 @@
.. _mesonpackage:
-----
Meson
-----
------------
MesonPackage
------------
Much like Autotools and CMake, Meson is a build system. But it is
meant to be both fast and as user friendly as possible. GNOME's goal
@@ -17,7 +17,7 @@ is to port modules to use the Meson build system.
Phases
^^^^^^
The ``MesonBuilder`` and ``MesonPackage`` base classes come with the following phases:
The ``MesonPackage`` base class comes with the following phases:
#. ``meson`` - generate ninja files
#. ``build`` - build the project

View File

@@ -0,0 +1,350 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _multiplepackage:
----------------------
Multiple Build Systems
----------------------
Quite frequently, a package will change build systems from one version to the
next. For example, a small project that once used a single Makefile to build
may now require Autotools to handle the increased number of files that need to
be compiled. Or, a package that once used Autotools may switch to CMake for
Windows support. In this case, it becomes a bit more challenging to write a
single build recipe for this package in Spack.
There are several ways that this can be handled in Spack:
#. Subclass the new build system, and override phases as needed (preferred)
#. Subclass ``Package`` and implement ``install`` as needed
#. Create separate ``*-cmake``, ``*-autotools``, etc. packages for each build system
#. Rename the old package to ``*-legacy`` and create a new package
#. Move the old package to a ``legacy`` repository and create a new package
#. Drop older versions that only support the older build system
Of these options, 1 is preferred, and will be demonstrated in this
documentation. Options 3-5 have issues with concretization, so shouldn't be
used. Options 4-5 also don't support more than two build systems. Option 6 only
works if the old versions are no longer needed. Option 1 is preferred over 2
because it makes it easier to drop the old build system entirely.
The exact syntax of the package depends on which build systems you need to
support. Below are a couple of common examples.
^^^^^^^^^^^^^^^^^^^^^
Makefile -> Autotools
^^^^^^^^^^^^^^^^^^^^^
Let's say we have the following package:
.. code-block:: python
class Foo(MakefilePackage):
version("1.2.0", sha256="...")
def edit(self, spec, prefix):
filter_file("CC=", "CC=" + spack_cc, "Makefile")
def install(self, spec, prefix):
install_tree(".", prefix)
The package subclasses from :ref:`makefilepackage`, which has three phases:
#. ``edit`` (does nothing by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
In this case, the ``install`` phase needed to be overridden because the
Makefile did not have an install target. We also modify the Makefile to use
Spack's compiler wrappers. The default ``build`` phase is not changed.
Starting with version 1.3.0, we want to use Autotools to build instead.
:ref:`autotoolspackage` has four phases:
#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
If the only version we need to support is 1.3.0, the package would look as
simple as:
.. code-block:: python
class Foo(AutotoolsPackage):
version("1.3.0", sha256="...")
def configure_args(self):
return ["--enable-shared"]
In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.
If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:
.. code-block:: python
class Foo(AutotoolsPackage):
version("1.3.0", sha256="...")
version("1.2.0", sha256="...", deprecated=True)
def configure_args(self):
return ["--enable-shared"]
# Remove the following once version 1.2.0 is dropped
@when("@:1.2")
def patch(self):
filter_file("CC=", "CC=" + spack_cc, "Makefile")
@when("@:1.2")
def autoreconf(self, spec, prefix):
pass
@when("@:1.2")
def configure(self, spec, prefix):
pass
@when("@:1.2")
def install(self, spec, prefix):
install_tree(".", prefix)
There are a few interesting things to note here:
* We added ``deprecated=True`` to version 1.2.0. This signifies that version
1.2.0 is deprecated and shouldn't be used. However, if a user still relies
on version 1.2.0, it's still there and builds just fine.
* We moved the contents of the ``edit`` phase to the ``patch`` function. Since
``AutotoolsPackage`` doesn't have an ``edit`` phase, the only way for this
step to be executed is to move it to the ``patch`` function, which always
gets run.
* The ``autoreconf`` and ``configure`` phases become no-ops. Since the old
Makefile-based build system doesn't use these, we ignore these phases when
building ``foo@1.2.0``.
* The ``@when`` decorator is used to override these phases only for older
versions. The default methods are used for ``foo@1.3:``.
Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.
^^^^^^^^^^^^^^^^^^
Autotools -> CMake
^^^^^^^^^^^^^^^^^^
Let's say we have the following package:
.. code-block:: python
class Bar(AutotoolsPackage):
version("1.2.0", sha256="...")
def configure_args(self):
return ["--enable-shared"]
The package subclasses from :ref:`autotoolspackage`, which has four phases:
#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.
Starting with version 1.3.0, we want to use CMake to build instead.
:ref:`cmakepackage` has three phases:
#. ``cmake`` (runs ``cmake ...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)
If the only version we need to support is 1.3.0, the package would look as
simple as:
.. code-block:: python
class Bar(CMakePackage):
version("1.3.0", sha256="...")
def cmake_args(self):
return [self.define("BUILD_SHARED_LIBS", True)]
In this case, we use the default methods for each phase and only override
``cmake_args`` to specify additional flags to pass to ``cmake``.
If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:
.. code-block:: python
class Bar(CMakePackage):
version("1.3.0", sha256="...")
version("1.2.0", sha256="...", deprecated=True)
def cmake_args(self):
return [self.define("BUILD_SHARED_LIBS", True)]
# Remove the following once version 1.2.0 is dropped
def configure_args(self):
return ["--enable-shared"]
@when("@:1.2")
def cmake(self, spec, prefix):
configure("--prefix=" + prefix, *self.configure_args())
There are a few interesting things to note here:
* We added ``deprecated=True`` to version 1.2.0. This signifies that version
1.2.0 is deprecated and shouldn't be used. However, if a user still relies
on version 1.2.0, it's still there and builds just fine.
* Since CMake and Autotools are so similar, we only need to override the
``cmake`` phase, we can use the default ``build`` and ``install`` phases.
* We override ``cmake`` to run ``./configure`` for older versions.
``configure_args`` remains the same.
* The ``@when`` decorator is used to override these phases only for older
versions. The default methods are used for ``bar@1.3:``.
Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Multiple build systems for the same version
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
During the transition from one build system to another, developers often
support multiple build systems at the same time. Spack can only use a single
build system for a single version. To decide which build system to use for a
particular version, take the following things into account:
1. If the developers explicitly state that one build system is preferred over
another, use that one.
2. If one build system is considered "experimental" while another is considered
"stable", use the stable build system.
3. Otherwise, use the newer build system.
The developer preference for which build system to use can change over time as
a newer build system becomes stable/recommended.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Dropping support for old build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When older versions of a package don't support a newer build system, it can be
tempting to simply delete them from a package. This significantly reduces
package complexity and makes the build recipe much easier to maintain. However,
other packages or Spack users may rely on these older versions. The recommended
approach is to first support both build systems (as demonstrated above),
:ref:`deprecate <deprecate>` versions that rely on the old build system, and
remove those versions and any phases that needed to be overridden in the next
Spack release.
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Three or more build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^
In rare cases, a package may change build systems multiple times. For example,
a package may start with Makefiles, then switch to Autotools, then switch to
CMake. The same logic used above can be extended to any number of build systems.
For example:
.. code-block:: python
class Baz(CMakePackage):
version("1.4.0", sha256="...") # CMake
version("1.3.0", sha256="...") # Autotools
version("1.2.0", sha256="...") # Makefile
def cmake_args(self):
return [self.define("BUILD_SHARED_LIBS", True)]
# Remove the following once version 1.3.0 is dropped
def configure_args(self):
return ["--enable-shared"]
@when("@1.3")
def cmake(self, spec, prefix):
configure("--prefix=" + prefix, *self.configure_args())
# Remove the following once version 1.2.0 is dropped
@when("@:1.2")
def patch(self):
filter_file("CC=", "CC=" + spack_cc, "Makefile")
@when("@:1.2")
def cmake(self, spec, prefix):
pass
@when("@:1.2")
def install(self, spec, prefix):
install_tree(".", prefix)
^^^^^^^^^^^^^^^^^^^
Additional examples
^^^^^^^^^^^^^^^^^^^
When writing new packages, it often helps to see examples of existing packages.
Here is an incomplete list of existing Spack packages that have changed build
systems before:
================ ===================== ================
Package Previous Build System New Build System
================ ===================== ================
amber custom CMake
arpack-ng Autotools CMake
atk Autotools Meson
blast None Autotools
dyninst Autotools CMake
evtgen Autotools CMake
fish Autotools CMake
gdk-pixbuf Autotools Meson
glib Autotools Meson
glog Autotools CMake
gmt Autotools CMake
gtkplus Autotools Meson
hpl Makefile Autotools
interproscan Perl Maven
jasper Autotools CMake
kahip SCons CMake
kokkos Makefile CMake
kokkos-kernels Makefile CMake
leveldb Makefile CMake
libdrm Autotools Meson
libjpeg-turbo Autotools CMake
mesa Autotools Meson
metis None CMake
mpifileutils Autotools CMake
muparser Autotools CMake
mxnet Makefile CMake
nest Autotools CMake
neuron Autotools CMake
nsimd CMake nsconfig
opennurbs Makefile CMake
optional-lite None CMake
plasma Makefile CMake
preseq Makefile Autotools
protobuf Autotools CMake
py-pygobject Autotools Python
singularity Autotools Makefile
span-lite None CMake
ssht Makefile CMake
string-view-lite None CMake
superlu Makefile CMake
superlu-dist Makefile CMake
uncrustify Autotools CMake
================ ===================== ================
Packages that support multiple build systems can be a bit confusing to write.
Don't hesitate to open an issue or draft pull request and ask for advice from
other Spack developers!
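For quick reference, here is a minimal sketch of a recipe supporting two build
systems in the separate-builder style described earlier in this guide. The
class name, version ranges, and arguments are illustrative only:

.. code-block:: python

   import spack.build_systems.autotools
   import spack.build_systems.cmake
   from spack.package import *


   class Qux(CMakePackage, AutotoolsPackage):
       """Hypothetical package that switched from Autotools to CMake in 2.0."""

       # Pick the build system based on the version being installed
       build_system(
           conditional("cmake", when="@2.0:"),
           conditional("autotools", when="@:1"),
           default="cmake",
       )

       version("2.0.0", sha256="...")
       version("1.9.0", sha256="...")


   class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]


   class AutotoolsBuilder(spack.build_systems.autotools.AutotoolsBuilder):
       def configure_args(self):
           return ["--enable-shared"]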

View File

@@ -5,9 +5,9 @@
.. _octavepackage:
------
Octave
------
-------------
OctavePackage
-------------
Octave has its own build system for installing packages.
@@ -15,7 +15,7 @@ Octave has its own build system for installing packages.
Phases
^^^^^^
The ``OctaveBuilder`` and ``OctavePackage`` base classes have a single phase:
The ``OctavePackage`` base class has a single phase:
#. ``install`` - install the package

View File

@@ -5,9 +5,9 @@
.. _perlpackage:
----
Perl
----
-----------
PerlPackage
-----------
Much like Octave, Perl has its own language-specific
build system.
@@ -16,7 +16,7 @@ build system.
Phases
^^^^^^
The ``PerlBuilder`` and ``PerlPackage`` base classes come with 3 phases that can be overridden:
The ``PerlPackage`` base class comes with 3 phases that can be overridden:
#. ``configure`` - configure the package
#. ``build`` - build the package

View File

@@ -5,9 +5,9 @@
.. _pythonpackage:
------
Python
------
-------------
PythonPackage
-------------
Python packages and modules have their own special build system. This
documentation covers everything you'll need to know in order to write
@@ -582,19 +582,6 @@ libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed.
Instead of defining ``import_modules`` explicitly, only the subset
of module names to be skipped can be defined by using ``skip_modules``.
If a defined module has submodules, they are skipped as well, e.g.,
in case the ``plotting`` modules should be excluded from the
automatically detected ``import_modules`` ``['nilearn', 'nilearn.surface',
'nilearn.plotting', 'nilearn.plotting.data']`` set:
.. code-block:: python
skip_modules = ['nilearn.plotting']
This will set ``import_modules`` to ``['nilearn', 'nilearn.surface']``
Import tests can be run during the installation using ``spack install
--test=root`` or at any time after the installation using
``spack test run``.
@@ -724,9 +711,10 @@ extends vs. depends_on
This is very similar to the naming dilemma above, with a slight twist.
As mentioned in the :ref:`Packaging Guide <packaging_extensions>`,
``extends`` and ``depends_on`` are very similar, but ``extends`` ensures
that the extension and extendee share the same prefix in views.
This allows the user to import a Python module without
``extends`` and ``depends_on`` are very similar, but ``extends`` adds
the ability to *activate* the package. Activation involves symlinking
everything in the installation prefix of the package to the installation
prefix of Python. This allows the user to import a Python module without
having to add that module to ``PYTHONPATH``.
When deciding between ``extends`` and ``depends_on``, the best rule of
@@ -734,7 +722,7 @@ thumb is to check the installation prefix. If Python libraries are
installed to ``<prefix>/lib/pythonX.Y/site-packages``, then you
should use ``extends``. If Python libraries are installed elsewhere
or the only files that get installed reside in ``<prefix>/bin``, then
don't use ``extends``.
don't use ``extends``, as symlinking the package wouldn't be useful.
^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack

View File

@@ -5,9 +5,9 @@
.. _qmakepackage:
-----
QMake
-----
------------
QMakePackage
------------
Much like Autotools and CMake, QMake is a build-script generator
designed by the developers of Qt. In its simplest form, Spack's
@@ -29,7 +29,7 @@ variables or edit ``*.pro`` files to get things working properly.
Phases
^^^^^^
The ``QMakeBuilder`` and ``QMakePackage`` base classes come with the following phases:
The ``QMakePackage`` base class comes with the following phases:
#. ``qmake`` - generate Makefiles
#. ``build`` - build the project

View File

@@ -5,9 +5,9 @@
.. _racketpackage:
------
Racket
------
-------------
RacketPackage
-------------
Much like Python, Racket packages and modules have their own special build system.
To learn more about the specifics of Racket package system, please refer to the
@@ -17,7 +17,7 @@ To learn more about the specifics of Racket package system, please refer to the
Phases
^^^^^^
The ``RacketBuilder`` and ``RacketPackage`` base classes provides an ``install`` phase that
The ``RacketPackage`` base class provides an ``install`` phase that
can be overridden, corresponding to the use of:
.. code-block:: console

View File

@@ -5,9 +5,9 @@
.. _rocmpackage:
----
ROCm
----
-----------
ROCmPackage
-----------
The ``ROCmPackage`` is not a build system but a helper package. Like ``CudaPackage``,
it provides standard variants, dependencies, and conflicts to facilitate building
@@ -25,7 +25,7 @@ This package provides the following variants:
* **rocm**
This variant is used to enable/disable building with ``rocm``.
This variant is used to enable/disable building with ``rocm``.
The default is disabled (or ``False``).
* **amdgpu_target**

View File

@@ -5,9 +5,9 @@
.. _rpackage:
--
R
--
--------
RPackage
--------
Like Python, R has its own built-in build system.
@@ -19,7 +19,7 @@ new Spack packages for.
Phases
^^^^^^
The ``RBuilder`` and ``RPackage`` base classes have a single phase:
The ``RPackage`` base class has a single phase:
#. ``install`` - install the package
@@ -193,10 +193,10 @@ Build system dependencies
As an extension of the R ecosystem, your package will obviously depend
on R to build and run. Normally, we would use ``depends_on`` to express
this, but for R packages, we use ``extends``. This implies a special
dependency on R, which is used to set environment variables such as
``R_LIBS`` uniformly. Since every R package needs this, the ``RPackage``
base class contains:
this, but for R packages, we use ``extends``. ``extends`` is similar to
``depends_on``, but adds an additional feature: the ability to "activate"
the package by symlinking it to the R installation directory. Since
every R package needs this, the ``RPackage`` base class contains:
.. code-block:: python

View File

@@ -5,9 +5,9 @@
.. _rubypackage:
----
Ruby
----
-----------
RubyPackage
-----------
Like Perl, Python, and R, Ruby has its own build system for
installing Ruby gems.
@@ -16,7 +16,7 @@ installing Ruby gems.
Phases
^^^^^^
The ``RubyBuilder`` and ``RubyPackage`` base classes provide the following phases that
The ``RubyPackage`` base class provides the following phases that
can be overridden:
#. ``build`` - build everything needed to install

View File

@@ -5,9 +5,9 @@
.. _sconspackage:
-----
SCons
-----
------------
SConsPackage
------------
SCons is a general-purpose build system that does not rely on
Makefiles to build software. SCons is written in Python, and handles
@@ -42,7 +42,7 @@ As previously mentioned, SCons allows developers to add subcommands like
$ scons install
To facilitate this, the ``SConsBuilder`` and ``SconsPackage`` base classes provide the
To facilitate this, the ``SConsPackage`` base class provides the
following phases:
#. ``build`` - build the package

View File

@@ -5,9 +5,9 @@
.. _sippackage:
---
SIP
---
----------
SIPPackage
----------
SIP is a tool that makes it very easy to create Python bindings for C and C++
libraries. It was originally developed to create PyQt, the Python bindings for
@@ -22,7 +22,7 @@ provides support functions to the automatically generated code.
Phases
^^^^^^
The ``SIPBuilder`` and ``SIPPackage`` base classes come with the following phases:
The ``SIPPackage`` base class comes with the following phases:
#. ``configure`` - configure the package
#. ``build`` - build the package

View File

@@ -5,15 +5,15 @@
.. _sourceforgepackage:
-----------
Sourceforge
-----------
------------------
SourceforgePackage
------------------
``SourceforgePackage`` is a
``SourceforgePackage`` is a
`mixin-class <https://en.wikipedia.org/wiki/Mixin>`_. It automatically
sets the URL based on a list of Sourceforge mirrors listed in
`sourceforge_mirror_path`, which defaults to a half dozen known mirrors.
Refer to the package source
Refer to the package source
(`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/sourceforge.py>`__) for the current list of mirrors used by Spack.
@@ -29,7 +29,7 @@ This package provides a method for populating mirror URLs.
It is decorated with `property` so its results are treated as
a package attribute.
Refer to
Refer to
`<https://spack.readthedocs.io/en/latest/packaging_guide.html#mirrors-of-the-main-url>`__
for information on how Spack uses the `urls` attribute during
fetching.

View File

@@ -5,9 +5,9 @@
.. _wafpackage:
---
Waf
---
----------
WafPackage
----------
Like SCons, Waf is a general-purpose build system that does not rely
on Makefiles to build software.
@@ -16,7 +16,7 @@ on Makefiles to build software.
Phases
^^^^^^
The ``WafBuilder`` and ``WafPackage`` base classes come with the following phases:
The ``WafPackage`` base class comes with the following phases:
#. ``configure`` - configure the project
#. ``build`` - build the project

View File

@@ -32,11 +32,14 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
link_name = os.path.abspath("_spack_root")
if not os.path.exists(link_name):
os.symlink(os.path.abspath("../../.."), link_name, target_is_directory=True)
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external"))
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/pytest-fallback"))
if sys.version_info[0] < 3:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib"))
else:
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/yaml/lib3"))
sys.path.append(os.path.abspath("_spack_root/lib/spack/"))
# Add the Spack bin directory to the path so that we can use its output in docs.
@@ -124,7 +127,6 @@ def setup(sphinx):
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_design",
"sphinxcontrib.programoutput",
]
@@ -154,8 +156,8 @@ def setup(sphinx):
master_doc = "index"
# General information about the project.
project = "Spack"
copyright = "2013-2021, Lawrence Livermore National Laboratory."
project = u"Spack"
copyright = u"2013-2021, Lawrence Livermore National Laboratory."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -199,13 +201,9 @@ def setup(sphinx):
("py:class", "unittest.case.TestCase"),
("py:class", "_frozen_importlib_external.SourceFileLoader"),
("py:class", "clingo.Control"),
("py:class", "six.moves.urllib.parse.ParseResult"),
# Spack classes that are private and we don't want to expose
("py:class", "spack.provider_index._IndexBase"),
("py:class", "spack.repo._PrependFileLoader"),
("py:class", "spack.build_systems._checks.BaseBuilder"),
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.VersionBase"),
]
# The reST default role (used for this markup: `text`) to use for all documents.
@@ -344,7 +342,7 @@ class SpackStyle(DefaultStyle):
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "Spack.tex", "Spack Documentation", "Todd Gamblin", "manual"),
("index", "Spack.tex", u"Spack Documentation", u"Todd Gamblin", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -372,7 +370,7 @@ class SpackStyle(DefaultStyle):
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "spack", "Spack Documentation", ["Todd Gamblin"], 1)]
man_pages = [("index", "spack", u"Spack Documentation", [u"Todd Gamblin"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
@@ -387,8 +385,8 @@ class SpackStyle(DefaultStyle):
(
"index",
"Spack",
"Spack Documentation",
"Todd Gamblin",
u"Spack Documentation",
u"Todd Gamblin",
"Spack",
"One line description of project.",
"Miscellaneous",

View File

@@ -19,9 +19,9 @@ see the default settings by looking at
These settings can be overridden in ``etc/spack/config.yaml`` or
``~/.spack/config.yaml``. See :ref:`configuration-scopes` for details.
---------------------
``install_tree:root``
---------------------
--------------------
``install_tree``
--------------------
The location where Spack will install packages and their dependencies.
Default is ``$spack/opt/spack``.
@@ -224,9 +224,9 @@ them). Please note that we currently disable ccache's ``hash_dir``
feature to avoid an issue with the stage directory (see
https://github.com/LLNL/spack/pull/3761#issuecomment-294352232).
-----------------------
``shared_linking:type``
-----------------------
------------------
``shared_linking``
------------------
Control whether Spack embeds ``RPATH`` or ``RUNPATH`` attributes in ELF binaries
so that they can find their dependencies. Has no effect on macOS.
@@ -245,52 +245,6 @@ the loading object.
DO NOT MIX the two options within the same install tree.
-----------------------
``shared_linking:bind``
-----------------------
This is an *experimental option* that controls whether Spack embeds absolute paths
to needed shared libraries in ELF executables and shared libraries on Linux. Setting
this option to ``true`` has two advantages:
1. **Improved startup time**: when running an executable, the dynamic loader does not
have to perform a search for needed libraries; they are loaded directly.
2. **Reliability**: libraries loaded at runtime are those that were linked to. This
minimizes the risk of accidentally picking up system libraries.
In the current implementation, Spack sets the soname (shared object name) of
libraries to their install path upon installation. This has two implications:
1. binding does not apply to libraries installed *before* the option was enabled;
2. toggling the option off does *not* prevent binding of libraries installed when
the option was still enabled.
It is also worth noting that:
1. Applications relying on ``dlopen(3)`` will continue to work, even when they open
a library by name. This is because ``RPATH``\s are retained in binaries also
when ``bind`` is enabled.
2. ``LD_PRELOAD`` continues to work for the typical use case of overriding
symbols, such as preloading a library with a more efficient ``malloc``.
However, the preloaded library will be loaded *in addition to*, instead of
*in place of*, another library with the same name --- this can be problematic
in very rare cases where libraries rely on a particular ``init`` or ``fini``
order.
.. note::
In some cases packages provide *stub libraries* that only contain an interface
for linking, but lack an implementation for runtime. An example of this is
``libcuda.so``, provided by the CUDA toolkit; it can be used to link against,
but the library needed at runtime is the one installed with the CUDA driver.
To avoid binding those libraries, they can be marked as non-bindable using
a property in the package:
.. code-block:: python
class Example(Package):
non_bindable_shared_objects = ["libinterface.so"]
----------------------
``terminal_title``
----------------------

View File

@@ -394,7 +394,7 @@ are indicated at the start of the path with ``~`` or ``~user``.
Spack-specific variables
^^^^^^^^^^^^^^^^^^^^^^^^
Spack understands over a dozen special variables. These are:
Spack understands several special variables. These are:
* ``$env``: name of the currently active :ref:`environment <environments>`
* ``$spack``: path to the prefix of this Spack installation
@@ -405,19 +405,6 @@ Spack understands over a dozen special variables. These are:
* ``$user``: name of the current user
* ``$user_cache_path``: user cache directory (``~/.spack`` unless
:ref:`overridden <local-config-overrides>`)
* ``$architecture``: the architecture triple of the current host, as
detected by Spack.
* ``$arch``: alias for ``$architecture``.
* ``$platform``: the platform of the current host, as detected by Spack.
* ``$operating_system``: the operating system of the current host, as
detected by the ``distro`` python module.
* ``$os``: alias for ``$operating_system``.
* ``$target``: the ISA target for the current host, as detected by
ArchSpec. E.g. ``skylake`` or ``neoverse-n1``.
* ``$target_family``. The target family for the current host, as
detected by ArchSpec. E.g. ``x86_64`` or ``aarch64``.
* ``$date``: the current date in the format YYYY-MM-DD
Note that, as with shell variables, you can write these as ``$varname``
or with braces to distinguish the variable from surrounding characters:
@@ -562,7 +549,7 @@ down the problem:
You can see above that the ``build_jobs`` and ``debug`` settings are
built in and are not overridden by a configuration file. The
``verify_ssl`` setting comes from the ``--insecure`` option on the
``verify_ssl`` setting comes from the ``--insceure`` option on the
command line. ``dirty`` and ``install_tree`` come from the custom
scopes ``./my-scope`` and ``./my-scope-2``, and all other configuration
options come from the default configuration files that ship with Spack.

View File

@@ -253,6 +253,27 @@ to update them.
multiple runs of ``spack style`` just to re-compute line numbers and
makes it much easier to fix errors directly off of the CI output.
.. warning::
Flake8 and ``pep8-naming`` require a number of dependencies in order
to run. If you installed ``py-flake8`` and ``py-pep8-naming``, the
easiest way to ensure the right packages are on your ``PYTHONPATH`` is
to run::
spack activate py-flake8
spack activate pep8-naming
so that all of the dependencies are symlinked to a central
location. If you see an error message like:
.. code-block:: console
Traceback (most recent call last):
File: "/usr/bin/flake8", line 5, in <module>
from pkg_resources import load_entry_point
ImportError: No module named pkg_resources
that means Flake8 couldn't find setuptools in your ``PYTHONPATH``.
^^^^^^^^^^^^^^^^^^^
Documentation Tests
@@ -288,9 +309,13 @@ All of these can be installed with Spack, e.g.
.. code-block:: console
$ spack load py-sphinx py-sphinx-rtd-theme py-sphinxcontrib-programoutput
$ spack activate py-sphinx
$ spack activate py-sphinx-rtd-theme
$ spack activate py-sphinxcontrib-programoutput
so that all of the dependencies are added to PYTHONPATH. If you see an error message
so that all of the dependencies are symlinked into that Python's
tree. Alternatively, you could arrange for their library
directories to be added to PYTHONPATH. If you see an error message
like:
.. code-block:: console

View File

@@ -149,9 +149,11 @@ grouped by functionality.
Package-related modules
^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.package_base`
Contains the :class:`~spack.package_base.PackageBase` class, which
is the superclass for all packages in Spack.
:mod:`spack.package`
Contains the :class:`~spack.package_base.Package` class, which
is the superclass for all packages in Spack. Methods on ``Package``
implement all phases of the :ref:`package lifecycle
<package-lifecycle>` and manage the build process.
:mod:`spack.util.naming`
Contains functions for mapping between Spack package names,
@@ -175,11 +177,14 @@ Spec-related modules
^^^^^^^^^^^^^^^^^^^^
:mod:`spack.spec`
Contains :class:`~spack.spec.Spec`. Also implements most of the logic for concretization
Contains :class:`~spack.spec.Spec` and :class:`~spack.spec.SpecParser`.
Also implements most of the logic for normalization and concretization
of specs.
:mod:`spack.parser`
Contains :class:`~spack.parser.SpecParser` and functions related to parsing specs.
:mod:`spack.parse`
Contains some base classes for implementing simple recursive descent
parsers: :class:`~spack.parse.Parser` and :class:`~spack.parse.Lexer`.
Used by :class:`~spack.spec.SpecParser`.
:mod:`spack.concretize`
Contains :class:`~spack.concretize.Concretizer` implementation,

View File

@@ -233,8 +233,8 @@ packages will be listed as roots of the Environment.
All of the Spack commands that act on the list of installed specs are
Environment-sensitive in this way, including ``install``,
``uninstall``, ``find``, ``extensions``, and more. In the
:ref:`environment-configuration` section we will discuss
``uninstall``, ``activate``, ``deactivate``, ``find``, ``extensions``,
and more. In the :ref:`environment-configuration` section we will discuss
Environment-sensitive commands further.
^^^^^^^^^^^^^^^^^^^^^
@@ -478,21 +478,14 @@ them to the Environment.
spack:
include:
- relative/path/to/config.yaml
- https://github.com/path/to/raw/config/compilers.yaml
- /absolute/path/to/packages.yaml
Environments can include files or URLs. File paths can be relative or
absolute. URLs include the path to the text for individual files or
can be the path to a directory containing configuration files.
^^^^^^^^^^^^^^^^^^^^^^^^
Configuration precedence
^^^^^^^^^^^^^^^^^^^^^^^^
Inline configurations take precedence over included configurations, so
you don't have to change shared configuration files to make small changes
to an individual environment. Included configurations listed earlier will
have higher precedence, as the included configs are applied in reverse order.
Environments can include files with either relative or absolute
paths. Inline configurations take precedence over included
configurations, so you don't have to change shared configuration files
to make small changes to an individual Environment. Included configs
listed earlier will have higher precedence, as the included configs are
applied in reverse order.
-------------------------------
Manually Editing the Specs List
@@ -519,49 +512,8 @@ available from the yaml file.
^^^^^^^^^^^^^^^^^^^
Spec concretization
^^^^^^^^^^^^^^^^^^^
An environment can be concretized in three different modes; which mode is active for
an environment is determined by the ``concretizer:unify`` configuration option.
The *default* mode is to unify all specs:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This means that any package in the environment corresponds to a single concrete spec. In
the above example, when ``hdf5`` depends down the line of ``zlib``, it is required to
take ``zlib@1.2.8`` instead of a newer version. This mode of concretization is
particularly useful when environment views are used: if every package occurs in
only one flavor, it is usually possible to merge all install directories into a view.
A downside of unified concretization is that it can be overly strict. For example, a
concretization error would happen when both ``hdf5+mpi`` and ``hdf5~mpi`` are specified
in an environment.
The second mode is to *unify when possible*: this makes concretization of root specs
more independent. Instead of requiring reuse of dependencies across different root
specs, reuse is merely maximized:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
This means that both ``hdf5`` installations will use ``zlib@1.2.8`` as a dependency even
if newer versions of that library are available.
The third mode of operation is to concretize root specs entirely independently by
disabling unified concretization:
An environment can be concretized in three different modes; which mode is active for an environment
is determined by the ``concretizer:unify`` property. By default, specs are concretized *separately*, one after the other:
.. code-block:: yaml
@@ -573,11 +525,45 @@ disabling unified concretization:
concretizer:
unify: false
In this example ``hdf5`` is concretized separately, and does not consider ``zlib@1.2.8``
as a constraint or preference. Instead, it will take the latest possible version.
This mode of operation makes it possible to deploy a full software stack where multiple configurations of the same package
need to be installed alongside each other using the best possible selection of transitive dependencies. The downside
is that redundancy of installations is disregarded completely, and thus environments might be more bloated than
strictly needed. In the example above, for instance, if a version of ``zlib`` newer than ``1.2.8`` is known to Spack,
then it will be used for both ``hdf5`` installations.
The last two concretization options are typically useful for system administrators and
user support groups providing a large software stack for their HPC center.
If redundancy of the environment is a concern, Spack provides a way to install it *together where possible*,
i.e. trying to maximize reuse of dependencies across different specs:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
In this case, too, Spack allows multiple configurations of the same package, but favors reuse of
specs over other factors. Going back to our example, this means that both ``hdf5`` installations will use
``zlib@1.2.8`` as a dependency even if newer versions of that library are available.
Central installations done at HPC centers by system administrators or user support groups are a common case
that fits either of these two modes.
Environments can also be configured to concretize all the root specs *together*, in a self-consistent way, to
ensure that each package in the environment comes with a single configuration:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This mode of operation is usually what is required by software developers that want to deploy their development
environment and have a single view of it in the filesystem.
.. note::
@@ -588,10 +574,10 @@ user support groups providing a large software stack for their HPC center.
.. admonition:: Re-concretization of user specs
When using *unified* concretization (when possible), the entire set of specs will be
When concretizing specs *together* or *together where possible* the entire set of specs will be
re-concretized after any addition of new user specs, to ensure that
the environment remains consistent / minimal. When instead unified concretization is
disabled, only the new specs will be concretized after any addition.
the environment remains consistent / minimal. When instead the specs are concretized
separately only the new specs will be re-concretized after any addition.
^^^^^^^^^^^^^
Spec Matrices
@@ -630,6 +616,31 @@ The following two Environment manifests are identical:
Spec matrices can be used to install swaths of software across various
toolchains.
The concretization logic for spec matrices differs slightly from the
rest of Spack. If a variant or dependency constraint from a matrix is
invalid, Spack will reject the constraint and try again without
it. For example, the following two Environment manifests will produce
the same specs:
.. code-block:: yaml
spack:
specs:
- matrix:
- [zlib, libelf, hdf5+mpi]
- [^mvapich2@2.2, ^openmpi@3.1.0]
spack:
specs:
- zlib
- libelf
- hdf5+mpi ^mvapich2@2.2
- hdf5+mpi ^openmpi@3.1.0
This allows one to create toolchains out of combinations of
constraints and apply them somewhat indiscriminately to packages,
without regard for the applicability of the constraint.
^^^^^^^^^^^^^^^^^^^^
Spec List References
^^^^^^^^^^^^^^^^^^^^
@@ -993,7 +1004,7 @@ A typical workflow is as follows:
spack env create -d .
spack -e . add perl
spack -e . concretize
spack -e . env depfile -o Makefile
spack -e . env depfile > Makefile
make -j64
This generates a ``Makefile`` from a concretized environment in the
@@ -1006,6 +1017,7 @@ load, even when packages are built in parallel.
By default the following phony convenience targets are available:
- ``make all``: installs the environment (default target);
- ``make fetch-all``: only fetch sources of all packages;
- ``make clean``: cleans files used by make, but does not uninstall packages.
.. tip::
@@ -1015,17 +1027,8 @@ By default the following phony convenience targets are available:
printed orderly per package install. To get synchronized output with colors,
use ``make -j<N> SPACK_COLOR=always --output-sync=recurse``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specifying dependencies on generated ``make`` targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
An interesting question is how to include generated ``Makefile``\s in your own
``Makefile``\s. This comes up when you want to install an environment that provides
executables required by a make target of your own.
The example below shows how to accomplish this: the ``env`` target specifies
the generated ``spack/env`` target as a prerequisite, meaning that the environment
gets installed and is available for use in the ``env`` target.
The following advanced example shows how generated targets can be used in a
``Makefile``:
.. code:: Makefile
@@ -1051,10 +1054,11 @@ gets installed and is available for use in the ``env`` target.
include env.mk
endif
This works as follows: when ``make`` is invoked, it first "remakes" the missing
include ``env.mk`` as there is a target for it. This triggers concretization of
the environment and makes spack output ``env.mk``. At that point the
generated target ``spack/env`` becomes available through ``include env.mk``.
When ``make`` is invoked, it first "remakes" the missing include ``env.mk``
from its rule, which triggers concretization. When done, the generated target
``spack/env`` is available. In the above example, the ``env`` target uses this generated
target as a prerequisite, meaning that it can make use of the installed packages in
its commands.
As it is typically undesirable to remake ``env.mk`` as part of ``make clean``,
the include is conditional.
@@ -1065,28 +1069,3 @@ the include is conditional.
the ``--make-target-prefix`` flag and use the non-phony target
``<target-prefix>/env`` as prerequisite, instead of the phony target
``<target-prefix>/all``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Building a subset of the environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The generated ``Makefile``\s contain install targets for each spec, identified
by ``<name>-<version>-<hash>``. This allows you to install only a subset of the
packages in the environment. When packages are unique in the environment, it's
enough to know the name and let tab-completion fill out the version and hash.
The following phony targets are available: ``install/<spec>`` to install the
spec with its dependencies, and ``install-deps/<spec>`` to *only* install
its dependencies. This can be useful when certain flags should only apply to
dependencies. Below we show a use case where a spec is installed with verbose
output (``spack install --verbose``) while its dependencies are installed silently:
.. code:: console
$ spack env depfile -o Makefile
# Install dependencies in parallel, only show a log on error.
$ make -j16 install-deps/python-3.11.0-<hash> SPACK_INSTALL_FLAGS=--show-log-on-error
# Install the root spec with verbose output.
$ make -j16 install/python-3.11.0-<hash> SPACK_INSTALL_FLAGS=--verbose

View File

@@ -98,42 +98,40 @@ For example, this command:
.. code-block:: console
$ spack create https://ftp.osuosl.org/pub/blfs/conglomeration/libelf/libelf-0.8.13.tar.gz
$ spack create http://www.mr511.de/software/libelf-0.8.13.tar.gz
creates a simple python file:
.. code-block:: python
from spack.package import *
from spack import *
class Libelf(AutotoolsPackage):
class Libelf(Package):
"""FIXME: Put a proper description of your package here."""
# FIXME: Add a proper url for your package's homepage here.
homepage = "https://www.example.com"
url = "https://ftp.osuosl.org/pub/blfs/conglomeration/libelf/libelf-0.8.13.tar.gz"
homepage = "http://www.example.com"
url = "http://www.mr511.de/software/libelf-0.8.13.tar.gz"
# FIXME: Add a list of GitHub accounts to
# notify when the package is updated.
# maintainers = ["github_user1", "github_user2"]
version("0.8.13", sha256="591a9b4ec81c1f2042a97aa60564e0cb79d041c52faa7416acb38bc95bd2c76d")
version('0.8.13', '4136d7b4c04df68b686570afa26988ac')
# FIXME: Add dependencies if required.
# depends_on("foo")
# depends_on('foo')
def configure_args(self):
# FIXME: Add arguments other than --prefix
# FIXME: If not needed delete this function
args = []
return args
def install(self, spec, prefix):
# FIXME: Modify the configure line to suit your build system here.
configure('--prefix={0}'.format(prefix))
# FIXME: Add logic to build and install here.
make()
make('install')
It doesn't take much Python coding to get from there to a working
package:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/libelf/package.py
:lines: 5-
:lines: 6-
Spack also provides wrapper functions around common commands like
``configure``, ``make``, and ``cmake`` to make writing packages

View File

@@ -21,39 +21,10 @@ be present on the machine where Spack is run:
:header-rows: 1
These requirements can be easily installed on most modern Linux systems;
on macOS, the Command Line Tools package is required, and a full Xcode suite
may be necessary for some packages such as Qt and apple-gl. Spack is designed
to run on HPC platforms like Cray. Not all packages should be expected
to work on all platforms.
A build matrix showing which packages are working on which systems is shown below.
.. tab-set::
.. tab-item:: Debian/Ubuntu
.. code-block:: console
apt update
apt install build-essential ca-certificates coreutils curl environment-modules gfortran git gpg lsb-release python3 python3-distutils python3-venv unzip zip
.. tab-item:: RHEL
.. code-block:: console
yum update -y
yum install -y epel-release
yum update -y
yum --enablerepo epel groupinstall -y "Development Tools"
yum --enablerepo epel install -y curl findutils gcc-c++ gcc gcc-gfortran git gnupg2 hostname iproute redhat-lsb-core make patch python3 python3-pip python3-setuptools unzip
python3 -m pip install boto3
.. tab-item:: macOS Brew
.. code-block:: console
brew update
brew install curl gcc git gnupg zip
on macOS, Xcode is required. Spack is designed to run on HPC
platforms like Cray. Not all packages should be expected
to work on all platforms. A build matrix showing which packages are
working on which systems is planned but not yet available.
------------
Installation
@@ -125,41 +96,88 @@ Spack provides two ways of bootstrapping ``clingo``: from pre-built binaries
(default), or from sources. The fastest way to get started is to bootstrap from
pre-built binaries.
The first time you concretize a spec, Spack will bootstrap automatically:
.. note::
When bootstrapping from pre-built binaries, Spack currently requires
``patchelf`` on Linux and ``otool`` on macOS. If ``patchelf`` is not in the
``PATH``, Spack will build it from sources, and a C++ compiler is required.
The first time you concretize a spec, Spack will bootstrap in the background:
.. code-block:: console
$ spack spec zlib
==> Bootstrapping clingo from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-ba5ijauisd3uuixtmactc36vps7yfsrl.spec.json
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64/gcc-10.2.1/clingo-bootstrap-spack/linux-centos7-x86_64-gcc-10.2.1-clingo-bootstrap-spack-ba5ijauisd3uuixtmactc36vps7yfsrl.spack
==> Installing "clingo-bootstrap@spack%gcc@10.2.1~docs~ipo+python+static_libstdcpp build_type=Release arch=linux-centos7-x86_64" from a buildcache
==> Bootstrapping patchelf from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.16.1-p72zyan5wrzuabtmzq7isa5mzyh6ahdp.spec.json
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.4/build_cache/linux-centos7-x86_64/gcc-10.2.1/patchelf-0.16.1/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.16.1-p72zyan5wrzuabtmzq7isa5mzyh6ahdp.spack
==> Installing "patchelf@0.16.1%gcc@10.2.1 ldflags="-static-libstdc++ -static-libgcc" build_system=autotools arch=linux-centos7-x86_64" from a buildcache
$ time spack spec zlib
Input spec
--------------------------------
zlib
Concretized
--------------------------------
zlib@1.2.13%gcc@9.4.0+optimize+pic+shared build_system=makefile arch=linux-ubuntu20.04-icelake
zlib@1.2.11%gcc@7.5.0+optimize+pic+shared arch=linux-ubuntu18.04-zen
If for security concerns you cannot bootstrap ``clingo`` from pre-built
binaries, you have to disable fetching the binaries we generated with Github Actions.
real 0m20.023s
user 0m18.351s
sys 0m0.784s
After this command you'll see that ``clingo`` has been installed for Spack's own use:
.. code-block:: console
$ spack bootstrap disable github-actions-v0.4
==> "github-actions-v0.4" is now disabled and will not be used for bootstrapping
$ spack bootstrap disable github-actions-v0.3
==> "github-actions-v0.3" is now disabled and will not be used for bootstrapping
$ spack find -b
==> Showing internal bootstrap store at "/root/.spack/bootstrap/store"
==> 3 installed packages
-- linux-rhel5-x86_64 / gcc@9.3.0 -------------------------------
clingo-bootstrap@spack python@3.6
-- linux-ubuntu18.04-zen / gcc@7.5.0 ----------------------------
patchelf@0.13
Subsequent calls to the concretizer will then be much faster:
.. code-block:: console
$ time spack spec zlib
[ ... ]
real 0m0.490s
user 0m0.431s
sys 0m0.041s
If for security concerns you cannot bootstrap ``clingo`` from pre-built
binaries, you have to mark this bootstrapping method as untrusted. This makes
Spack fall back to bootstrapping from sources:
.. code-block:: console
$ spack bootstrap untrust github-actions-v0.2
==> "github-actions-v0.2" is now untrusted and will not be used for bootstrapping
You can verify that the new settings are effective with:
.. command-output:: spack bootstrap list
.. code-block:: console
$ spack bootstrap list
Name: github-actions-v0.2 UNTRUSTED
Type: buildcache
Info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.2
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
Description:
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
[ ... ]
Name: spack-install TRUSTED
Type: install
Description:
Specs built from sources by Spack. May take a long time.
.. note::
@@ -189,7 +207,9 @@ under the ``${HOME}/.spack`` directory. The software installed there can be quer
.. code-block:: console
$ spack -b find
$ spack find --bootstrap
==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
==> 3 installed packages
-- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
clingo-bootstrap@spack python@3.6.9 re2c@1.2.1
@@ -198,7 +218,7 @@ In case it's needed the bootstrap store can also be cleaned with:
.. code-block:: console
$ spack clean -b
==> Removing bootstrapped software and configuration in "/home/spack/.spack/bootstrap"
==> Removing software in "/home/spack/.spack/bootstrap/store"
^^^^^^^^^^^^^^^^^^
Check Installation
@@ -1705,11 +1725,9 @@ dependencies or incompatible build tools like autoconf. Here are several
packages known to work on Windows:
* abseil-cpp
* bzip2
* clingo
* cpuinfo
* cmake
* hdf5
* glm
* nasm
* netlib-lapack (requires Intel Fortran)

Binary image files not shown (before: 658 KiB, 449 KiB, 128 KiB, 126 KiB, 35 KiB)

File diff suppressed because it is too large

View File

@@ -56,6 +56,7 @@ or refer to the full manual below.
basic_usage
Tutorial: Spack 101 <https://spack-tutorial.readthedocs.io>
replace_conda_homebrew
known_issues
.. toctree::
:maxdepth: 2

View File

@@ -0,0 +1,40 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
============
Known Issues
============
This is a list of known issues in Spack. It provides ways of getting around these
problems if you encounter them.
------------------------------------------------
Spack does not seem to respect ``packages.yaml``
------------------------------------------------
.. note::
This issue is **resolved** as of v0.19.0.dev0 commit
`8281a0c5feabfc4fe180846d6fe95cfe53420bc5`, through the introduction of package
requirements. See :ref:`package-requirements`.
A common problem in Spack v0.18.0 up to v0.19.0.dev0 is that package, compiler and target
preferences specified in ``packages.yaml`` do not seem to be respected. Spack picks the
"wrong" compilers and their versions, package versions and variants, and
micro-architectures.
This is, however, not a bug. In order to reduce the number of builds of the same
packages, the concretizer values reuse of installed packages more highly than the preferences
set in ``packages.yaml``. Note that ``packages.yaml`` specifies only preferences, not
hard constraints.
There are multiple workarounds:
1. Disable reuse during concretization: ``spack install --fresh <spec>`` when installing
from the command line, or ``spack concretize --fresh --force`` when using
environments.
2. Turn preferences into constraints, by moving them to the input spec. For example,
use ``spack spec zlib%gcc@12`` when you want to force GCC 12 even if ``zlib`` was
already installed with GCC 10.
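With the package requirements mentioned in the note above, a preference can
instead be expressed as a hard constraint in ``packages.yaml``. A minimal
sketch (the package and compiler here are examples only):

.. code-block:: yaml

   packages:
     zlib:
       require: "%gcc@12"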

View File

@@ -77,7 +77,7 @@ installation of a package.
Spack only generates modulefiles when a package is installed. If
you attempt to install a package and it is already installed, Spack
will not regenerate modulefiles for the package. This may lead to
will not regenerate modulefiles for the package. This may to
inconsistent modulefiles if the Spack module configuration has
changed since the package was installed, either by editing a file
or changing scopes or environments.

View File

@@ -34,155 +34,24 @@ ubiquitous in the scientific software community. Second, it's a modern
language and has many powerful features to help make package writing
easy.
.. _installation_procedure:
--------------------------------------
Overview of the installation procedure
--------------------------------------
Whenever Spack installs software, it goes through a series of predefined steps:
.. image:: images/installation_pipeline.png
:scale: 60 %
:align: center
All these steps are influenced by the metadata in each ``package.py`` and
by the current Spack configuration.
Since build systems are different from one another, the execution of the
last block in the figure is further expanded in a build-system-specific way.
An example for ``CMake`` is, for instance:
.. image:: images/builder_phases.png
:align: center
:scale: 60 %
The predefined steps for each build system are called "phases".
In general, the name and order in which the phases will be executed can be
obtained by either reading the API docs at :py:mod:`~.spack.build_systems`, or
using the ``spack info`` command:
.. code-block:: console
:emphasize-lines: 13,14
$ spack info --phases m4
AutotoolsPackage: m4
Homepage: https://www.gnu.org/software/m4/m4.html
Safe versions:
1.4.17 ftp://ftp.gnu.org/gnu/m4/m4-1.4.17.tar.gz
Variants:
Name Default Description
sigsegv on Build the libsigsegv dependency
Installation Phases:
autoreconf configure build install
Build Dependencies:
libsigsegv
...
An extensive list of available build systems and phases is provided in :ref:`installation_process`.
------------------------
Writing a package recipe
------------------------
Since v0.19, Spack supports two ways of writing a package recipe. The most commonly used is to encode both the metadata
(directives, etc.) and the build behavior in a single class, as shown in the following example:
.. code-block:: python
class Openjpeg(CMakePackage):
"""OpenJPEG is an open-source JPEG 2000 codec written in C language"""
homepage = "https://github.com/uclouvain/openjpeg"
url = "https://github.com/uclouvain/openjpeg/archive/v2.3.1.tar.gz"
version("2.4.0", sha256="8702ba68b442657f11aaeb2b338443ca8d5fb95b0d845757968a7be31ef7f16d")
variant("codec", default=False, description="Build the CODEC executables")
depends_on("libpng", when="+codec")
def url_for_version(self, version):
if version >= Version("2.1.1"):
return super(Openjpeg, self).url_for_version(version)
url_fmt = "https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz"
return url_fmt.format(version)
def cmake_args(self):
args = [
self.define_from_variant("BUILD_CODEC", "codec"),
self.define("BUILD_MJ2", False),
self.define("BUILD_THIRDPARTY", False),
]
return args
A package encoded with a single class is backward compatible with versions of Spack
lower than v0.19, and so are custom repositories containing only recipes of this kind.
The downside is that *this format doesn't allow packagers to use more than one build system in a single recipe*.
To do that, we have to resort to the second way Spack has of writing packages, which involves writing a
builder class explicitly. Using the same example as above, this reads:
.. code-block:: python
class Openjpeg(CMakePackage):
"""OpenJPEG is an open-source JPEG 2000 codec written in C language"""
homepage = "https://github.com/uclouvain/openjpeg"
url = "https://github.com/uclouvain/openjpeg/archive/v2.3.1.tar.gz"
version("2.4.0", sha256="8702ba68b442657f11aaeb2b338443ca8d5fb95b0d845757968a7be31ef7f16d")
variant("codec", default=False, description="Build the CODEC executables")
depends_on("libpng", when="+codec")
def url_for_version(self, version):
if version >= Version("2.1.1"):
return super(Openjpeg, self).url_for_version(version)
url_fmt = "https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz"
return url_fmt.format(version)
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def cmake_args(self):
args = [
self.define_from_variant("BUILD_CODEC", "codec"),
self.define("BUILD_MJ2", False),
self.define("BUILD_THIRDPARTY", False),
]
return args
This way of writing packages allows extending the recipe to support multiple build systems,
see :ref:`multiple_build_systems` for more details. The downside is that recipes of this kind
are only understood by Spack since v0.19+. More information on the internal architecture of
Spack can be found at :ref:`package_class_structure`.
.. note::
If a builder is implemented in ``package.py``, all build-specific methods must be moved
to the builder. This means that if you have a package like
.. code-block:: python
class Foo(CMakePackage):
def cmake_args(self):
...
and you add a builder to the ``package.py``, you must move ``cmake_args`` to the builder.
---------------------------
Creating & editing packages
---------------------------
.. _cmd-spack-create:
---------------------
Creating new packages
---------------------
^^^^^^^^^^^^^^^^
``spack create``
^^^^^^^^^^^^^^^^
To help create a new package, Spack provides a command that generates a ``package.py``
file in an existing repository, with a boilerplate package template. Here's an example:
The ``spack create`` command creates a directory with the package name and
generates a ``package.py`` file with a boilerplate package template. If given
a URL pointing to a tarball or other software archive, ``spack create`` is
smart enough to determine basic information about the package, including its name
and build system. In most cases, ``spack create`` plus a few modifications is
all you need to get a package working.
Here's an example:
.. code-block:: console
@@ -218,6 +87,23 @@ You do not *have* to download all of the versions up front. You can
always choose to download just one tarball initially, and run
:ref:`cmd-spack-checksum` later if you need more versions.
Let's say you download 3 tarballs:
.. code-block:: console
How many would you like to checksum? (default is 1, q to abort) 3
==> Downloading...
==> Fetching https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2
######################################################################## 100.0%
==> Fetching https://gmplib.org/download/gmp/gmp-6.1.1.tar.bz2
######################################################################## 100.0%
==> Fetching https://gmplib.org/download/gmp/gmp-6.1.0.tar.bz2
######################################################################## 100.0%
==> Checksummed 3 versions of gmp:
==> This package looks like it uses the autotools build system
==> Created template for gmp package
==> Created package file: /Users/Adam/spack/var/spack/repos/builtin/packages/gmp/package.py
Spack automatically creates a directory in the appropriate repository,
generates a boilerplate template for your package, and opens up the new
``package.py`` in your favorite ``$EDITOR``:
@@ -225,14 +111,6 @@ generates a boilerplate template for your package, and opens up the new
.. code-block:: python
:linenos:
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# ----------------------------------------------------------------------------
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
@@ -245,8 +123,9 @@ generates a boilerplate template for your package, and opens up the new
# spack edit gmp
#
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
import spack.build_systems.autotools
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
from spack.package import *
@@ -254,17 +133,19 @@ generates a boilerplate template for your package, and opens up the new
"""FIXME: Put a proper description of your package here."""
# FIXME: Add a proper url for your package's homepage here.
homepage = "https://www.example.com"
url = "https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2"
homepage = "http://www.example.com"
url = "https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2"
# FIXME: Add a list of GitHub accounts to
# notify when the package is updated.
# maintainers = ["github_user1", "github_user2"]
# maintainers = ['github_user1', 'github_user2']
version("6.2.1", sha256="eae9326beb4158c386e39a356818031bd28f3124cf915f8c5b1dc4c7a36b4d7c")
version('6.1.2', '8ddbb26dc3bd4e2302984debba1406a5')
version('6.1.1', '4c175f86e11eb32d8bf9872ca3a8e11d')
version('6.1.0', '86ee6e54ebfc4a90b643a65e402c4048')
# FIXME: Add dependencies if required.
# depends_on("foo")
# depends_on('foo')
def configure_args(self):
# FIXME: Add arguments other than --prefix
@@ -273,16 +154,15 @@ generates a boilerplate template for your package, and opens up the new
return args
The tedious stuff (creating the class, checksumming archives) has been
done for you. Spack correctly detected that ``gmp`` uses the ``autotools``
build system, so it created a new ``Gmp`` package that subclasses the
``AutotoolsPackage`` base class.
The default installation procedure for a package subclassing the ``AutotoolsPackage``
is to go through the typical process of:
done for you. You'll notice that ``spack create`` correctly detected that
``gmp`` uses the Autotools build system. It created a new ``Gmp`` package
that subclasses the ``AutotoolsPackage`` base class. This base class
provides basic installation methods common to all Autotools packages:
.. code-block:: bash
./configure --prefix=/path/to/installation/directory
make
make check
make install
@@ -329,14 +209,12 @@ The rest of the tasks you need to do are as follows:
Your new package may require specific flags during ``configure``.
These can be added via ``configure_args``. Specifics will differ
depending on the package and its build system.
:ref:`installation_process` is
:ref:`Implementing the install method <install-method>` is
covered in detail later.
^^^^^^^^^^^^^^^^^^^^^^^^^
Non-downloadable software
^^^^^^^^^^^^^^^^^^^^^^^^^
If your software cannot be downloaded from a URL you can still create a boilerplate
Passing a URL to ``spack create`` is a convenient and easy way to get
a basic package template, but what if your software is licensed and
cannot be downloaded from a URL? You can still create a boilerplate
``package.py`` by telling ``spack create`` what name you want to use:
.. code-block:: console
@@ -345,23 +223,40 @@ If your software cannot be downloaded from a URL you can still create a boilerpl
This will create a simple ``intel`` package with an ``install()``
method that you can craft to install your package.
Likewise, you can force the build system to be used with ``--template`` and,
in case it's needed, you can overwrite a package already in the repository
with ``--force``:
What if ``spack create <url>`` guessed the wrong name or build system?
For example, if your package uses the Autotools build system but does
not come with a ``configure`` script, Spack won't realize it uses
Autotools. You can overwrite the old package with ``--force`` and specify
a name with ``--name`` or a build system template to use with ``--template``:
.. code-block:: console
$ spack create --name gmp https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2
$ spack create --force --template autotools https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2
.. note::
If you are creating a package that uses the Autotools build system
but does not come with a ``configure`` script, you'll need to add an
``autoreconf`` method to your package that explains how to generate
the ``configure`` script. You may also need the following dependencies:
.. code-block:: python
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
A complete list of available build system templates can be found by running
``spack create --help``.
.. _cmd-spack-edit:
-------------------------
Editing existing packages
-------------------------
^^^^^^^^^^^^^^
``spack edit``
^^^^^^^^^^^^^^
One of the easiest ways to learn how to write packages is to look at
existing ones. You can edit a package file by name with the ``spack
@@ -371,15 +266,10 @@ edit`` command:
$ spack edit gmp
If you used ``spack create`` to create a package, you can get back to
it later with ``spack edit``. For instance, the ``gmp`` package actually
lives in:
.. code-block:: console
$ spack location -p gmp
${SPACK_ROOT}/var/spack/repos/builtin/packages/gmp/package.py
So, if you used ``spack create`` to create a package, then saved and
closed the resulting file, you can get back to it with ``spack edit``.
The ``gmp`` package actually lives in
``$SPACK_ROOT/var/spack/repos/builtin/packages/gmp/package.py``,
but ``spack edit`` provides a much simpler shortcut and saves you the
trouble of typing the full path.
@@ -2532,7 +2422,7 @@ Spack provides a mechanism for dependencies to influence the
environment of their dependents by overriding the
:meth:`setup_dependent_run_environment <spack.package_base.PackageBase.setup_dependent_run_environment>`
or the
:meth:`setup_dependent_build_environment <spack.builder.Builder.setup_dependent_build_environment>`
:meth:`setup_dependent_build_environment <spack.package_base.PackageBase.setup_dependent_build_environment>`
methods.
The Qt package, for instance, uses this call:
@@ -2634,12 +2524,9 @@ extendable package:
extends('python')
...
This accomplishes a few things. Firstly, the Python package can set special
variables such as ``PYTHONPATH`` for all extensions when the run or build
environment is set up. Secondly, filesystem views can ensure that extensions
are put in the same prefix as their extendee. This ensures that Python in
a view can always locate its Python packages, even without environment
variables set.
Now, the ``py-numpy`` package can be used as an argument to ``spack
activate``. When it is activated, all the files in its prefix will be
symbolically linked into the prefix of the python package.
A package can only extend one other package at a time. To support packages
that may extend one of a list of other packages, Spack supports multiple
@@ -2687,8 +2574,9 @@ variant(s) are selected. This may be accomplished with conditional
...
Sometimes, certain files in one package will conflict with those in
another, which means they cannot both be used in a view at the
same time. In this case, you can tell Spack to ignore those files:
another, which means they cannot both be activated (symlinked) at the
same time. In this case, you can tell Spack to ignore those files
when it does the activation:
.. code-block:: python
@@ -2700,7 +2588,7 @@ same time. In this case, you can tell Spack to ignore those files:
...
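   extends('python', ignore=r'bin/.*')  # illustrative sketch; the original example is elided here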
The code above will prevent everything in the ``$prefix/bin/`` directory
from being linked in a view.
from being linked in at activation time.
.. note::
@@ -2724,6 +2612,67 @@ extensions; as a consequence python extension packages (those inheriting from
``PythonPackage``) likewise override ``add_files_to_view`` in order to rewrite
shebang lines which point to the Python interpreter.
^^^^^^^^^^^^^^^^^^^^^^^^^
Activation & deactivation
^^^^^^^^^^^^^^^^^^^^^^^^^
Adding an extension to a view is referred to as an activation. If the view is
maintained in the Spack installation prefix of the extendee this is called a
global activation. Activations may involve updating some centralized state
that is maintained by the extendee package, so there can be additional work
for adding extensions compared with non-extension packages.
Spack's ``Package`` class has default ``activate`` and ``deactivate``
implementations that handle symbolically linking extensions' prefixes
into a specified view. Extendable packages can override these methods
to add custom activate/deactivate logic of their own. For example,
the ``activate`` and ``deactivate`` methods in the Python class handle
symbolic linking of extensions, but they also handle details surrounding
Python's ``.pth`` files, and other aspects of Python packaging.
Spack's extensions mechanism is designed to be extensible, so that
other packages (like Ruby, R, Perl, etc.) can provide their own
custom extension management logic, as they may not handle modules the
same way that Python does.
Let's look at Python's activate function:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/python/package.py
:pyobject: Python.activate
:linenos:
This function is called on the *extendee* (Python). It first calls
``activate`` in the superclass, which handles symlinking the
extension package's prefix into the specified view. It then does
some special handling of the ``easy-install.pth`` file, part of
Python's setuptools.
Deactivate behaves similarly to activate, but it unlinks files:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/python/package.py
:pyobject: Python.deactivate
:linenos:
Both of these methods call some custom functions in the Python
package. See the source for Spack's Python package for details.
^^^^^^^^^^^^^^^^^^^^
Activation arguments
^^^^^^^^^^^^^^^^^^^^
You may have noticed that the ``activate`` function defined above
takes keyword arguments. These are the keyword arguments from
``extends()``, and they are passed to both activate and deactivate.
This capability allows an extension to customize its own activation by
passing arguments to the extendee. Extendees can likewise implement
custom ``activate()`` and ``deactivate()`` functions to suit their
needs.
The only keyword argument supported by default is the ``ignore``
argument, which can take a regex, list of regexes, or a predicate to
determine which files *not* to symlink during activation.
.. _virtual-dependencies:
--------------------
@@ -3331,91 +3280,67 @@ the Python extensions provided by them: once for ``+python`` and once
for ``~python``. Other than using a little extra disk space, that
solution has no serious problems.
.. _installation_process:
.. _installation_procedure:
--------------------------------
Overriding build system defaults
--------------------------------
---------------------------------------
Implementing the installation procedure
---------------------------------------
.. note::
The last element of a package is its **installation procedure**. This is
where the real work of installation happens, and it's the main part of
the package you'll need to customize for each piece of software.
If you write a single class in ``package.py``, all the functions shown in the table below
can be implemented with the same signature on the ``*Package`` class instead of on the corresponding builder.
Most of the time the default implementations of methods and attributes in the build system
base classes are what a packager needs, and only a few entities must be overridden. Typically
it is enough to override a method like ``configure_args``:
.. code-block:: python
def configure_args(self):
args = ["--enable-cxx"] + self.enable_or_disable("libs")
if "libs=static" in self.spec:
args.append("--with-pic")
return args
The actual set of entities available for overriding in ``package.py`` depends on
the build system. The build systems currently supported by Spack are:
Defining an installation procedure means overriding a set of methods or attributes
that will be called at some point during the installation of the package.
The package base class, usually specialized for a given build system, determines the
actual set of entities available for overriding.
The classes that are currently provided by Spack are:
+----------------------------------------------------------+----------------------------------+
| **API docs** | **Description** |
| **Base Class** | **Purpose** |
+==========================================================+==================================+
| :class:`~spack.build_systems.generic` | Generic build system without any |
| | base implementation |
| :class:`~spack.package_base.Package` | General base class not |
| | specialized for any build system |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.makefile` | Specialized build system for |
| | software built invoking |
| :class:`~spack.build_systems.makefile.MakefilePackage` | Specialized class for packages |
| | built invoking |
| | hand-written Makefiles |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.autotools` | Specialized build system for |
| | software built using |
| | GNU Autotools |
| :class:`~spack.build_systems.autotools.AutotoolsPackage` | Specialized class for packages |
| | built using GNU Autotools |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.cmake` | Specialized build system for |
| | software built using CMake |
| :class:`~spack.build_systems.cmake.CMakePackage` | Specialized class for packages |
| | built using CMake |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.maven` | Specialized build system for |
| | software built using Maven |
| :class:`~spack.build_systems.cuda.CudaPackage` | A helper class for packages that |
| | use CUDA |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.meson` | Specialized build system for |
| | software built using Meson |
| :class:`~spack.build_systems.qmake.QMakePackage` | Specialized class for packages |
| | built using QMake |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.nmake` | Specialized build system for |
| | software built using NMake |
| :class:`~spack.build_systems.rocm.ROCmPackage` | A helper class for packages that |
| | use ROCm |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.qmake` | Specialized build system for |
| | software built using QMake |
| :class:`~spack.build_systems.scons.SConsPackage` | Specialized class for packages |
| | built using SCons |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.scons` | Specialized build system for |
| | software built using SCons |
| :class:`~spack.build_systems.waf.WafPackage` | Specialized class for packages |
| | built using Waf |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.waf` | Specialized build system for |
| | software built using Waf |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.r` | Specialized build system for |
| :class:`~spack.build_systems.r.RPackage` | Specialized class for |
| | R extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.octave` | Specialized build system for |
| :class:`~spack.build_systems.octave.OctavePackage` | Specialized class for |
| | Octave packages |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.python` | Specialized build system for |
| :class:`~spack.build_systems.python.PythonPackage` | Specialized class for |
| | Python extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.perl` | Specialized build system for |
| :class:`~spack.build_systems.perl.PerlPackage` | Specialized class for |
| | Perl extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.ruby` | Specialized build system for |
| | Ruby extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.intel` | Specialized build system for |
| | licensed Intel software |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.oneapi` | Specialized build system for |
| | Intel oneAPI software |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.aspell_dict` | Specialized build system for |
| | Aspell dictionaries |
| :class:`~spack.build_systems.intel.IntelPackage` | Specialized class for licensed |
| | Intel software |
+----------------------------------------------------------+----------------------------------+
@@ -3428,17 +3353,52 @@ the build system. The build systems currently supported by Spack are:
For example, a Python extension installed with CMake would ``extends('python')`` and
subclass from :class:`~spack.build_systems.cmake.CMakePackage`.
^^^^^^^^^^^^^^^^^^^^^^^^^^
Overriding builder methods
^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^
Installation pipeline
^^^^^^^^^^^^^^^^^^^^^
Build-system "phases" have default implementations that fit most of the common cases:
When a user runs ``spack install``, Spack:
1. Fetches an archive for the correct version of the software.
2. Expands the archive.
3. Sets the current working directory to the root directory of the expanded archive.
Then, depending on the base class of the package under consideration, it will execute
a certain number of **phases** that reflect the way a package of that type is usually built.
The names and order of the phases can be obtained either by reading the API
docs at :py:mod:`~.spack.build_systems`, or by using the ``spack info`` command:
.. code-block:: console
:emphasize-lines: 13,14
$ spack info m4
AutotoolsPackage: m4
Homepage: https://www.gnu.org/software/m4/m4.html
Safe versions:
1.4.17 ftp://ftp.gnu.org/gnu/m4/m4-1.4.17.tar.gz
Variants:
Name Default Description
sigsegv on Build the libsigsegv dependency
Installation Phases:
autoreconf configure build install
Build Dependencies:
libsigsegv
...
Typically, phases have default implementations that fit most of the common cases:
.. literalinclude:: _spack_root/lib/spack/spack/build_systems/autotools.py
:pyobject: AutotoolsBuilder.configure
:pyobject: AutotoolsPackage.configure
:linenos:
It is usually sufficient for a packager to override a few
build system specific helper methods or attributes to provide, for instance,
configure arguments:
@@ -3446,31 +3406,31 @@ configure arguments:
:pyobject: M4.configure_args
:linenos:
Each specific build system has a list of attributes and methods that can be overridden to
fine-tune the installation of a package without overriding an entire phase. To
have more information on them the place to go is the API docs of the :py:mod:`~.spack.build_systems`
module.
.. note::
Each specific build system has a list of attributes that can be overridden to
fine-tune the installation of a package without overriding an entire phase. To
have more information on them the place to go is the API docs of the :py:mod:`~.spack.build_systems`
module.
^^^^^^^^^^^^^^^^^^^^^^^^^^
Overriding an entire phase
^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes it is necessary to override an entire phase. If the ``package.py`` contains
a single class recipe (see :ref:`package_class_structure`), the signature for a
phase is:
In extreme cases it may be necessary to override an entire phase. Regardless
of the build system, the signature is the same. For example, the signature
for the install phase is:
.. code-block:: python
class Openjpeg(CMakePackage):
class Foo(Package):
def install(self, spec, prefix):
...
regardless of the build system. The arguments for the phase are:
``self``
This is the package object, which extends ``CMakePackage``.
For API docs on Package objects, see
:py:class:`Package <spack.package_base.PackageBase>`.
For those not used to Python instance methods, this is the
package itself. In this case it's an instance of ``Foo``, which
extends ``Package``. For API docs on Package objects, see
:py:class:`Package <spack.package_base.Package>`.
``spec``
This is the concrete spec object created by Spack from an
@@ -3485,111 +3445,12 @@ regardless of the build system. The arguments for the phase are:
The arguments ``spec`` and ``prefix`` are passed only for convenience, as they always
correspond to ``self.spec`` and ``self.spec.prefix`` respectively.
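Put together, a fully custom ``install`` phase for a hypothetical package might
look like this sketch:

.. code-block:: python

   class Foo(Package):
       def install(self, spec, prefix):
           # Build in the staged source directory, then install into the prefix
           make()
           make("install", "PREFIX={0}".format(prefix))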
If the ``package.py`` encodes builders explicitly, the signature for a phase changes slightly:
.. code-block:: python
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def install(self, pkg, spec, prefix):
...
In this case the package is passed as the second argument, and ``self`` is the builder instance.
.. _multiple_build_systems:
^^^^^^^^^^^^^^^^^^^^^^
Multiple build systems
^^^^^^^^^^^^^^^^^^^^^^
There are cases where a package actively supports two build systems, changes build systems
as it evolves, or needs different build systems on different platforms. Spack can deal with
these cases natively, if the recipe is written using builders explicitly.
For instance, software that supports two build systems unconditionally should derive from
both ``*Package`` base classes, and declare the possible use of multiple build systems using
a directive:
.. code-block:: python
class ArpackNg(CMakePackage, AutotoolsPackage):
build_system("cmake", "autotools", default="cmake")
In this case the software can be built with both ``autotools`` and ``cmake``. Since the package
supports multiple build systems, it is necessary to declare which one is the default. The ``package.py``
will likely contain some overriding of default builder methods:
.. code-block:: python
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def cmake_args(self):
pass
class AutotoolsBuilder(spack.build_systems.autotools.AutotoolsBuilder):
def configure_args(self):
pass
In more complex cases the build system may change according to certain conditions,
for instance across versions. This can be expressed with conditional variant values:
.. code-block:: python
class ArpackNg(CMakePackage, AutotoolsPackage):
build_system(
conditional("cmake", when="@0.64:"),
conditional("autotools", when="@:0.63"),
default="cmake",
)
In this example the directive imposes a change from ``Autotools`` to ``CMake`` when going
from ``v0.63`` to ``v0.64``.
^^^^^^^^^^^^^^^^^^
Mixin base classes
^^^^^^^^^^^^^^^^^^
Besides build systems, there are other cases where common metadata and behavior can be extracted
and reused by many packages. For instance, packages that depend on CUDA or ROCm share
common dependencies and constraints. To factor these attributes into a single place, Spack provides
a few mixin classes in the ``spack.build_systems`` module:
+---------------------------------------------------------------+----------------------------------+
| **API docs** | **Description** |
+===============================================================+==================================+
| :class:`~spack.build_systems.cuda.CudaPackage` | A helper class for packages that |
| | use CUDA |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.rocm.ROCmPackage` | A helper class for packages that |
| | use ROCm |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.gnu.GNUMirrorPackage` | A helper class for GNU packages |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.python.PythonExtension` | A helper class for Python |
| | extensions |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.sourceforge.SourceforgePackage` | A helper class for packages |
| | from sourceforge.org |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.sourceware.SourcewarePackage` | A helper class for packages |
| | from sourceware.org |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.xorg.XorgPackage` | A helper class for x.org |
| | packages |
+---------------------------------------------------------------+----------------------------------+
These classes should be used by adding them to the inheritance tree of the package that needs them,
for instance:
.. code-block:: python
class Cp2k(MakefilePackage, CudaPackage):
"""CP2K is a quantum chemistry and solid state physics software package
that can perform atomistic simulations of solid state, liquid, molecular,
periodic, material, crystal, and biological systems
"""
In the example above ``Cp2k`` inherits all the conflicts and variants that ``CudaPackage`` defines.
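The inherited variants can then be used in the package's own build logic. A
sketch follows (the package name and CMake option are illustrative):

.. code-block:: python

   class MyApp(CMakePackage, CudaPackage):
       def cmake_args(self):
           args = []
           if "+cuda" in self.spec:
               # cuda_arch is a multi-valued variant defined by CudaPackage
               archs = self.spec.variants["cuda_arch"].value
               args.append("-DCUDA_ARCH={0}".format(";".join(archs)))
           return args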
As mentioned in :ref:`install-environment`, you will usually not need to refer
to dependencies explicitly in your package file, as the compiler wrappers take care of most of
the heavy lifting here. There will be times, though, when you need to refer to
the install locations of dependencies, or when you need to do something different
depending on the version, compiler, dependencies, etc. that your package is
built with. These parameters give you access to this type of information.
.. _install-environment:
@@ -4347,9 +4208,16 @@ In addition to invoking the right compiler, the compiler wrappers add
flags to the compile line so that dependencies can be easily found.
These flags are added for each dependency, if they exist:
* Compile-time library search paths: ``-L$dep_prefix/lib``, ``-L$dep_prefix/lib64``
* Runtime library search paths (RPATHs): ``$rpath_flag$dep_prefix/lib``, ``$rpath_flag$dep_prefix/lib64``
* Include search paths: ``-I$dep_prefix/include``
Compile-time library search paths
* ``-L$dep_prefix/lib``
* ``-L$dep_prefix/lib64``
Runtime library search paths (RPATHs)
* ``$rpath_flag$dep_prefix/lib``
* ``$rpath_flag$dep_prefix/lib64``
Include search paths
* ``-I$dep_prefix/include``
An example of this would be the ``libdwarf`` build, which has one
dependency: ``libelf``. Every call to ``cc`` in the ``libdwarf``
@@ -4693,9 +4561,6 @@ other checks.
* - :ref:`AutotoolsPackage <autotoolspackage>`
- ``check`` (``make test``, ``make check``)
- ``installcheck`` (``make installcheck``)
* - :ref:`CachedCMakePackage <cachedcmakepackage>`
- ``check`` (``make check``, ``make test``)
- Not applicable
* - :ref:`CMakePackage <cmakepackage>`
- ``check`` (``make check``, ``make test``)
- Not applicable
@@ -4720,9 +4585,6 @@ other checks.
* - :ref:`SIPPackage <sippackage>`
- Not applicable
- ``test`` (module imports)
* - :ref:`WafPackage <wafpackage>`
- ``build_test`` (must be overridden)
- ``install_test`` (must be overridden)
For example, the ``Libelf`` package inherits from ``AutotoolsPackage``
and its ``Makefile`` has a standard ``check`` target. So Spack will
@@ -5194,16 +5056,6 @@ where each argument has the following meaning:
will run.
The default of ``None`` corresponds to the current directory (``'.'``).
Each call starts with the working directory set to the spec's test stage
directory (i.e., ``self.test_suite.test_dir_for_spec(self.spec)``).
.. warning::
Use of the package spec's installation directory for building and running
tests is **strongly** discouraged. Doing so has caused permission errors
for shared spack instances *and* for facilities that install the software
in read-only file systems or directories.
"""""""""""""""""""""""""""""""""""""""""
Accessing package- and test-related files
@@ -5211,10 +5063,10 @@ Accessing package- and test-related files
You may need to access files from one or more locations when writing
stand-alone tests. This can happen if the software's repository does not
include test source files, or includes them but provides no way to build the
executables using the installed headers and libraries. In these cases,
you may need to reference the files relative to one or more root
directories. The properties containing package- (or spec-) and test-related
include test source files or includes files but no way to build the
executables using the installed headers and libraries. In these
cases, you may need to reference the files relative to one or more
root directory. The properties containing package- and test-related
directory paths are provided in the table below.
.. list-table:: Directory-to-property mapping
@@ -5223,22 +5075,19 @@ directory paths are provided in the table below.
* - Root Directory
- Package Property
- Example(s)
* - Package (Spec) Installation
* - Package Installation Files
- ``self.prefix``
- ``self.prefix.include``, ``self.prefix.lib``
* - Dependency Installation
* - Package Dependency's Files
- ``self.spec['<dependency-package>'].prefix``
- ``self.spec['trilinos'].prefix.include``
* - Test Suite Stage
* - Test Suite Stage Files
- ``self.test_suite.stage``
- ``join_path(self.test_suite.stage, 'results.txt')``
* - Spec's Test Stage
- ``self.test_suite.test_dir_for_spec``
- ``self.test_suite.test_dir_for_spec(self.spec)``
* - Current Spec's Build-time Files
* - Staged Cached Build-time Files
- ``self.test_suite.current_test_cache_dir``
- ``join_path(self.test_suite.current_test_cache_dir, 'examples', 'foo.c')``
* - Current Spec's Custom Test Files
* - Staged Custom Package Files
- ``self.test_suite.current_test_data_dir``
- ``join_path(self.test_suite.current_test_data_dir, 'hello.f90')``
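As an illustrative sketch, a stand-alone test method might combine these
properties with ``run_test`` (the file names and expected output below are
hypothetical):

.. code-block:: python

   def test(self):
       # Compile a staged example against the installed headers and libraries
       src = join_path(self.test_suite.current_test_data_dir, "hello.f90")
       self.run_test(
           "gfortran", options=["-o", "hello", src], purpose="compile the example"
       )
       # Run the resulting executable and check its output
       self.run_test("hello", expected=["Hello"], purpose="run the example")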
@@ -6244,82 +6093,3 @@ might write:
DWARF_PREFIX = $(spack location --install-dir libdwarf)
CXXFLAGS += -I$DWARF_PREFIX/include
CXXFLAGS += -L$DWARF_PREFIX/lib
.. _package_class_structure:
--------------------------
Package class architecture
--------------------------
.. note::
This section aims to provide high-level knowledge of how the package class architecture evolved
in Spack, and some insight into the current design.
Packages in Spack were originally designed to support only a single build system. The overall
class structure for a package looked like:
.. image:: images/original_package_architecture.png
:scale: 60 %
:align: center
In this architecture the base class ``AutotoolsPackage`` was responsible for both the metadata
related to the ``autotools`` build system (e.g. dependencies or variants common to all packages
using it), and for encoding the default installation procedure.
In reality, a non-negligible number of packages either change their build system as the
project evolves, or use different build systems on different platforms. An architecture based on a single class
requires hacks or other workarounds to deal with these cases.
To support a model more adherent to reality, Spack v0.19 changed its internal design by extracting
the attributes and methods related to building a software into a separate hierarchy:
.. image:: images/builder_package_architecture.png
:scale: 60 %
:align: center
In this new format each ``package.py`` contains one ``*Package`` class that gathers all the metadata,
and one or more ``*Builder`` classes that encode the installation procedure. A specific builder object
is created just before the software is built, at a time when Spack knows which build system needs
to be used for the current installation, and it receives a ``package`` object during initialization.
^^^^^^^^^^^^^^^^^^^^^^^^
``build_system`` variant
^^^^^^^^^^^^^^^^^^^^^^^^
To allow imposing conditions based on the build system, each package must have a ``build_system`` variant,
which is usually inherited from base classes. This variant allows for writing metadata that is conditional
on the build system:
.. code-block:: python
with when("build_system=cmake"):
depends_on("cmake", type="build")
and also for selecting a specific build system from a spec literal, like in the following command:
.. code-block:: console
$ spack install arpack-ng build_system=autotools
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Compatibility with single-class format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Internally, Spack always uses builders to perform operations related to the installation of a specific package.
Builders are created by the ``spack.builder.create`` function:
.. literalinclude:: _spack_root/lib/spack/spack/builder.py
:pyobject: create
To achieve backward compatibility with the single-class format, this function creates a special
"adapter builder" if no custom builder is detected in the recipe:
.. image:: images/adapter.png
:scale: 60 %
:align: center
Overall, the role of the adapter is to route access to attributes and methods first through the ``*Package``
hierarchy, and then back to the base class builder. This is shown schematically in the diagram above, where
the adapter "emulates" a method resolution order like the one represented by the red arrows.
@@ -5,9 +5,9 @@
.. _pipelines:
============
CI Pipelines
============
=========
Pipelines
=========
Spack provides commands that support generating and running automated build
pipelines designed for Gitlab CI. At the highest level it works like this:
@@ -168,7 +168,7 @@ which specs are up to date and which need to be rebuilt (it's a good idea for ot
reasons as well, but those are out of scope for this discussion). In this case we
have disabled it (using ``rebuild-index: False``) because the index would only be
generated in the artifacts mirror anyway, and consequently would not be available
during subsequent pipeline runs.
.. note::
With the addition of reproducible builds (#22887) a previously working
@@ -267,64 +267,24 @@ generated by jobs in the pipeline.
``spack ci rebuild``
^^^^^^^^^^^^^^^^^^^^^
The purpose of ``spack ci rebuild`` is straightforward: take its assigned
spec and ensure a binary of a successful build exists on the target mirror.
If the binary does not already exist, it is built from source and pushed
to the mirror. The associated stand-alone tests are optionally run against
the new build. Additionally, files for reproducing the build outside of the
CI environment are created to facilitate debugging.
The purpose of the ``spack ci rebuild`` is straightforward: take its assigned
spec job, check whether the target mirror already has a binary for that spec,
and if not, build the spec from source and push the binary to the mirror. To
accomplish this in a reproducible way, the sub-command prepares a ``spack install``
command line to build a single spec in the DAG, saves that command in a
shell script, ``install.sh``, in the current working directory, and then runs
it to install the spec. The shell script is also exported as an artifact to
aid in reproducing the build outside of the CI environment.
If a binary for the spec does not exist on the target mirror, an install
shell script, ``install.sh``, is created and saved in the current working
directory. The script is run in a job to install the spec from source. The
resulting binary package is pushed to the mirror. If ``cdash`` is configured
for the environment, then the build results will be uploaded to the site.
If it was necessary to install the spec from source, ``spack ci rebuild`` will
also subsequently create a binary package for the spec and try to push it to the
mirror.
Environment variables and values in the ``gitlab-ci`` section of the
``spack.yaml`` environment file provide inputs to this process. The
two main sources of environment variables are variables written into
``.gitlab-ci.yml`` by ``spack ci generate`` and the GitLab CI runtime.
Several key CI pipeline variables are described in
:ref:`ci_environment_variables`.
If the ``--tests`` option is provided, stand-alone tests are performed but
only if the build was successful *and* the package does not appear in the
list of ``broken-tests-packages``. A shell script, ``test.sh``, is created
and run to perform the tests. On completion, test logs are exported as job
artifacts for review and to facilitate debugging. If ``cdash`` is configured,
test results are also uploaded to the site.
A snippet from an example ``spack.yaml`` file illustrating use of this
option *and* specification of a package with broken tests is given below.
The inclusion of a spec for building ``gptune`` is not shown here. Note
that ``--tests`` is passed to ``spack ci rebuild`` as part of the
``gitlab-ci`` script.
.. code-block:: yaml
gitlab-ci:
script:
- . "./share/spack/setup-env.sh"
- spack --version
- cd ${SPACK_CONCRETE_ENV_DIR}
- spack env activate --without-view .
- spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'"
- mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
- if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi
- if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
- spack -d ci rebuild --tests > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2)
broken-tests-packages:
- gptune
In this case, even if ``gptune`` is successfully built from source, the
pipeline will *not* run its stand-alone tests since the package is listed
under ``broken-tests-packages``.
Spack's cloud pipelines provide actual, up-to-date examples of the CI/CD
configuration and environment files used by Spack. You can find them
under Spack's `stacks
<https://github.com/spack/spack/tree/develop/share/spack/gitlab/cloud_pipelines/stacks>`_ repository directory.
The ``spack ci rebuild`` sub-command mainly expects its "input" to come either
from environment variables or from the ``gitlab-ci`` section of the ``spack.yaml``
environment file. There are two main sources of the environment variables, some
are written into ``.gitlab-ci.yml`` by ``spack ci generate``, and some are
provided by the GitLab CI runtime.
.. _cmd-spack-ci-rebuild-index:
@@ -487,7 +447,7 @@ Note about "no-op" jobs
^^^^^^^^^^^^^^^^^^^^^^^
If no specs in an environment need to be rebuilt during a given pipeline run
(meaning all are already up to date on the mirror), a single successful job
(a NO-OP) is still generated to avoid an empty pipeline (which GitLab
considers to be an error). An optional ``service-job-attributes`` section
can be added to your ``spack.yaml`` where you can provide ``tags`` and
@@ -765,7 +725,7 @@ above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
On the other hand, if you're pointing to a spack repository and branch under your
control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``,
and you can instead just clone using the variables you define (``SPACK_REPO``
and ``SPACK_REF`` in the example above).
.. _custom_workflow:
@@ -3,7 +3,6 @@
sphinx>=3.4,!=4.1.2,!=5.1.0
sphinxcontrib-programoutput
sphinx-design
sphinx-rtd-theme
python-levenshtein
# Restrict to docutils <0.17 to workaround a list rendering issue in sphinx.
@@ -18,10 +18,7 @@ spack:
- "py-sphinx@3.4:4.1.1,4.1.3:"
- py-sphinxcontrib-programoutput
- py-docutils@:0.16
- py-sphinx-design
- py-sphinx-rtd-theme
- py-pygments@:2.12
# VCS
- git
- mercurial
@@ -1,5 +1,5 @@
Name, Supported Versions, Notes, Requirement Reason
Python, 3.6--3.11, , Interpreter for Spack
Python, 2.7/3.6-3.10, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
@@ -11,7 +11,6 @@ bzip2, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
lsb-release, , , Linux: identify operating system version
gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories
lib/spack/env/cc vendored
@@ -241,28 +241,28 @@ case "$command" in
mode=cpp
debug_flags="-g"
;;
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|craycc)
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe)
command="$SPACK_CC"
language="C"
comp="CC"
lang_flags=C
debug_flags="-g"
;;
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++)
command="$SPACK_CXX"
language="C++"
comp="CXX"
lang_flags=CXX
debug_flags="-g"
;;
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang|crayftn)
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang)
command="$SPACK_FC"
language="Fortran 90"
comp="FC"
lang_flags=F
debug_flags="-g"
;;
f77|xlf|xlf_r|pgf77)
f77|xlf|xlf_r|pgf77|amdflang)
command="$SPACK_F77"
language="Fortran 77"
comp="F77"
@@ -440,47 +440,6 @@ while [ $# -ne 0 ]; do
continue
fi
if [ -n "${SPACK_COMPILER_FLAGS_KEEP}" ] ; then
# NOTE: the eval is required to allow `|` alternatives inside the variable
eval "\
case \"\$1\" in
$SPACK_COMPILER_FLAGS_KEEP)
append other_args_list \"\$1\"
shift
continue
;;
esac
"
fi
# the replace list is a space-separated list of pipe-separated pairs,
# the first in each pair is the original prefix to be matched, the
# second is the replacement prefix
if [ -n "${SPACK_COMPILER_FLAGS_REPLACE}" ] ; then
for rep in ${SPACK_COMPILER_FLAGS_REPLACE} ; do
before=${rep%|*}
after=${rep#*|}
eval "\
stripped=\"\${1##$before}\"
"
if [ "$stripped" = "$1" ] ; then
continue
fi
replaced="$after$stripped"
# it matched, remove it
shift
if [ -z "$replaced" ] ; then
# completely removed, continue OUTER loop
continue 2
fi
# re-build argument list with replacement
set -- "$replaced" "$@"
done
fi
case "$1" in
-isystem*)
arg="${1#-isystem}"
@@ -1 +0,0 @@
../../cc
@@ -1 +0,0 @@
../cc
@@ -1 +0,0 @@
../cc
@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.0 (commit 77640e572725ad97f18e63a04857155752ace045)
* Version: 0.1.4 (commit b8eea9df2b4204ff27d204452cd46f5199a0b423)
argparse
--------
@@ -132,15 +132,9 @@ def sysctl(*args):
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
else:
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
model = (
"m1" if "Apple" in sysctl("-n", "machdep.cpu.brand_string") else "unknown"
)
info = {
"vendor_id": "Apple",
"flags": [],
@@ -328,26 +322,14 @@ def compatibility_check_for_aarch64(info, target):
features = set(info.get("Features", "").split())
vendor = info.get("CPU implementer", "generic")
# At the moment it's not clear how to detect compatibility with
# a specific version of the architecture
if target.vendor == "generic" and target.name != "aarch64":
return False
arch_root = TARGETS[basename]
arch_root_and_vendor = arch_root == target.family and target.vendor in (
vendor,
"generic",
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
# On macOS it seems impossible to get all the CPU features with sysctl info
and (target.features.issubset(features) or platform.system() == "Darwin")
)
# On macOS it seems impossible to get all the CPU features
# with sysctl info, but for ARM we can get the exact model
if platform.system() == "Darwin":
model_key = info.get("model", basename)
model = TARGETS[model_key]
return arch_root_and_vendor and (target == model or target in model.ancestors)
return arch_root_and_vendor and target.features.issubset(features)
@compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target):
@@ -106,7 +106,7 @@ def __eq__(self, other):
self.name == other.name
and self.vendor == other.vendor
and self.features == other.features
and self.parents == other.parents # avoid ancestors here
and self.ancestors == other.ancestors
and self.compilers == other.compilers
and self.generation == other.generation
)
@@ -85,7 +85,7 @@
"intel": [
{
"versions": ":",
"name": "pentium4",
"name": "x86-64",
"flags": "-march={name} -mtune=generic"
}
],
@@ -1099,7 +1099,8 @@
"avx512cd",
"avx512vbmi",
"avx512ifma",
"sha"
"sha",
"umip"
],
"compilers": {
"gcc": [
@@ -1262,6 +1263,7 @@
"avx512vbmi",
"avx512ifma",
"sha_ni",
"umip",
"clwb",
"rdpid",
"gfni",
@@ -2093,163 +2095,8 @@
]
}
},
"armv8.1a": {
"from": ["aarch64"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "5:",
"flags": "-march=armv8.1-a -mtune=generic"
}
],
"clang": [
{
"versions": ":",
"flags": "-march=armv8.1-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.1-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.1-a -mtune=generic"
}
]
}
},
"armv8.2a": {
"from": ["armv8.1a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "6:",
"flags": "-march=armv8.2-a -mtune=generic"
}
],
"clang": [
{
"versions": ":",
"flags": "-march=armv8.2-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.2-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.2-a -mtune=generic"
}
]
}
},
"armv8.3a": {
"from": ["armv8.2a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "6:",
"flags": "-march=armv8.3-a -mtune=generic"
}
],
"clang": [
{
"versions": "6:",
"flags": "-march=armv8.3-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.3-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.3-a -mtune=generic"
}
]
}
},
"armv8.4a": {
"from": ["armv8.3a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "8:",
"flags": "-march=armv8.4-a -mtune=generic"
}
],
"clang": [
{
"versions": "8:",
"flags": "-march=armv8.4-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.4-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.4-a -mtune=generic"
}
]
}
},
"armv8.5a": {
"from": ["armv8.4a"],
"vendor": "generic",
"features": [],
"compilers": {
"gcc": [
{
"versions": "9:",
"flags": "-march=armv8.5-a -mtune=generic"
}
],
"clang": [
{
"versions": "11:",
"flags": "-march=armv8.5-a -mtune=generic"
}
],
"apple-clang": [
{
"versions": ":",
"flags": "-march=armv8.5-a -mtune=generic"
}
],
"arm": [
{
"versions": ":",
"flags": "-march=armv8.5-a -mtune=generic"
}
]
}
},
"thunderx2": {
"from": ["armv8.1a"],
"from": ["aarch64"],
"vendor": "Cavium",
"features": [
"fp",
@@ -2296,7 +2143,7 @@
}
},
"a64fx": {
"from": ["armv8.2a"],
"from": ["aarch64"],
"vendor": "Fujitsu",
"features": [
"fp",
@@ -2364,7 +2211,7 @@
]
}
},
"cortex_a72": {
"graviton": {
"from": ["aarch64"],
"vendor": "ARM",
"features": [
@@ -2390,19 +2237,19 @@
},
{
"versions": "6:",
"flags" : "-mcpu=cortex-a72"
"flags" : "-march=armv8-a+crc+crypto -mtune=cortex-a72"
}
],
"clang" : [
{
"versions": "3.9:",
"flags" : "-mcpu=cortex-a72"
"flags" : "-march=armv8-a+crc+crypto"
}
]
}
},
"neoverse_n1": {
"from": ["cortex_a72", "armv8.2a"],
"graviton2": {
"from": ["aarch64"],
"vendor": "ARM",
"features": [
"fp",
@@ -2451,7 +2298,7 @@
},
{
"versions": "9.0:",
"flags" : "-mcpu=neoverse-n1"
"flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto -mtune=neoverse-n1"
}
],
"clang" : [
@@ -2462,10 +2309,6 @@
{
"versions": "5:",
"flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto"
},
{
"versions": "10:",
"flags" : "-mcpu=neoverse-n1"
}
],
"arm" : [
@@ -2476,113 +2319,8 @@
]
}
},
"neoverse_v1": {
"from": ["neoverse_n1", "armv8.4a"],
"vendor": "ARM",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"paca",
"pacg",
"dcpodp",
"svei8mm",
"svebf16",
"i8mm",
"bf16",
"dgh",
"rng"
],
"compilers" : {
"gcc": [
{
"versions": "4.8:4.8.9",
"flags": "-march=armv8-a"
},
{
"versions": "4.9:5.9",
"flags": "-march=armv8-a+crc+crypto"
},
{
"versions": "6:6.9",
"flags" : "-march=armv8.1-a"
},
{
"versions": "7:7.9",
"flags" : "-march=armv8.2-a+crypto+fp16 -mtune=cortex-a72"
},
{
"versions": "8.0:8.9",
"flags" : "-march=armv8.2-a+fp16+dotprod+crypto -mtune=cortex-a72"
},
{
"versions": "9.0:9.9",
"flags" : "-mcpu=neoverse-v1"
},
{
"versions": "10.0:",
"flags" : "-mcpu=neoverse-v1"
}
],
"clang" : [
{
"versions": "3.9:4.9",
"flags" : "-march=armv8.2-a+fp16+crc+crypto"
},
{
"versions": "5:10",
"flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto"
},
{
"versions": "11:",
"flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
},
{
"versions": "12:",
"flags" : "-mcpu=neoverse-v1"
}
],
"arm" : [
{
"versions": "20:21.9",
"flags" : "-march=armv8.2-a+sve+fp16+rcpc+dotprod+crypto"
},
{
"versions": "22:",
"flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
}
]
}
},
"m1": {
"from": ["armv8.4a"],
"from": ["aarch64"],
"vendor": "Apple",
"features": [
"fp",
@@ -2647,76 +2385,6 @@
]
}
},
"m2": {
"from": ["m1", "armv8.5a"],
"vendor": "Apple",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"flagm2",
"frint",
"ecv",
"bf16",
"i8mm",
"bti"
],
"compilers": {
"gcc": [
{
"versions": "8.0:",
"flags" : "-march=armv8.5-a -mtune=generic"
}
],
"clang" : [
{
"versions": "9.0:12.0",
"flags" : "-march=armv8.5-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
],
"apple-clang": [
{
"versions": "11.0:12.5",
"flags" : "-march=armv8.5-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=vortex"
}
]
}
},
"arm": {
"from": [],
"vendor": "generic",
@@ -71,12 +71,11 @@
import re
import math
import multiprocessing
import io
import sys
import threading
import time
from contextlib import contextmanager
from six import StringIO
from six import string_types
_error_matches = [
"^FAIL: ",
@@ -245,7 +244,7 @@ def __getitem__(self, line_no):
def __str__(self):
"""Returns event lines and context."""
out = io.StringIO()
out = StringIO()
for i in range(self.start, self.end):
if i == self.line_no:
out.write(' >> %-6d%s' % (i, self[i]))
@@ -385,7 +384,7 @@ def parse(self, stream, context=6, jobs=None):
(tuple): two lists containing ``BuildError`` and
``BuildWarning`` objects.
"""
if isinstance(stream, str):
if isinstance(stream, string_types):
with open(stream) as f:
return self.parse(f, context, jobs)
@@ -410,12 +409,7 @@ def parse(self, stream, context=6, jobs=None):
pool = multiprocessing.Pool(jobs)
try:
# this is a workaround for a Python bug in Pool with ctrl-C
if sys.version_info >= (3, 2):
max_timeout = threading.TIMEOUT_MAX
else:
max_timeout = 9999999
results = pool.map_async(_parse_unpack, args, 1).get(max_timeout)
results = pool.map_async(_parse_unpack, args, 1).get(9999999)
errors, warnings, timings = zip(*results)
finally:
pool.terminate()
lib/spack/external/py2/argparse.py vendored Normal file
File diff suppressed because it is too large
@@ -0,0 +1,289 @@
A. HISTORY OF THE SOFTWARE
==========================
Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
as a successor of a language called ABC. Guido remains Python's
principal author, although it includes many contributions from others.
In 1995, Guido continued his work on Python at the Corporation for
National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
in Reston, Virginia where he released several versions of the
software.
In May 2000, Guido and the Python core development team moved to
BeOpen.com to form the BeOpen PythonLabs team. In October of the same
year, the PythonLabs team moved to Digital Creations (now Zope
Corporation, see http://www.zope.com). In 2001, the Python Software
Foundation (PSF, see http://www.python.org/psf/) was formed, a
non-profit organization created specifically to own Python-related
Intellectual Property. Zope Corporation is a sponsoring member of
the PSF.
All Python releases are Open Source (see http://www.opensource.org for
the Open Source Definition). Historically, most, but not all, Python
releases have also been GPL-compatible; the table below summarizes
the various releases.
Release Derived Year Owner GPL-
from compatible? (1)
0.9.0 thru 1.2 1991-1995 CWI yes
1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
1.6 1.5.2 2000 CNRI no
2.0 1.6 2000 BeOpen.com no
1.6.1 1.6 2001 CNRI yes (2)
2.1 2.0+1.6.1 2001 PSF no
2.0.1 2.0+1.6.1 2001 PSF yes
2.1.1 2.1+2.0.1 2001 PSF yes
2.2 2.1.1 2001 PSF yes
2.1.2 2.1.1 2002 PSF yes
2.1.3 2.1.2 2002 PSF yes
2.2.1 2.2 2002 PSF yes
2.2.2 2.2.1 2002 PSF yes
2.2.3 2.2.2 2003 PSF yes
2.3 2.2.2 2002-2003 PSF yes
2.3.1 2.3 2002-2003 PSF yes
2.3.2 2.3.1 2002-2003 PSF yes
2.3.3 2.3.2 2002-2003 PSF yes
2.3.4 2.3.3 2004 PSF yes
2.3.5 2.3.4 2005 PSF yes
2.4 2.3 2004 PSF yes
2.4.1 2.4 2005 PSF yes
2.4.2 2.4.1 2005 PSF yes
2.4.3 2.4.2 2006 PSF yes
2.4.4 2.4.3 2006 PSF yes
2.5 2.4 2006 PSF yes
2.5.1 2.5 2007 PSF yes
2.5.2 2.5.1 2008 PSF yes
2.5.3 2.5.2 2008 PSF yes
2.6 2.5 2008 PSF yes
2.6.1 2.6 2008 PSF yes
2.6.2 2.6.1 2009 PSF yes
2.6.3 2.6.2 2009 PSF yes
2.6.4 2.6.3 2009 PSF yes
2.6.5 2.6.4 2010 PSF yes
3.0 2.6 2008 PSF yes
3.0.1 3.0 2009 PSF yes
3.1 3.0.1 2009 PSF yes
3.1.1 3.1 2009 PSF yes
3.1.2 3.1.1 2010 PSF yes
3.1.3 3.1.2 2010 PSF yes
3.1.4 3.1.3 2011 PSF yes
3.2 3.1 2011 PSF yes
3.2.1 3.2 2011 PSF yes
3.2.2 3.2.1 2011 PSF yes
3.2.3 3.2.2 2012 PSF yes
Footnotes:
(1) GPL-compatible doesn't mean that we're distributing Python under
the GPL. All Python licenses, unlike the GPL, let you distribute
a modified version without making your changes open source. The
GPL-compatible licenses make it possible to combine Python with
other software that is released under the GPL; the others don't.
(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
because its license has a choice of law clause. According to
CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
is "not incompatible" with the GPL.
Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.
B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012 Python Software Foundation; All Rights Reserved" are retained in Python
alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
-------------------------------------------
BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
Individual or Organization ("Licensee") accessing and otherwise using
this software in source or binary form and its associated
documentation ("the Software").
2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive,
royalty-free, world-wide license to reproduce, analyze, test, perform
and/or display publicly, prepare derivative works, distribute, and
otherwise use the Software alone or in any derivative version,
provided, however, that the BeOpen Python License is retained in the
Software, alone or in any derivative version prepared by Licensee.
3. BeOpen is making the Software available to Licensee on an "AS IS"
basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
5. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
6. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of California, excluding conflict of
law provisions. Nothing in this License Agreement shall be deemed to
create any relationship of agency, partnership, or joint venture
between BeOpen and Licensee. This License Agreement does not grant
permission to use BeOpen trademarks or trade names in a trademark
sense to endorse or promote products or services of Licensee, or any
third party. As an exception, the "BeOpen Python" logos available at
http://www.pythonlabs.com/logos.html may be used according to the
permissions granted on that web page.
7. By copying, installing or otherwise using the software, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
---------------------------------------
1. This LICENSE AGREEMENT is between the Corporation for National
Research Initiatives, having an office at 1895 Preston White Drive,
Reston, VA 20191 ("CNRI"), and the Individual or Organization
("Licensee") accessing and otherwise using Python 1.6.1 software in
source or binary form and its associated documentation.
2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python 1.6.1
alone or in any derivative version, provided, however, that CNRI's
License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
1995-2001 Corporation for National Research Initiatives; All Rights
Reserved" are retained in Python 1.6.1 alone or in any derivative
version prepared by Licensee. Alternately, in lieu of CNRI's License
Agreement, Licensee may substitute the following text (omitting the
quotes): "Python 1.6.1 is made available subject to the terms and
conditions in CNRI's License Agreement. This Agreement together with
Python 1.6.1 may be located on the Internet using the following
unique, persistent identifier (known as a handle): 1895.22/1013. This
Agreement may also be obtained from a proxy server on the Internet
using the following URL: http://hdl.handle.net/1895.22/1013".
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python 1.6.1 or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python 1.6.1.
4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. This License Agreement shall be governed by the federal
intellectual property law of the United States, including without
limitation the federal copyright law, and, to the extent such
U.S. federal law does not apply, by the law of the Commonwealth of
Virginia, excluding Virginia's conflict of law provisions.
Notwithstanding the foregoing, with regard to derivative works based
on Python 1.6.1 that incorporate non-separable material that was
previously distributed under the GNU General Public License (GPL), the
law of the Commonwealth of Virginia shall govern this License
Agreement only as to issues arising under or with respect to
Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
License Agreement shall be deemed to create any relationship of
agency, partnership, or joint venture between CNRI and Licensee. This
License Agreement does not grant permission to use CNRI trademarks or
trade name in a trademark sense to endorse or promote products or
services of Licensee, or any third party.
8. By clicking on the "ACCEPT" button where indicated, or by copying,
installing or otherwise using Python 1.6.1, Licensee agrees to be
bound by the terms and conditions of this License Agreement.
ACCEPT
CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
--------------------------------------------------
Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
The Netherlands. All rights reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.
STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
View File
@@ -0,0 +1 @@
from .functools32 import *
View File
@@ -0,0 +1,158 @@
"""Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
try:
import _thread # Python >= 3
except:
import thread as _thread # Python < 3
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
class error(Exception):
"""Dummy implementation of _thread.error."""
def __init__(self, *args):
self.args = args
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
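A minimal usage sketch (illustrative only, not part of the diff; the import path is a guess based on the vendored functools32 package layout)::

    from functools32._dummy_thread32 import allocate_lock  # hypothetical import path

    lock = allocate_lock()
    with lock:                  # __enter__ is acquire(); single-threaded, so it never blocks
        assert lock.locked()
    assert not lock.locked()    # __exit__ called release()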
View File
@@ -0,0 +1,423 @@
"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>
# and Raymond Hettinger <python at rcn.com>
# Copyright (C) 2006-2010 Python Software Foundation.
# See C source code for _functools credits/copyright
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']
from _functools import partial, reduce
from collections import MutableMapping, namedtuple
from .reprlib32 import recursive_repr as _recursive_repr
from weakref import proxy as _proxy
import sys as _sys
try:
from thread import allocate_lock as Lock
except ImportError:
from ._dummy_thread32 import allocate_lock as Lock
################################################################################
### OrderedDict
################################################################################
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
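For illustration, the order-aware methods above behave like this (hypothetical snippet, assuming the class is in scope)::

    od = OrderedDict([("a", 1), ("b", 2), ("c", 3)])
    od.move_to_end("a")                        # "a" becomes the most recently inserted key
    assert list(od) == ["b", "c", "a"]
    assert od.popitem(last=False) == ("b", 2)  # FIFO pop from the front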
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
wrapper.__wrapped__ = wrapped
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
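A sketch of the decorator in use under Python 2, which this backport targets (illustrative; only __eq__ and __lt__ are defined, the rest are filled in)::

    @total_ordering
    class Version(object):
        def __init__(self, parts):
            self.parts = parts

        def __eq__(self, other):
            return self.parts == other.parts

        def __lt__(self, other):
            return self.parts < other.parts

    assert Version((1, 2)) <= Version((1, 3))  # __le__ synthesized from __lt__/__eq__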
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
__hash__ = None
return K
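Illustrative use with a classic three-way comparison function (hypothetical)::

    def reverse_cmp(a, b):
        return (b > a) - (b < a)   # cmp() semantics, inverted

    assert sorted([3, 1, 2], key=cmp_to_key(reverse_cmp)) == [3, 2, 1]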
_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
def lru_cache(maxsize=100):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function,
tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
hits, misses = [0], [0]
kwd_mark = (object(),) # separates positional and keyword args
lock = Lock() # needed because OrderedDict isn't threadsafe
if maxsize is None:
cache = dict() # simple cache without ordering or size limit
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
try:
result = cache[key]
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
cache = OrderedDict() # ordered least recent to most recent
cache_popitem = cache.popitem
cache_renew = cache.move_to_end
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
with lock:
try:
result = cache[key]
cache_renew(key) # record recent use of this key
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
with lock:
cache[key] = result # record recent use of this key
misses[0] += 1
if len(cache) > maxsize:
cache_popitem(0) # purge least recently used cache entry
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
hits[0] = misses[0] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
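A short usage sketch (hypothetical), exercising the public API the docstring describes::

    @lru_cache(maxsize=32)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(10)
    print(fib.cache_info())   # CacheInfo(hits=8, misses=11, maxsize=32, currsize=11)
    fib.cache_clear()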
View File
@@ -0,0 +1,157 @@
"""Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import __builtin__ as builtins
from itertools import islice
try:
from thread import get_ident
except ImportError:
from _dummy_thread32 import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
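For illustration, a self-referential object whose repr would otherwise recurse forever (hypothetical)::

    class Node(object):
        def __init__(self):
            self.next = self

        @recursive_repr()
        def __repr__(self):
            return "Node(next=%r)" % self.next

    repr(Node())   # "Node(next=...)" -- the recursive call returns the fillvalue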
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
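Usage sketch (hypothetical): per-type limits can be tuned on an instance, and the module-level aRepr backs the exported repr::

    short = Repr()
    short.maxlist = 3
    short.repr(list(range(10)))   # '[0, 1, 2, ...]'
    aRepr.repr("x" * 100)         # truncated to ~30 chars with '...' in the middle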
lib/spack/external/py2/typing.py vendored Normal file
View File
@@ -0,0 +1,103 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This is a fake set of symbols to allow spack to import typing in python
versions where we do not support type checking (<3)
"""
from collections import defaultdict
# (1) Unparameterized types.
Annotated = object
Any = object
AnyStr = object
ByteString = object
Counter = object
Final = object
Hashable = object
NoReturn = object
Sized = object
SupportsAbs = object
SupportsBytes = object
SupportsComplex = object
SupportsFloat = object
SupportsIndex = object
SupportsInt = object
SupportsRound = object
# (2) Parameterized types.
AbstractSet = defaultdict(lambda: object)
AsyncContextManager = defaultdict(lambda: object)
AsyncGenerator = defaultdict(lambda: object)
AsyncIterable = defaultdict(lambda: object)
AsyncIterator = defaultdict(lambda: object)
Awaitable = defaultdict(lambda: object)
Callable = defaultdict(lambda: object)
ChainMap = defaultdict(lambda: object)
ClassVar = defaultdict(lambda: object)
Collection = defaultdict(lambda: object)
Container = defaultdict(lambda: object)
ContextManager = defaultdict(lambda: object)
Coroutine = defaultdict(lambda: object)
DefaultDict = defaultdict(lambda: object)
Deque = defaultdict(lambda: object)
Dict = defaultdict(lambda: object)
ForwardRef = defaultdict(lambda: object)
FrozenSet = defaultdict(lambda: object)
Generator = defaultdict(lambda: object)
Generic = defaultdict(lambda: object)
ItemsView = defaultdict(lambda: object)
Iterable = defaultdict(lambda: object)
Iterator = defaultdict(lambda: object)
KeysView = defaultdict(lambda: object)
List = defaultdict(lambda: object)
Literal = defaultdict(lambda: object)
Mapping = defaultdict(lambda: object)
MappingView = defaultdict(lambda: object)
MutableMapping = defaultdict(lambda: object)
MutableSequence = defaultdict(lambda: object)
MutableSet = defaultdict(lambda: object)
NamedTuple = defaultdict(lambda: object)
Optional = defaultdict(lambda: object)
OrderedDict = defaultdict(lambda: object)
Reversible = defaultdict(lambda: object)
Sequence = defaultdict(lambda: object)
Set = defaultdict(lambda: object)
Tuple = defaultdict(lambda: object)
Type = defaultdict(lambda: object)
TypedDict = defaultdict(lambda: object)
Union = defaultdict(lambda: object)
ValuesView = defaultdict(lambda: object)
# (3) Type variable declarations.
TypeVar = lambda *args, **kwargs: None
# (4) Functions.
cast = lambda _type, x: x
get_args = None
get_origin = None
get_type_hints = None
no_type_check = None
no_type_check_decorator = None
## typing_extensions
# We get a ModuleNotFoundError when attempting to import anything from typing_extensions
# if we separate this into a separate typing_extensions.py file for some reason.
# (1) Unparameterized types.
IntVar = object
Literal = object
NewType = object
Text = object
# (2) Parameterized types.
Protocol = defaultdict(lambda: object)
# (3) Macro for avoiding evaluation except during type checking.
TYPE_CHECKING = False
# (4) Decorators.
final = lambda x: x
overload = lambda x: x
runtime_checkable = lambda x: x
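A sketch of what this shim permits on Python 2 (hypothetical; the real consumers elsewhere in Spack mostly use type comments)::

    from typing import TYPE_CHECKING, Dict, List   # resolves to this shim on Python <3

    # Subscripting just indexes a defaultdict, so every annotation evaluates to `object`:
    assert List[int] is Dict[str, int] is object

    if TYPE_CHECKING:   # always False here, so typing-only imports cost nothing
        pass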
View File
@@ -7,10 +7,11 @@
import argparse
import errno
import io
import re
import sys
from six import StringIO
class Command(object):
"""Parsed representation of a command from argparse.
@@ -180,7 +181,7 @@ def __init__(self, prog, out=None, aliases=False, rst_levels=_rst_levels):
self.rst_levels = rst_levels
def format(self, cmd):
string = io.StringIO()
string = StringIO()
string.write(self.begin_command(cmd.prog))
if cmd.description:
View File
@@ -0,0 +1,39 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# isort: off
import sys
if sys.version_info < (3,):
from itertools import ifilter as filter
from itertools import imap as map
from itertools import izip as zip
from itertools import izip_longest as zip_longest # novm
from urllib import urlencode as urlencode
from urllib import urlopen as urlopen
else:
filter = filter
map = map
zip = zip
from itertools import zip_longest as zip_longest # novm # noqa: F401
from urllib.parse import urlencode as urlencode # novm # noqa: F401
from urllib.request import urlopen as urlopen # novm # noqa: F401
if sys.version_info >= (3, 3):
from collections.abc import Hashable as Hashable # novm
from collections.abc import Iterable as Iterable # novm
from collections.abc import Mapping as Mapping # novm
from collections.abc import MutableMapping as MutableMapping # novm
from collections.abc import MutableSequence as MutableSequence # novm
from collections.abc import MutableSet as MutableSet # novm
from collections.abc import Sequence as Sequence # novm
else:
from collections import Hashable as Hashable # noqa: F401
from collections import Iterable as Iterable # noqa: F401
from collections import Mapping as Mapping # noqa: F401
from collections import MutableMapping as MutableMapping # noqa: F401
from collections import MutableSequence as MutableSequence # noqa: F401
from collections import MutableSet as MutableSet # noqa: F401
from collections import Sequence as Sequence # noqa: F401
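Call sites can then import one set of names on both interpreters (hypothetical example)::

    from llnl.util.compat import Mapping, zip_longest

    assert isinstance({}, Mapping)
    list(zip_longest([1, 2, 3], "ab", fillvalue=None))   # [(1, 'a'), (2, 'b'), (3, None)]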
View File
@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import collections.abc
import errno
import glob
import hashlib
@@ -18,11 +17,14 @@
from contextlib import contextmanager
from sys import platform as _platform
from llnl.util import tty
from llnl.util.lang import dedupe, memoized
from llnl.util.symlink import islink, symlink
import six
from spack.util.executable import CommandNotFoundError, Executable, which
from llnl.util import tty
from llnl.util.compat import Sequence
from llnl.util.lang import dedupe, memoized
from llnl.util.symlink import symlink
from spack.util.executable import Executable
from spack.util.path import path_to_os_path, system_path_filter
is_windows = _platform == "win32"
@@ -111,69 +113,6 @@ def path_contains_subdirectory(path, root):
return norm_path.startswith(norm_root)
@memoized
def file_command(*args):
"""Creates entry point to `file` system command with provided arguments"""
try:
file_cmd = which("file", required=True)
except CommandNotFoundError as e:
if is_windows:
raise CommandNotFoundError("`file` utility is not available on Windows")
else:
raise e
for arg in args:
file_cmd.add_default_arg(arg)
return file_cmd
@memoized
def _get_mime_type():
"""Generate method to call `file` system command to aquire mime type
for a specified path
"""
return file_command("-b", "-h", "--mime-type")
@memoized
def _get_mime_type_compressed():
"""Same as _get_mime_type but attempts to check for
compression first
"""
mime_uncompressed = _get_mime_type()
mime_uncompressed.add_default_arg("-Z")
return mime_uncompressed
def mime_type(filename):
"""Returns the mime type and subtype of a file.
Args:
filename: file to be analyzed
Returns:
Tuple containing the MIME type and subtype
"""
output = _get_mime_type()(filename, output=str, error=str).strip()
tty.debug("==> " + output)
type, _, subtype = output.partition("/")
return type, subtype
def compressed_mime_type(filename):
"""Same as mime_type but checks for type that has been compressed
Args:
filename (str): file to be analyzed
Returns:
Tuple containing the MIME type and subtype
"""
output = _get_mime_type_compressed()(filename, output=str, error=str).strip()
tty.debug("==> " + output)
type, _, subtype = output.partition("/")
return type, subtype
#: This generates the library filenames that may appear on any OS.
library_extensions = ["a", "la", "so", "tbd", "dylib"]
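These helpers wrap the system `file` utility; an illustrative call, on the side of the compare that has them (hypothetical path; requires `file` on PATH and is unsupported on Windows)::

    kind, subtype = mime_type("/bin/sh")           # e.g. ("application", "x-sharedlib")
    kind, subtype = compressed_mime_type("a.gz")   # also looks through the compression layer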
@@ -231,14 +170,9 @@ def filter_file(regex, repl, *filenames, **kwargs):
Keyword Arguments:
string (bool): Treat regex as a plain string. Default is False
backup (bool): Make backup file(s) suffixed with ``~``. Default is False
backup (bool): Make backup file(s) suffixed with ``~``. Default is True
ignore_absent (bool): Ignore any files that don't exist.
Default is False
start_at (str): Marker used to start applying the replacements. If a
text line matches this marker filtering is started at the next line.
All contents before the marker and the marker itself are copied
verbatim. Default is to start filtering from the first line of the
file.
stop_at (str): Marker used to stop scanning the file further. If a text
line matches this marker filtering is stopped and the rest of the
file is copied verbatim. Default is to filter until the end of the
@@ -247,7 +181,6 @@ def filter_file(regex, repl, *filenames, **kwargs):
string = kwargs.get("string", False)
backup = kwargs.get("backup", False)
ignore_absent = kwargs.get("ignore_absent", False)
start_at = kwargs.get("start_at", None)
stop_at = kwargs.get("stop_at", None)
# Allow strings to use \1, \2, etc. for replacement, like sed
@@ -288,13 +221,14 @@ def groupid_to_group(x):
shutil.copy(filename, tmp_filename)
try:
extra_kwargs = {"errors": "surrogateescape"}
extra_kwargs = {}
if sys.version_info > (3, 0):
extra_kwargs = {"errors": "surrogateescape"}
# Open as a text file and filter until the end of the file is
# reached or we found a marker in the line if it was specified
with open(tmp_filename, mode="r", **extra_kwargs) as input_file:
with open(filename, mode="w", **extra_kwargs) as output_file:
do_filtering = start_at is None
# Using iter and readline is a workaround needed not to
# disable input_file.tell(), which will happen if we call
# input_file.next() implicitly via the for loop
@@ -304,12 +238,8 @@ def groupid_to_group(x):
if stop_at == line.strip():
output_file.write(line)
break
if do_filtering:
filtered_line = re.sub(regex, repl, line)
output_file.write(filtered_line)
else:
do_filtering = start_at == line.strip()
output_file.write(line)
filtered_line = re.sub(regex, repl, line)
output_file.write(filtered_line)
else:
current_position = None
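A sketch of the marker-based filtering described above (file name and markers are made up; the start_at/stop_at keywords exist only on the side of the compare that documents them)::

    filter_file(
        r"^CC\s*=.*", "CC = cc", "Makefile",
        start_at="# begin generated", stop_at="# end generated",
        backup=True,
    )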
@@ -501,15 +431,8 @@ def group_ids(uid=None):
if uid is None:
uid = getuid()
pwd_entry = pwd.getpwuid(uid)
user = pwd_entry.pw_name
# user's primary group id may not be listed in grp (i.e. /etc/group)
# you have to check pwd for that, so start the list with that
gids = [pwd_entry.pw_gid]
return sorted(set(gids + [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]))
user = pwd.getpwuid(uid).pw_name
return [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
@system_path_filter(arg_slice=slice(1))
@@ -518,7 +441,7 @@ def chgrp(path, group, follow_symlinks=True):
if is_windows:
raise OSError("Function 'chgrp' is not supported on Windows")
if isinstance(group, str):
if isinstance(group, six.string_types):
gid = grp.getgrnam(group).gr_gid
else:
gid = group
@@ -714,11 +637,7 @@ def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
if symlinks:
target = os.readlink(s)
if os.path.isabs(target):
def escaped_path(path):
return path.replace("\\", r"\\")
new_target = re.sub(escaped_path(abs_src), escaped_path(abs_dest), target)
new_target = re.sub(abs_src, abs_dest, target)
if new_target != target:
tty.debug("Redirecting link {0} to {1}".format(target, new_target))
target = new_target
@@ -1015,7 +934,7 @@ def open_if_filename(str_or_file, mode="r"):
If it's a file object, just yields the file object.
"""
if isinstance(str_or_file, str):
if isinstance(str_or_file, six.string_types):
with open(str_or_file, mode) as f:
yield f
else:
@@ -1086,11 +1005,7 @@ def temp_cwd():
with working_dir(tmp_dir):
yield tmp_dir
finally:
kwargs = {}
if is_windows:
kwargs["ignore_errors"] = False
kwargs["onerror"] = readonly_file_handler(ignore_errors=True)
shutil.rmtree(tmp_dir, **kwargs)
shutil.rmtree(tmp_dir)
@contextmanager
@@ -1305,34 +1220,46 @@ def visit_directory_tree(root, visitor, rel_path="", depth=0):
depth (str): current depth from the root
"""
dir = os.path.join(root, rel_path)
dir_entries = sorted(os.scandir(dir), key=lambda d: d.name)
if sys.version_info >= (3, 5, 0):
dir_entries = sorted(os.scandir(dir), key=lambda d: d.name) # novermin
else:
dir_entries = os.listdir(dir)
dir_entries.sort()
for f in dir_entries:
rel_child = os.path.join(rel_path, f.name)
islink = f.is_symlink()
# On Windows, symlinks to directories are distinct from
# symlinks to files, and it is possible to create a
# broken symlink to a directory (e.g. using os.symlink
# without `target_is_directory=True`), invoking `isdir`
# on a symlink on Windows that is broken in this manner
# will result in an error. In this case we can work around
# the issue by reading the target and resolving the
# directory ourselves
try:
isdir = f.is_dir()
except OSError as e:
if is_windows and hasattr(e, "winerror") and e.winerror == 5 and islink:
# if path is a symlink, determine destination and
# evaluate file vs directory
link_target = resolve_link_target_relative_to_the_link(f)
# link_target might be relative but
# resolve_link_target_relative_to_the_link
# will ensure that, if so, it is relative
# to the CWD and therefore
# makes sense
isdir = os.path.isdir(link_target)
else:
raise e
if sys.version_info >= (3, 5, 0):
rel_child = os.path.join(rel_path, f.name)
islink = f.is_symlink()
# On Windows, symlinks to directories are distinct from
# symlinks to files, and it is possible to create a
# broken symlink to a directory (e.g. using os.symlink
# without `target_is_directory=True`), invoking `isdir`
# on a symlink on Windows that is broken in this manner
# will result in an error. In this case we can work around
# the issue by reading the target and resolving the
# directory ourselves
try:
isdir = f.is_dir()
except OSError as e:
if is_windows and hasattr(e, "winerror") and e.winerror == 5 and islink:
# if path is a symlink, determine destination and
# evaluate file vs directory
link_target = resolve_link_target_relative_to_the_link(f)
# link_target might be relative but
# resolve_link_target_relative_to_the_link
# will ensure that, if so, it is relative
# to the CWD and therefore
# makes sense
isdir = os.path.isdir(link_target)
else:
raise e
else:
rel_child = os.path.join(rel_path, f)
lexists, islink, isdir = lexists_islink_isdir(os.path.join(dir, f))
if not lexists:
continue
if not isdir and not islink:
# handle non-symlink files
@@ -1593,14 +1520,14 @@ def find(root, files, recursive=True):
Parameters:
root (str): The root directory to start searching from
files (str or collections.abc.Sequence): Library name(s) to search for
files (str or Sequence): Library name(s) to search for
recursive (bool): if False search only root folder,
if True descends top-down from the root. Defaults to True.
Returns:
list: The files that have been found
"""
if isinstance(files, str):
if isinstance(files, six.string_types):
files = [files]
if recursive:
@@ -1657,14 +1584,14 @@ def _find_non_recursive(root, search_files):
# Utilities for libraries and headers
class FileList(collections.abc.Sequence):
class FileList(Sequence):
"""Sequence of absolute paths to files.
Provides a few convenience methods to manipulate file paths.
"""
def __init__(self, files):
if isinstance(files, str):
if isinstance(files, six.string_types):
files = [files]
self.files = list(dedupe(files))
@@ -1760,7 +1687,7 @@ def directories(self):
def directories(self, value):
value = value or []
# Accept a single directory as input
if isinstance(value, str):
if isinstance(value, six.string_types):
value = [value]
self._directories = [path_to_os_path(os.path.normpath(x))[0] for x in value]
@@ -1896,9 +1823,9 @@ def find_headers(headers, root, recursive=False):
Returns:
HeaderList: The headers that have been found
"""
if isinstance(headers, str):
if isinstance(headers, six.string_types):
headers = [headers]
elif not isinstance(headers, collections.abc.Sequence):
elif not isinstance(headers, Sequence):
message = "{0} expects a string or sequence of strings as the "
message += "first argument [got {1} instead]"
message = message.format(find_headers.__name__, type(headers))
@@ -1976,11 +1903,7 @@ def names(self):
name = x[3:]
# Valid extensions include: ['.dylib', '.so', '.a']
# on non Windows platform
# Windows valid library extensions are:
# ['.dll', '.lib']
valid_exts = [".dll", ".lib"] if is_windows else [".dylib", ".so", ".a"]
for ext in valid_exts:
for ext in [".dylib", ".so", ".a"]:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
@@ -2062,9 +1985,9 @@ def find_system_libraries(libraries, shared=True):
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, str):
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.abc.Sequence):
elif not isinstance(libraries, Sequence):
message = "{0} expects a string or sequence of strings as the "
message += "first argument [got {1} instead]"
message = message.format(find_system_libraries.__name__, type(libraries))
@@ -2090,7 +2013,7 @@ def find_system_libraries(libraries, shared=True):
return libraries_found
def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
def find_libraries(libraries, root, shared=True, recursive=False):
"""Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by fnmatch:
@@ -2111,41 +2034,27 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
otherwise for static. Defaults to True.
recursive (bool): if False search only root folder,
if True descends top-down from the root. Defaults to False.
runtime (bool): Windows only option, no-op elsewhere. If true,
search for runtime shared libs (.DLL), otherwise, search
for .Lib files. If shared is false, this has no meaning.
Defaults to True.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, str):
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.abc.Sequence):
elif not isinstance(libraries, Sequence):
message = "{0} expects a string or sequence of strings as the "
message += "first argument [got {1} instead]"
message = message.format(find_libraries.__name__, type(libraries))
raise TypeError(message)
if is_windows:
static_ext = "lib"
# For linking (runtime=False) you need the .lib files regardless of
# whether you are doing a shared or static link
shared_ext = "dll" if runtime else "lib"
else:
# Used on both Linux and macOS
static_ext = "a"
shared_ext = "so"
# Construct the right suffix for the library
if shared:
# Used on both Linux and macOS
suffixes = [shared_ext]
suffixes = ["so"]
if sys.platform == "darwin":
# Only used on macOS
suffixes.append("dylib")
else:
suffixes = [static_ext]
suffixes = ["a"]
# List of libraries we are searching with suffixes
libraries = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes]
@@ -2158,11 +2067,7 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
# perform first non-recursive search in root/lib then in root/lib64 and
# finally search all of root recursively. The search stops when the first
# match is found.
common_lib_dirs = ["lib", "lib64"]
if is_windows:
common_lib_dirs.extend(["bin", "Lib"])
for subdir in common_lib_dirs:
for subdir in ("lib", "lib64"):
dirname = join_path(root, subdir)
if not os.path.isdir(dirname):
continue
@@ -2175,147 +2080,6 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
return LibraryList(found_libs)
def find_all_shared_libraries(root, recursive=False, runtime=True):
"""Convenience function that returns the list of all shared libraries found
in the directory passed as argument.
See documentation for `llnl.util.filesystem.find_libraries` for more information
"""
return find_libraries("*", root=root, shared=True, recursive=recursive, runtime=runtime)
def find_all_static_libraries(root, recursive=False):
"""Convenience function that returns the list of all static libraries found
in the directory passed as argument.
See documentation for `llnl.util.filesystem.find_libraries` for more information
"""
return find_libraries("*", root=root, shared=False, recursive=recursive)
def find_all_libraries(root, recursive=False):
"""Convenience function that returns the list of all libraries found
in the directory passed as argument.
See documentation for `llnl.util.filesystem.find_libraries` for more information
"""
return find_all_shared_libraries(root, recursive=recursive) + find_all_static_libraries(
root, recursive=recursive
)
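Illustrative call (hypothetical prefix): the search tries root/lib and root/lib64 first, then falls back to a recursive walk::

    libs = find_libraries(["libmpi*"], root="/opt/mpich", shared=True, recursive=True)
    libs.directories   # LibraryList exposes directories, names, and similar accessors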
class WindowsSimulatedRPath(object):
"""Class representing Windows filesystem rpath analog
One instance of this class is associated with a package (only on Windows)
For each lib/binary directory in an associated package, this class introduces
a symlink to any/all dependent libraries/binaries. This includes the packages
own bin/lib directories, meaning the libraries are linked to the binary directory
and vice versa.
"""
def __init__(self, package, link_install_prefix=True):
"""
Args:
package (spack.package_base.PackageBase): Package requiring links
link_install_prefix (bool): Link against package's own install or stage root.
Packages that run their own executables during build and require rpaths to
the build directory during build time require this option. Default: install
root
"""
self.pkg = package
self._addl_rpaths = set()
self.link_install_prefix = link_install_prefix
self._additional_library_dependents = set()
@property
def library_dependents(self):
"""
Set of directories where package binaries/libraries are located.
"""
return set([self.pkg.prefix.bin]) | self._additional_library_dependents
def add_library_dependent(self, *dest):
"""
Add paths to directories or libraries/binaries to set of
common paths that need to link against other libraries
Specified paths should fall outside of a package's common
link paths, i.e. the bin directories.
"""
for pth in dest:
if os.path.isfile(pth):
self._additional_library_dependents.add(os.path.dirname(pth))
else:
self._additional_library_dependents.add(pth)
@property
def rpaths(self):
"""
Set of libraries this package needs to link against during runtime
These packages will each be symlinked into the packages lib and binary dir
"""
dependent_libs = []
for path in self.pkg.rpath:
dependent_libs.extend(list(find_all_shared_libraries(path, recursive=True)))
for extra_path in self._addl_rpaths:
dependent_libs.extend(list(find_all_shared_libraries(extra_path, recursive=True)))
return set(dependent_libs)
def add_rpath(self, *paths):
"""
Add libraries found at the root of provided paths to runtime linking
These are libraries found outside of the typical scope of rpath linking
that require manual inclusion in a runtime linking scheme.
These links are unidirectional, and are only
intended to bring outside dependencies into this package
Args:
*paths (str): arbitrary number of paths to be added to runtime linking
"""
self._addl_rpaths = self._addl_rpaths | set(paths)
def _link(self, path, dest):
file_name = os.path.basename(path)
dest_file = os.path.join(dest, file_name)
if os.path.exists(dest):
try:
symlink(path, dest_file)
# For py2 compatibility, we have to catch the specific Windows error code
# associate with trying to create a file that already exists (winerror 183)
except OSError as e:
if e.winerror == 183:
# We have either already symlinked or we are encountering a naming clash
# either way, we don't want to overwrite existing libraries
already_linked = islink(dest_file)
tty.debug(
"Linking library %s to %s failed, " % (path, dest_file) + "already linked."
if already_linked
else "library with name %s already exists at location %s."
% (file_name, dest)
)
pass
else:
raise e
def establish_link(self):
"""
(sym)link packages to runtime dependencies based on RPath configuration for
Windows heuristics
"""
# from build_environment.py:463
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally
# for each binary install dir in self.pkg (i.e. pkg.prefix.bin, pkg.prefix.lib)
# install a symlink to each dependent library
for library, lib_dir in itertools.product(self.rpaths, self.library_dependents):
self._link(library, lib_dir)
@system_path_filter
@memoized
def can_access_dir(path):
View File
@@ -5,11 +5,9 @@
from __future__ import division
import collections.abc
import contextlib
import functools
import inspect
import itertools
import os
import re
import sys
@@ -17,6 +15,11 @@
from datetime import datetime, timedelta
from typing import Any, Callable, Iterable, List, Tuple
import six
from six import string_types
from llnl.util.compat import MutableMapping, MutableSequence, zip_longest
# Ignore emacs backups when listing modules
ignore_modules = [r"^\.#", "~$"]
@@ -197,9 +200,14 @@ def _memoized_function(*args, **kwargs):
return ret
except TypeError as e:
# TypeError is raised when indexing into a dict if the key is unhashable.
raise UnhashableArguments(
"args + kwargs '{}' was not hashable for function '{}'".format(key, func.__name__),
) from e
raise six.raise_from(
UnhashableArguments(
"args + kwargs '{}' was not hashable for function '{}'".format(
key, func.__name__
),
),
e,
)
return _memoized_function
@@ -304,7 +312,7 @@ def lazy_eq(lseq, rseq):
# zip_longest is implemented in native code, so use it for speed.
# use zip_longest instead of zip because it allows us to tell
# which iterator was longer.
for left, right in itertools.zip_longest(liter, riter, fillvalue=done):
for left, right in zip_longest(liter, riter, fillvalue=done):
if (left is done) or (right is done):
return False
@@ -324,7 +332,7 @@ def lazy_lt(lseq, rseq):
liter = lseq()
riter = rseq()
for left, right in itertools.zip_longest(liter, riter, fillvalue=done):
for left, right in zip_longest(liter, riter, fillvalue=done):
if (left is done) or (right is done):
return left is done # left was shorter than right
@@ -474,7 +482,7 @@ def add_func_to_class(name, func):
@lazy_lexicographic_ordering
class HashableMap(collections.abc.MutableMapping):
class HashableMap(MutableMapping):
"""This is a hashable, comparable dictionary. Hash is performed on
a tuple of the values in the dictionary."""
@@ -566,7 +574,7 @@ def match_predicate(*args):
def match(string):
for arg in args:
if isinstance(arg, str):
if isinstance(arg, string_types):
if re.search(arg, string):
return True
elif isinstance(arg, list) or isinstance(arg, tuple):
@@ -741,26 +749,6 @@ def _n_xxx_ago(x):
raise ValueError(msg)
def pretty_seconds(seconds):
"""Seconds to string with appropriate units
Arguments:
seconds (float): Number of seconds
Returns:
str: Time string with units
"""
if seconds >= 1:
value, unit = seconds, "s"
elif seconds >= 1e-3:
value, unit = seconds * 1e3, "ms"
elif seconds >= 1e-6:
value, unit = seconds * 1e6, "us"
else:
value, unit = seconds * 1e9, "ns"
return "%.3f%s" % (value, unit)
class RequiredAttributeError(ValueError):
def __init__(self, message):
super(RequiredAttributeError, self).__init__(message)
@@ -879,28 +867,32 @@ def load_module_from_file(module_name, module_path):
ImportError: when the module can't be loaded
FileNotFoundError: when module_path doesn't exist
"""
import importlib.util
if module_name in sys.modules:
return sys.modules[module_name]
# This recipe is adapted from https://stackoverflow.com/a/67692/771663
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path) # novm
module = importlib.util.module_from_spec(spec) # novm
# The module object needs to exist in sys.modules before the
# loader executes the module code.
#
# See https://docs.python.org/3/reference/import.html#loading
sys.modules[spec.name] = module
try:
spec.loader.exec_module(module)
except BaseException:
spec = importlib.util.spec_from_file_location(module_name, module_path) # novm
module = importlib.util.module_from_spec(spec) # novm
# The module object needs to exist in sys.modules before the
# loader executes the module code.
#
# See https://docs.python.org/3/reference/import.html#loading
sys.modules[spec.name] = module
try:
del sys.modules[spec.name]
except KeyError:
pass
raise
spec.loader.exec_module(module)
except BaseException:
try:
del sys.modules[spec.name]
except KeyError:
pass
raise
elif sys.version_info[0] == 2:
import imp
module = imp.load_source(module_name, module_path)
return module
@@ -1010,15 +1002,7 @@ def stable_partition(
return true_items, false_items
def ensure_last(lst, *elements):
"""Performs a stable partition of lst, ensuring that ``elements``
occur at the end of ``lst`` in specified order. Mutates ``lst``.
Raises ``ValueError`` if any ``elements`` are not already in ``lst``."""
for elt in elements:
lst.append(lst.pop(lst.index(elt)))
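For illustration, ensure_last mutates in place and preserves the requested order::

    lst = ["a", "b", "c", "d"]
    ensure_last(lst, "b", "a")
    assert lst == ["c", "d", "b", "a"]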
class TypedMutableSequence(collections.abc.MutableSequence):
class TypedMutableSequence(MutableSequence):
"""Base class that behaves like a list, just with a different type.
Client code can inherit from this base class:
@@ -1127,3 +1111,32 @@ def __init__(self, callback):
def __get__(self, instance, owner):
return self.callback(owner)
def dict_list_to_tuple(dlist):
if isinstance(dlist, (list, tuple)):
return tuple(dict_list_to_tuple(elt) for elt in dlist)
elif isinstance(dlist, dict):
return tuple((key, dict_list_to_tuple(val)) for key, val in dlist.items())
else:
return dlist
class dict_list(list):
"""A list with an interface like an ordereddict that can be turned into a tuple."""
def __setitem__(self, key, value):
self.append((key, value))
def update(self, dict_like):
if isinstance(dict_like, (list, tuple)):
iterable = dict_like
else:
iterable = dict_like.items()
for i in iterable:
self.append(i)
def items(self):
for i in self:
yield i
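A quick sketch of this WIP helper pair (illustrative): __setitem__ appends, so duplicate keys survive in order, and the result round-trips to a hashable tuple::

    dl = dict_list()
    dl["x"] = 1
    dl["x"] = 2
    dl.update({"y": 3})
    assert list(dl.items()) == [("x", 1), ("x", 2), ("y", 3)]
    assert dict_list_to_tuple(dl) == (("x", 1), ("x", 2), ("y", 3))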
View File
@@ -9,9 +9,9 @@
import sys
import time
from datetime import datetime
from typing import Dict, Tuple # novm
import llnl.util.tty as tty
from llnl.util.lang import pretty_seconds
import spack.util.string
@@ -80,7 +80,7 @@ class OpenFileTracker(object):
def __init__(self):
"""Create a new ``OpenFileTracker``."""
self._descriptors = {}
self._descriptors = {} # type: Dict[Tuple[int, int], OpenFile]
def get_fh(self, path):
"""Get a filehandle for a lockfile.
@@ -102,7 +102,7 @@ def get_fh(self, path):
try:
# see whether we've seen this inode/pid before
stat = os.stat(path)
key = (stat.st_dev, stat.st_ino, pid)
key = (stat.st_ino, pid)
open_file = self._descriptors.get(key)
except OSError as e:
@@ -128,32 +128,32 @@ def get_fh(self, path):
# if we just created the file, we'll need to get its inode here
if not stat:
stat = os.fstat(fd)
key = (stat.st_dev, stat.st_ino, pid)
inode = os.fstat(fd).st_ino
key = (inode, pid)
self._descriptors[key] = open_file
open_file.refs += 1
return open_file.fh
def release_by_stat(self, stat):
key = (stat.st_dev, stat.st_ino, os.getpid())
def release_fh(self, path):
"""Release a filehandle, only closing it if there are no more references."""
try:
inode = os.stat(path).st_ino
except OSError as e:
if e.errno != errno.ENOENT: # only handle file not found
raise
inode = None # this will not be in self._descriptors
key = (inode, os.getpid())
open_file = self._descriptors.get(key)
assert open_file, "Attempted to close non-existing inode: %s" % stat.st_inode
assert open_file, "Attempted to close non-existing lock path: %s" % path
open_file.refs -= 1
if not open_file.refs:
del self._descriptors[key]
open_file.fh.close()
def release_by_fh(self, fh):
self.release_by_stat(os.fstat(fh.fileno()))
def purge(self):
for key in list(self._descriptors.keys()):
self._descriptors[key].fh.close()
del self._descriptors[key]
#: Open file descriptors for locks in this process. Used to prevent one process
#: from opening the same file many times for different byte range locks
@@ -166,7 +166,7 @@ def _attempts_str(wait_time, nattempts):
return ""
attempts = spack.util.string.plural(nattempts, "attempt")
return " after {} and {}".format(pretty_seconds(wait_time), attempts)
return " after {0:0.2f}s and {1}".format(wait_time, attempts)
class LockType(object):
@@ -318,8 +318,8 @@ def _lock(self, op, timeout=None):
raise LockROFileError(self.path)
self._log_debug(
"{} locking [{}:{}]: timeout {}".format(
op_str.lower(), self._start, self._length, pretty_seconds(timeout or 0)
"{0} locking [{1}:{2}]: timeout {3} sec".format(
op_str.lower(), self._start, self._length, timeout
)
)
@@ -340,8 +340,7 @@ def _lock(self, op, timeout=None):
total_wait_time = time.time() - start_time
return total_wait_time, num_attempts
total_wait_time = time.time() - start_time
raise LockTimeoutError(op_str.lower(), self.path, total_wait_time, num_attempts)
raise LockTimeoutError("Timed out waiting for a {0} lock.".format(op_str.lower()))
def _poll_lock(self, op):
"""Attempt to acquire the lock in a non-blocking manner. Return whether
@@ -387,12 +386,8 @@ def _ensure_parent_directory(self):
try:
os.makedirs(parent)
except OSError as e:
# os.makedirs can fail in a number of ways when the directory already exists.
# With EISDIR, we know it exists, and others like EEXIST, EACCES, and EROFS
# are fine if we ensure that the directory exists.
# Python 3 allows an exist_ok parameter and ignores any OSError as long as
# the directory exists.
if not (e.errno == errno.EISDIR or os.path.isdir(parent)):
# makedirs can fail when the directory already exists.
if not (e.errno == errno.EEXIST and os.path.isdir(parent) or e.errno == errno.EISDIR):
raise
return parent
@@ -431,7 +426,8 @@ def _unlock(self):
"""
fcntl.lockf(self._file, fcntl.LOCK_UN, self._length, self._start, os.SEEK_SET)
file_tracker.release_by_fh(self._file)
file_tracker.release_fh(self.path)
self._file = None
self._reads = 0
self._writes = 0
@@ -780,18 +776,6 @@ class LockLimitError(LockError):
class LockTimeoutError(LockError):
"""Raised when an attempt to acquire a lock times out."""
def __init__(self, lock_type, path, time, attempts):
fmt = "Timed out waiting for a {} lock after {}.\n Made {} {} on file: {}"
super(LockTimeoutError, self).__init__(
fmt.format(
lock_type,
pretty_seconds(time),
attempts,
"attempt" if attempts == 1 else "attempts",
path,
)
)
class LockUpgradeError(LockError):
"""Raised when unable to upgrade from a read to a write lock."""
View File
@@ -23,11 +23,8 @@ def symlink(real_path, link_path):
    On Windows, use junctions if os.symlink fails.
    """
-    if not is_windows:
+    if not is_windows or _win32_can_symlink():
        os.symlink(real_path, link_path)
-    elif _win32_can_symlink():
-        # Windows requires target_is_directory=True when the target is a dir.
-        os.symlink(real_path, link_path, target_is_directory=os.path.isdir(real_path))
    else:
        try:
            # Try to use junctions
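The removed branch matters because os.symlink on Windows requires target_is_directory=True for directory targets. A sketch of the full fallback chain, with an NTFS junction as the last resort (the mklink invocation is illustrative only, not this module's junction helper):

    import os
    import subprocess
    import sys


    def make_link(real_path, link_path):
        if sys.platform != "win32":
            os.symlink(real_path, link_path)
            return
        try:
            # Requires Developer Mode or admin rights on Windows.
            os.symlink(real_path, link_path, target_is_directory=os.path.isdir(real_path))
        except OSError:
            # Junctions work for directories without elevated privileges.
            subprocess.check_call(["cmd", "/c", "mklink", "/J", link_path, real_path])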


@@ -6,7 +6,6 @@
from __future__ import unicode_literals

import contextlib
-import io
import os
import struct
import sys
@@ -15,6 +14,10 @@
from datetime import datetime
from sys import platform as _platform

+import six
+from six import StringIO
+from six.moves import input

if _platform != "win32":
    import fcntl
    import termios
@@ -180,7 +183,7 @@ def msg(message, *args, **kwargs):
    else:
        cwrite("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))

    for arg in args:
-        print(indent + _output_filter(str(arg)))
+        print(indent + _output_filter(six.text_type(arg)))


def info(message, *args, **kwargs):
@@ -198,13 +201,13 @@ def info(message, *args, **kwargs):
        st_text = process_stacktrace(st_countback)
    cprint(
        "@%s{%s==>} %s%s"
-        % (format, st_text, get_timestamp(), cescape(_output_filter(str(message)))),
+        % (format, st_text, get_timestamp(), cescape(_output_filter(six.text_type(message)))),
        stream=stream,
    )
    for arg in args:
        if wrap:
            lines = textwrap.wrap(
-                _output_filter(str(arg)),
+                _output_filter(six.text_type(arg)),
                initial_indent=indent,
                subsequent_indent=indent,
                break_long_words=break_long_words,
@@ -212,7 +215,7 @@ def info(message, *args, **kwargs):
            for line in lines:
                stream.write(line + "\n")
        else:
-            stream.write(indent + _output_filter(str(arg)) + "\n")
+            stream.write(indent + _output_filter(six.text_type(arg)) + "\n")


def verbose(message, *args, **kwargs):
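These swaps are the Python 2 side of the story: there, str() on a unicode value containing non-ASCII characters raises UnicodeEncodeError, while six.text_type is unicode on Python 2 and str on Python 3. A quick illustration:

    import six

    value = u"caf\u00e9"
    # Python 2: str(value) raises UnicodeEncodeError; six.text_type(value) works.
    # Python 3: six.text_type is str, so the two calls are identical.
    print(six.text_type(value))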
@@ -235,7 +238,7 @@ def error(message, *args, **kwargs):
    kwargs.setdefault("format", "*r")
    kwargs.setdefault("stream", sys.stderr)
-    info("Error: " + str(message), *args, **kwargs)
+    info("Error: " + six.text_type(message), *args, **kwargs)


def warn(message, *args, **kwargs):
@@ -244,7 +247,7 @@ def warn(message, *args, **kwargs):
    kwargs.setdefault("format", "*Y")
    kwargs.setdefault("stream", sys.stderr)
-    info("Warning: " + str(message), *args, **kwargs)
+    info("Warning: " + six.text_type(message), *args, **kwargs)


def die(message, *args, **kwargs):
@@ -268,7 +271,7 @@ def get_number(prompt, **kwargs):
    while number is None:
        msg(prompt, newline=False)
        ans = input()
-        if ans == str(abort):
+        if ans == six.text_type(abort):
            return None

        if ans:
@@ -333,11 +336,11 @@ def hline(label=None, **kwargs):
    cols -= 2
    cols = min(max_width, cols)

-    label = str(label)
+    label = six.text_type(label)
    prefix = char * 2 + " "
    suffix = " " + (cols - len(prefix) - clen(label)) * char

-    out = io.StringIO()
+    out = StringIO()
    out.write(prefix)
    out.write(label)
    out.write(suffix)
@@ -369,5 +372,10 @@ def ioctl_gwinsz(fd):
        return int(rc[0]), int(rc[1])
    else:
+        if sys.version_info[0] < 3:
+            raise RuntimeError(
+                "Terminal size not obtainable on Windows with a\
+Python version older than 3"
+            )
        rc = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80))
        return int(rc[0]), int(rc[1])
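On Python 3 alone, this whole ioctl/environment fallback chain collapses into the standard library. A sketch of the modern equivalent (not what this module can use while it still supports Python 2):

    import shutil

    # Checks the COLUMNS/LINES environment variables first, then queries
    # the terminal, then uses the supplied fallback.
    size = shutil.get_terminal_size(fallback=(80, 24))
    print(size.lines, size.columns)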


@@ -8,10 +8,11 @@
"""
from __future__ import division, unicode_literals

-import io
import os
import sys

+from six import StringIO, text_type

from llnl.util.tty import terminal_size
from llnl.util.tty.color import cextra, clen
@@ -133,7 +134,7 @@ def colify(elts, **options):
    )

    # elts needs to be an array of strings so we can count the elements
-    elts = [str(elt) for elt in elts]
+    elts = [text_type(elt) for elt in elts]

    if not elts:
        return (0, ())
@@ -231,7 +232,7 @@ def transpose():
def colified(elts, **options):
    """Invokes the ``colify()`` function but returns the result as a string
    instead of writing it to an output stream."""
-    sio = io.StringIO()
+    sio = StringIO()
    options["output"] = sio
    colify(elts, **options)
    return sio.getvalue()
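Typical use of the two entry points, assuming the keyword options shown here (output, indent) behave as their names suggest:

    import sys

    from llnl.util.tty.colify import colified, colify

    names = ["zlib", "openssl", "python", "cmake", "ninja"]

    colify(names, output=sys.stdout)  # writes a columnated listing to the stream
    text = colified(names, indent=4)  # same listing, returned as a string
    print(text)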


@@ -65,6 +65,8 @@
import sys
from contextlib import contextmanager

+import six


class ColorParseError(Exception):
    """Raised when a color format fails to parse."""
@@ -148,17 +150,13 @@ def color_when(value):
class match_to_ansi(object):
-    def __init__(self, color=True, enclose=False):
+    def __init__(self, color=True):
        self.color = _color_when_value(color)
-        self.enclose = enclose

    def escape(self, s):
        """Returns a TTY escape sequence for a color"""
        if self.color:
-            if self.enclose:
-                return r"\[\033[%sm\]" % s
-            else:
-                return "\033[%sm" % s
+            return "\033[%sm" % s
        else:
            return ""
@@ -203,11 +201,9 @@ def colorize(string, **kwargs):
    Keyword Arguments:
        color (bool): If False, output will be plain text without control
            codes, for output to non-console devices.
-        enclose (bool): If True, enclose ansi color sequences with
-            square brackets to prevent misestimation of terminal width.
    """
    color = _color_when_value(kwargs.get("color", get_color_when()))
-    string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose")), string)
+    string = re.sub(color_re, match_to_ansi(color), string)
    string = string.replace("}}", "}")
    return string
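The removed enclose option emitted the readline/bash convention for marking escape sequences as zero-width, so prompt-length calculations stay correct. A sketch of the difference (note the removed code uses a raw string, so it produces literal \[ and \033 text for bash to expand, not real escape bytes):

    GREEN = "32"

    plain = "\033[%sm" % GREEN  # real ESC byte: the terminal renders color
    enclosed = r"\[\033[%sm\]" % GREEN  # literal text for a bash PS1 prompt

    print(repr(plain))  # '\x1b[32m'
    print(repr(enclosed))  # '\\[\\033[32m\\]'

Without the \[ \] markers, bash counts the escape codes as printable characters and mis-wraps long command lines, which is the "misestimation of terminal width" the removed docstring describes.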
@@ -257,7 +253,7 @@ def cescape(string):
    Returns:
        (str): the string with color codes escaped
    """
-    string = str(string)
+    string = six.text_type(string)
    string = string.replace("@", "@@")
    string = string.replace("}", "}}")
    return string


@@ -24,6 +24,8 @@
from types import ModuleType  # novm
from typing import Optional  # novm

+from six import StringIO, string_types

import llnl.util.tty as tty

termios = None  # type: Optional[ModuleType]
@@ -239,7 +241,8 @@ def __exit__(self, exc_type, exception, traceback):
"""If termios was available, restore old settings."""
        if self.old_cfg:
            self._restore_default_terminal_settings()
-            atexit.unregister(self._restore_default_terminal_settings)
+            if sys.version_info >= (3,):
+                atexit.unregister(self._restore_default_terminal_settings)

        # restore SIGTSTP and SIGCONT handlers
        if self.old_handlers:
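atexit.unregister does not exist on Python 2, hence the new version guard. If the same effect were needed there, a disarmable wrapper is a common workaround (a sketch, not code from this module):

    import atexit


    class _Unregisterable(object):
        """Wrap a callback so 'unregistering' just disarms it."""

        def __init__(self, fn):
            self.fn = fn

        def __call__(self):
            if self.fn is not None:
                self.fn()

        def unregister(self):
            self.fn = None


    def _restore():
        print("restoring terminal settings")


    handler = _Unregisterable(_restore)
    atexit.register(handler)
    handler.unregister()  # the atexit hook still fires at exit, but does nothing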
@@ -306,7 +309,7 @@ def __init__(self, file_like):
        self.file_like = file_like

-        if isinstance(file_like, str):
+        if isinstance(file_like, string_types):
            self.open = True
        elif _file_descriptors_work(file_like):
            self.open = False
@@ -320,9 +323,12 @@ def __init__(self, file_like):
    def unwrap(self):
        if self.open:
            if self.file_like:
-                self.file = open(self.file_like, "w", encoding="utf-8")
+                if sys.version_info < (3,):
+                    self.file = open(self.file_like, "w")
+                else:
+                    self.file = open(self.file_like, "w", encoding="utf-8")  # novm
            else:
-                self.file = io.StringIO()
+                self.file = StringIO()
            return self.file
        else:
            # We were handed an already-open file object. In this case we also
@@ -693,10 +699,13 @@ def __init__(self, sys_attr):
        self.sys_attr = sys_attr
        self.saved_stream = None
        if sys.platform.startswith("win32"):
-            if hasattr(sys, "gettotalrefcount"):  # debug build
-                libc = ctypes.CDLL("ucrtbased")
+            if sys.version_info < (3, 5):
+                libc = ctypes.CDLL(ctypes.util.find_library("c"))
            else:
-                libc = ctypes.CDLL("api-ms-win-crt-stdio-l1-1-0")
+                if hasattr(sys, "gettotalrefcount"):  # debug build
+                    libc = ctypes.CDLL("ucrtbased")
+                else:
+                    libc = ctypes.CDLL("api-ms-win-crt-stdio-l1-1-0")

            kernel32 = ctypes.WinDLL("kernel32")
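Both variants are choosing which C runtime DLL to load so that C-level stdio buffers can be flushed before file descriptors are swapped out. The cross-platform shape of the idea, as a sketch (DLL names as in the diff; the POSIX branch and the fflush call are assumptions about how such a handle is typically used):

    import ctypes
    import ctypes.util
    import sys

    if sys.platform.startswith("win32"):
        if hasattr(sys, "gettotalrefcount"):  # debug CPython links the debug CRT
            libc = ctypes.CDLL("ucrtbased")
        else:
            libc = ctypes.CDLL("api-ms-win-crt-stdio-l1-1-0")
    else:
        libc = ctypes.CDLL(ctypes.util.find_library("c"))

    libc.fflush(None)  # NULL flushes every open C stdio stream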
@@ -785,7 +794,7 @@ def __enter__(self):
            raise RuntimeError("file argument must be set by __init__ ")

        # Open the logfile for both writing and reading
-        if type(self.logfile) == io.StringIO:
+        if type(self.logfile) == StringIO:
            self._ioflag = True
            # cannot have two streams on tempfile, so we must make our own
            sys.stdout = self.logfile
@@ -918,10 +927,13 @@ def _writer_daemon(
    if sys.version_info < (3, 8) or sys.platform != "darwin":
        os.close(write_fd)

-    # 1. Use line buffering (3rd param = 1) since Python 3 has a bug
+    # Use line buffering (3rd param = 1) since Python 3 has a bug
    # that prevents unbuffered text I/O.
-    # 2. Python 3.x before 3.7 does not open with UTF-8 encoding by default
-    in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
+    if sys.version_info < (3,):
+        in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1)
+    else:
+        # Python 3.x before 3.7 does not open with UTF-8 encoding by default
+        in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")

    if stdin_multiprocess_fd:
        stdin = os.fdopen(stdin_multiprocess_fd.fd)
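In isolation, the added branch wraps an inherited pipe descriptor in a line-buffered text stream, forcing UTF-8 explicitly on Python 3 (before 3.7 the default encoding could come from the locale). A self-contained sketch using an ordinary pipe:

    import os
    import sys

    read_fd, write_fd = os.pipe()

    # The third argument (1) selects line buffering for the text stream.
    if sys.version_info < (3,):
        in_pipe = os.fdopen(read_fd, "r", 1)
    else:
        in_pipe = os.fdopen(read_fd, "r", 1, encoding="utf-8")

    os.write(write_fd, b"one line\n")
    os.close(write_fd)
    print(in_pipe.readline())  # -> "one line"
    in_pipe.close()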
@@ -1011,7 +1023,7 @@ def _writer_daemon(
    finally:
        # send written data back to parent if we used a StringIO
-        if isinstance(log_file, io.StringIO):
+        if isinstance(log_file, StringIO):
            control_pipe.send(log_file.getvalue())
        log_file_wrapper.close()
        close_connection_and_file(read_multiprocess_fd, in_pipe)


@@ -228,8 +228,8 @@ def __init__(self, controller_function, minion_function):
        self.minion_function = minion_function

        # these can be optionally set to change defaults
-        self.controller_timeout = 3
-        self.sleep_time = 0.1
+        self.controller_timeout = 1
+        self.sleep_time = 0
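The branch tightens both knobs, trading robustness on slow machines for faster tests. Roughly how such settings are consumed by a polling wait, as a sketch of the pattern (not this module's exact loop):

    import time


    def wait_for(predicate, timeout=1, sleep_time=0):
        """Poll until predicate() is true or timeout seconds elapse."""
        start = time.time()
        while time.time() - start < timeout:
            if predicate():
                return True
            time.sleep(sleep_time)  # sleep_time=0 yields a tight retry loop
        return False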
def start(self, **kwargs):
"""Start the controller and minion processes.

Some files were not shown because too many files have changed in this diff.