Compare commits

1 commit: 9727d70c62

.github/workflows/bootstrap.yml (vendored): 156 changed lines
@@ -11,7 +11,6 @@ on:
- 'var/spack/repos/builtin/**'
- '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
- '!var/spack/repos/builtin/packages/python/**'
- '!var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/docs/**'
schedule:
# nightly at 2:16 AM
@@ -19,7 +18,7 @@ on:

jobs:

fedora-clingo-sources:
fedora:
runs-on: ubuntu-latest
container: "fedora:latest"
steps:
@@ -29,7 +28,7 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
- name: Setup repo and non-root user
run: |
git --version
@@ -41,12 +40,11 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap untrust github-actions
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/

ubuntu-clingo-sources:
ubuntu:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
@@ -59,7 +57,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
- name: Setup repo and non-root user
run: |
git --version
@@ -71,14 +69,13 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap untrust github-actions
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/

opensuse-clingo-sources:
opensuse:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
container: "opensuse/tumbleweed:latest"
steps:
- name: Install dependencies
run: |
@@ -87,7 +84,7 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-fortran tar git gpg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
- name: Setup repo and non-root user
run: |
git --version
@@ -96,156 +93,21 @@ jobs:
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap untrust github-actions
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/

macos-clingo-sources:
macos:
runs-on: macos-latest
steps:
- name: Install dependencies
run: |
brew install cmake bison@2.7 tree
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
export PATH=/usr/local/opt/bison@2.7/bin:$PATH
spack bootstrap untrust github-actions
spack external find --not-buildable cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/

macos-clingo-binaries:
runs-on: macos-latest
strategy:
matrix:
python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
steps:
- name: Install dependencies
run: |
brew install tree
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d solve zlib
tree ~/.spack/bootstrap/store/

ubuntu-clingo-binaries:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9']
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d solve zlib
tree ~/.spack/bootstrap/store/

ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- uses: actions/checkout@v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/

ubuntu-gnupg-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree \
gawk
- uses: actions/checkout@v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap untrust github-actions
spack -d gpg list
tree ~/.spack/bootstrap/store/

macos-gnupg-binaries:
runs-on: macos-latest
steps:
- name: Install dependencies
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- uses: actions/checkout@v2
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/

macos-gnupg-sources:
runs-on: macos-latest
steps:
- name: Install dependencies
run: |
brew install gawk tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- uses: actions/checkout@v2
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap untrust github-actions
spack -d gpg list
tree ~/.spack/bootstrap/store/
.github/workflows/build-containers.yml (vendored): 59 changed lines

@@ -1,26 +1,15 @@
name: Containers

name: Build & Deploy Docker Containers
on:
# This Workflow can be triggered manually
workflow_dispatch:
# Build new Spack develop containers nightly.
schedule:
- cron: '34 0 * * *'
# Run on pull requests that modify this file
pull_request:
branches:
- develop
paths:
- '.github/workflows/build-containers.yml'
# Let's also build & tag Spack containers on releases.
release:
types: [published]

jobs:
deploy-images:
runs-on: ubuntu-latest
permissions:
packages: write
runs-on: ubuntu-latest
strategy:
# Even if one container fails to build we still want the others
# to continue their builds.
@@ -28,19 +17,19 @@ jobs:
# A matrix of Dockerfile paths, associated tags, and which architectures
# they support.
matrix:
dockerfile: [[amazon-linux, amazonlinux-2.dockerfile, 'linux/amd64,linux/arm64'],
[centos7, centos-7.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le'],
[leap15, leap-15.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le'],
[ubuntu-xenial, ubuntu-1604.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le'],
[ubuntu-bionic, ubuntu-1804.dockerfile, 'linux/amd64,linux/arm64,linux/ppc64le']]
dockerfile: [[amazon-linux, amazonlinux-2.dockerfile, 'linux/amd64,linux/arm64'],
[centos7, centos-7.dockerfile, 'linux/amd64,linux/arm64'],
[leap15, leap-15.dockerfile, 'linux/amd64,linux/arm64'],
[ubuntu-xenial, ubuntu-1604.dockerfile, 'linux/amd64,linux/arm64'],
[ubuntu-bionic, ubuntu-1804.dockerfile, 'linux/amd64,linux/arm64']]
name: Build ${{ matrix.dockerfile[0] }}
steps:
- name: Checkout
uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
uses: actions/checkout@v2

- name: Set Container Tag Normal (Nightly)
run: |
container="${{ matrix.dockerfile[0] }}:latest"
container="ghcr.io/spack/${{ matrix.dockerfile[0]}}:latest"
echo "container=${container}" >> $GITHUB_ENV
echo "versioned=${container}" >> $GITHUB_ENV

@@ -48,7 +37,7 @@ jobs:
- name: Set Container Tag on Release
if: github.event_name == 'release'
run: |
versioned="${{matrix.dockerfile[0]}}:${GITHUB_REF##*/}"
versioned="ghcr.io/spack/${{matrix.dockerfile[0]}}:${GITHUB_REF##*/}"
echo "versioned=${versioned}" >> $GITHUB_ENV

- name: Check ${{ matrix.dockerfile[1] }} Exists
@@ -59,33 +48,25 @@ jobs:
exit 1;
fi

- name: Set up QEMU
uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # @v1

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@94ab11c41e45d028884a99163086648e898eed25 # @v1

- name: Log in to GitHub Container Registry
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 # @v1
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Log in to DockerHub
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1

- name: Build & Deploy ${{ matrix.dockerfile[1] }}
uses: docker/build-push-action@a66e35b9cbcf4ad0ea91ffcaf7bbad63ad9e0229 # @v2
uses: docker/build-push-action@v2
with:
file: share/spack/docker/${{matrix.dockerfile[1]}}
platforms: ${{ matrix.dockerfile[2] }}
push: ${{ github.event_name != 'pull_request' }}
push: true
tags: |
spack/${{ env.container }}
spack/${{ env.versioned }}
ghcr.io/spack/${{ env.container }}
ghcr.io/spack/${{ env.versioned }}
${{ env.container }}
${{ env.versioned }}
.github/workflows/macos_python.yml (vendored): 26 changed lines

@@ -24,8 +24,8 @@ jobs:
name: gcc with clang
runs-on: macos-latest
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
@@ -39,8 +39,8 @@ jobs:
runs-on: macos-latest
timeout-minutes: 700
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
@@ -52,8 +52,8 @@ jobs:
name: scipy, mpl, pd
runs-on: macos-latest
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
@@ -62,3 +62,17 @@ jobs:
spack install -v --fail-fast py-scipy %apple-clang
spack install -v --fail-fast py-matplotlib %apple-clang
spack install -v --fail-fast py-pandas %apple-clang

install_mpi4py_clang:
name: mpi4py, petsc4py
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-mpi4py %apple-clang
spack install -v --fail-fast py-petsc4py %apple-clang
.github/workflows/unit_tests.yaml (vendored): 141 changed lines

@@ -15,8 +15,8 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install Python Packages
@@ -31,10 +31,10 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install Python packages
@@ -48,6 +48,26 @@ jobs:
- name: Run style tests
run: |
share/spack/qa/run-style-tests
# Build the documentation
documentation:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install System packages
run: |
sudo apt-get -y update
sudo apt-get install -y coreutils ninja-build graphviz
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools
pip install --upgrade -r lib/spack/docs/requirements.txt
- name: Build documentation
run: |
share/spack/qa/run-doc-tests

# Check which files have been updated by the PR
changes:
runs-on: ubuntu-latest
@@ -57,12 +77,12 @@ jobs:
packages: ${{ steps.filter.outputs.packages }}
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@b2feaf19c27470162a626bd6fa8438ae5b263721
- uses: dorny/paths-filter@v2
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
@@ -92,17 +112,17 @@ jobs:

# Run unit tests with different configurations on linux
unittests:
needs: [ validate, style, changes ]
needs: [ validate, style, documentation, changes ]
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9]
concretizer: ['original', 'clingo']
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -111,7 +131,10 @@ jobs:
# Needed for unit tests
sudo apt-get -y install \
coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
patchelf cmake bison libbison-dev kcov
patchelf
# Needed for kcov
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools codecov coverage[toml]
@@ -125,13 +148,24 @@ jobs:
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
- name: Install kcov for bash script coverage
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
KCOV_VERSION: 34
run: |
KCOV_ROOT=$(mktemp -d)
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
mkdir -p ${KCOV_ROOT}/build
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
- name: Bootstrap clingo from sources
if: ${{ matrix.concretizer == 'clingo' }}
env:
SPACK_PYTHON: python
run: |
. share/spack/setup-env.sh
spack bootstrap untrust spack-install
spack external find --not-buildable cmake bison
spack -v solve zlib
- name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
@@ -151,26 +185,29 @@ jobs:
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
- uses: codecov/codecov-action@v2.0.2
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,${{ matrix.concretizer }}
# Test shell integration
shell:
needs: [ validate, style, changes ]
needs: [ validate, style, documentation, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install System packages
run: |
sudo apt-get -y update
# Needed for shell tests
sudo apt-get install -y coreutils kcov csh zsh tcsh fish dash bash
sudo apt-get install -y coreutils csh zsh tcsh fish dash bash
# Needed for kcov
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools codecov coverage[toml]
@@ -179,6 +216,17 @@ jobs:
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Install kcov for bash script coverage
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
KCOV_VERSION: 38
run: |
KCOV_ROOT=$(mktemp -d)
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
mkdir -p ${KCOV_ROOT}/build
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
- name: Run shell tests (without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
@@ -189,13 +237,13 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
- uses: codecov/codecov-action@v2.0.2
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: shelltests,linux
# Test for Python2.6 run on Centos 6
centos6:
needs: [ validate, style, changes ]
needs: [ validate, style, documentation, changes ]
runs-on: ubuntu-latest
container: spack/github-actions:centos6
steps:
@@ -205,30 +253,28 @@ jobs:
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
HOME: /home/spack-test
SPACK_TEST_SOLVER: original
run: |
whoami && echo $HOME && cd $HOME
git clone "${{ github.server_url }}/${{ github.repository }}.git" && cd spack
git fetch origin "${{ github.ref }}:test-branch"
git clone https://github.com/spack/spack.git && cd spack
git fetch origin ${{ github.ref }}:test-branch
git checkout test-branch
bin/spack unit-test -x
share/spack/qa/run-unit-tests
- name: Run unit tests (only package tests)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
HOME: /home/spack-test
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: original
run: |
whoami && echo $HOME && cd $HOME
git clone "${{ github.server_url }}/${{ github.repository }}.git" && cd spack
git fetch origin "${{ github.ref }}:test-branch"
git clone https://github.com/spack/spack.git && cd spack
git fetch origin ${{ github.ref }}:test-branch
git checkout test-branch
bin/spack unit-test -x -m "not maybeslow" -k "package_sanity"
share/spack/qa/run-unit-tests

# Test RHEL8 UBI with platform Python. This job is run
# only on PRs modifying core Spack
rhel8-platform-python:
needs: [ validate, style, changes ]
needs: [ validate, style, documentation, changes ]
runs-on: ubuntu-latest
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
container: registry.access.redhat.com/ubi8/ubi
@@ -238,7 +284,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
- name: Setup repo and non-root user
run: |
git --version
@@ -250,17 +296,16 @@ jobs:
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack -d solve zlib
spack unit-test -k 'not cvs and not svn and not hg' -x --verbose
# Test for the clingo based solver (using clingo-cffi)
clingo-cffi:
needs: [ validate, style, changes ]
needs: [ validate, style, documentation, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install System packages
@@ -269,7 +314,21 @@ jobs:
# Needed for unit tests
sudo apt-get -y install \
coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
patchelf kcov
patchelf
# Needed for kcov
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
- name: Install kcov for bash script coverage
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
KCOV_VERSION: 34
run: |
KCOV_ROOT=$(mktemp -d)
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
mkdir -p ${KCOV_ROOT}/build
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools codecov coverage[toml] clingo
@@ -294,22 +353,22 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
- uses: codecov/codecov-action@v2.0.2
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,clingo
# Run unit tests on MacOS
build:
needs: [ validate, style, changes ]
needs: [ validate, style, documentation, changes ]
runs-on: macos-latest
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # @v2
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@dc73133d4da04e56a135ae2246682783cc7c7cb6 # @v2
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -320,14 +379,10 @@ jobs:
run: |
brew install dash fish gcc gnupg2 kcov
- name: Run unit tests
env:
SPACK_TEST_SOLVER: clingo
run: |
git --version
. .github/workflows/setup_git.sh
. share/spack/setup-env.sh
$(which spack) bootstrap untrust spack-install
$(which spack) solve zlib
if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
then
coverage run $(which spack) unit-test -x
@@ -340,7 +395,7 @@ jobs:
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
fi
- uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # @v2.1.0
- uses: codecov/codecov-action@v2.0.2
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
files: ./coverage.xml
.gitignore (vendored): 4 changed lines

@@ -136,7 +136,6 @@ venv/
ENV/
env.bak/
venv.bak/
!/lib/spack/env

# Spyder project settings
.spyderproject
@@ -210,9 +209,6 @@ tramp
/eshell/history
/eshell/lastdir

# zsh byte-compiled files
*.zwc

# elpa packages
/elpa/

@@ -2,7 +2,6 @@ version: 2

sphinx:
  configuration: lib/spack/docs/conf.py
fail_on_warning: true

python:
  version: 3.7
CHANGELOG.md: 184 changed lines

@@ -1,187 +1,3 @@
# v0.17.0 (2021-11-05)

`v0.17.0` is a major feature release.

## Major features in this release

1. **New concretizer is now default**
   The new concretizer introduced as an experimental feature in `v0.16.0`
   is now the default (#25502). The new concretizer is based on the
   [clingo](https://github.com/potassco/clingo) logic programming system,
   and it enables us to do much higher quality and faster dependency solving.
   The old concretizer is still available via the `concretizer: original`
   setting, but it is deprecated and will be removed in `v0.18.0`.

2. **Binary Bootstrapping**
   To make it easier to use the new concretizer and binary packages,
   Spack now bootstraps `clingo` and `GnuPG` from public binaries. If it
   is not able to bootstrap them from binaries, it installs them from
   source code. With these changes, you should still be able to clone Spack
   and start using it almost immediately. (#21446, #22354, #22489, #22606,
   #22720, #23677, #23946, #24003, #25138, #25607, #25964, #26029,
   #26399, #26599).

3. **Reuse existing packages (experimental)**
   The most wanted feature from our
   [2020 user survey](https://spack.io/spack-user-survey-2020/) and
   the most wanted Spack feature of all time (#25310). `spack install`,
   `spack spec`, and `spack concretize` now have a `--reuse` option, which
   causes Spack to minimize the number of rebuilds it does. The `--reuse`
   option will try to find existing installations and binary packages locally
   and in registered mirrors, and will prefer to use them over building new
   versions. This will allow users to build from source *far* less than in
   prior versions of Spack. This feature will continue to be improved, with
   configuration options and better CLI expected in `v0.17.1`. It will become
   the *default* concretization mode in `v0.18.0`.
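   For illustration, a minimal use of the new option (the same example the
   Spack docs use; it assumes dependencies such as `hwloc` and `libfabric`
   may already be installed):

   ```console
   $ spack install --reuse mpich
   ```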
4. **Better error messages**
   We have improved the error messages generated by the new concretizer by
   using *unsatisfiable cores*. Spack will now print a summary of the types
   of constraints that were violated to make a spec unsatisfiable (#26719).

5. **Conditional variants**
   Variants can now have a `when="<spec>"` clause, allowing them to be
   conditional based on the version or other attributes of a package (#24858).
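   As a sketch of the new syntax (the package, variant name, and version
   bound here are hypothetical, not from the release notes):

   ```python
   from spack import *  # standard preamble of a v0.17-era Spack recipe

   class Foo(AutotoolsPackage):
       version("2.0", sha256="...")
       version("1.0", sha256="...")

       # The 'shared' option only exists from version 2.0 onwards
       variant("shared", default=True, when="@2.0:",
               description="Build shared libraries")
   ```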
6. **Git commit versions**
   In an environment and on the command-line, you can now provide a full,
   40-character git commit as a version for any package with a top-level
   `git` URL. e.g., `spack install hdf5@45bb27f58240a8da7ebb4efc821a1a964d7712a8`.
   Spack will compare the commit to tags in the git repository to understand
   what versions it is ahead of or behind.

7. **Override local config and cache directories**
   You can now set `SPACK_DISABLE_LOCAL_CONFIG` to disable the `~/.spack` and
   `/etc/spack` configuration scopes. `SPACK_USER_CACHE_PATH` allows you to
   move caches out of `~/.spack`, as well (#27022, #26735). This addresses
   common problems where users could not isolate CI environments from local
   configuration.
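   A minimal sketch of how a CI job might isolate itself (the cache path is
   hypothetical; per the feature description, setting the disable variable
   turns off the local scopes):

   ```console
   $ export SPACK_DISABLE_LOCAL_CONFIG=true
   $ export SPACK_USER_CACHE_PATH=/tmp/spack-user-cache
   $ spack install zlib
   ```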
8. **Improvements to Spack Containerize**
   For added reproducibility, you can now pin the Spack version used by
   `spack containerize` (#21910). The container build will only build
   with the Spack version pinned at build recipe creation instead of the
   latest Spack version.

9. **New commands for dealing with tags**
   The `spack tags` command allows you to list tags on packages (#26136), and you
   can list tests and filter tags with `spack test list` (#26842).
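   For example (invocations only; output omitted):

   ```console
   $ spack tags       # list the tags defined by packages
   $ spack test list  # list test-capable packages
   ```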
## Other new features of note

* Copy and relocate environment views as stand-alone installations (#24832)
* `spack diff` command can diff two installed specs (#22283, #25169)
* `spack -c <config>` can set one-off config parameters on CLI (#22251)
* `spack load --list` is an alias for `spack find --loaded` (#27184)
* `spack gpg` can export private key with `--secret` (#22557)
* `spack style` automatically bootstraps dependencies (#24819)
* `spack style --fix` automatically invokes `isort` (#24071)
* build dependencies can be installed from build caches with `--include-build-deps` (#19955)
* `spack audit` command for checking package constraints (#23053)
* Spack can now fetch from `CVS` repositories (yep, really) (#23212)
* `spack monitor` lets you upload analysis about installations to a
  [spack monitor server](https://github.com/spack/spack-monitor) (#23804, #24321,
  #23777, #25928)
* `spack python --path` shows which `python` Spack is using (#22006)
* `spack env activate --temp` can create temporary environments (#25388)
* `--preferred` and `--latest` options for `spack checksum` (#25830)
* `cc` is now pure posix and runs on Alpine (#26259)
* `SPACK_PYTHON` environment variable sets which `python` spack uses (#21222)
* `SPACK_SKIP_MODULES` lets you source `setup-env.sh` faster if you don't need modules (#24545)

## Major internal refactors

* `spec.yaml` files are now `spec.json`, yielding a large speed improvement (#22845)
* Splicing allows Spack specs to store mixed build provenance (#20262)
* More extensive hooks API for installations (#21930)
* New internal API for getting the active environment (#25439)

## Performance Improvements

* Parallelize separate concretization in environments; a previously 55 min E4S solve
  now takes 2.5 min (#26264)
* Drastically improve YamlFilesystemView file removal performance via batching (#24355)
* Speed up spec comparison (#21618)
* Speed up environment activation (#25633)

## Archspec improvements

* support for new generic `x86_64_v2`, `x86_64_v3`, `x86_64_v4` targets
  (see [archspec#31](https://github.com/archspec/archspec-json/pull/31))
* `spack arch --generic` lets you get the best generic architecture for
  your node (#27061)
* added support for aocc (#20124), the `arm` compiler on `graviton2` (#24904)
  and on `a64fx` (#24524)

## Infrastructure, buildcaches, and services

* Add support for GCS Bucket Mirrors (#26382)
* Add `spackbot` to help package maintainers with notifications. See
  [spack.github.io/spackbot](https://spack.github.io/spackbot/)
* Reproducible pipeline builds with `spack ci rebuild` (#22887)
* Removed redundant concretizations from GitLab pipeline generation (#26622)
* Spack CI no longer generates jobs for unbuilt specs (#20435)
* Every pull request pipeline has its own buildcache (#25529)
* `--no-add` installs only specified specs and only if already present in… (#22657)
* Add environment-aware `spack buildcache sync` command (#25470)
* Binary cache installation speedups and improvements (#19690, #20768)

## Deprecations and Removals

* `spack setup` was deprecated in v0.16.0, and has now been removed.
  Use `spack develop` and `spack dev-build`.
* Remove unused `--dependencies` flag from `spack load` (#25731)
* Remove stubs for `spack module [refresh|find|rm|loads]`, all of which
  were deprecated in 2018.

## Notable Bugfixes

* Deactivate previous env before activating new one (#25409)
* Many fixes to error codes from `spack install` (#21319, #27012, #25314)
* config add: infer type based on JSON schema validation errors (#27035)
* `spack config edit` now works even if `spack.yaml` is broken (#24689)

## Packages

* Allow non-empty version ranges like `1.1.0:1.1` (#26402)
* Remove `.99`'s from many version ranges (#26422)
* Python: use platform-specific site packages dir (#25998)
* `CachedCMakePackage` for using *.cmake initial config files (#19316)
* `lua-lang` allows swapping `lua` and `luajit` (#22492)
* Better support for `ld.gold` and `ld.lld` (#25626)
* build times are now stored as metadata in `$prefix/.spack` (#21179)
* post-install tests can be reused in smoke tests (#20298)
* Packages can use `pypi` attribute to infer `homepage`/`url`/`list_url` (#17587)
* Use gnuconfig package for `config.guess` file replacement (#26035)
* patches: make re-applied patches idempotent (#26784)

## Spack community stats

* 5969 total packages, 920 new since `v0.16.0`
* 358 new Python packages, 175 new R packages
* 513 people contributed to this release
* 490 committers to packages
* 105 committers to core
* Lots of GPU updates:
  * ~77 CUDA-related commits
  * ~66 AMD-related updates
  * ~27 OneAPI-related commits
  * 30 commits from AMD toolchain support
* `spack test` usage in packages is increasing
  * 1669 packages with tests (mostly generic python tests)
  * 93 packages with their own tests

# v0.16.3 (2021-09-21)

* clang/llvm: fix version detection (#19978)
* Fix use of quotes in Python build system (#22279)
* Cray: fix extracting paths from module files (#23472)
* Use AWS CloudFront for source mirror (#23978)
* Ensure all roots of an installed environment are marked explicit in db (#24277)
* Fix fetching for Python 3.8 and 3.9 (#24686)
* locks: only open lockfiles once instead of for every lock held (#24794)
* Remove the EOL centos:6 docker image

# v0.16.2 (2021-05-22)

* Major performance improvement for `spack load` and other commands. (#23661)
@@ -4,7 +4,6 @@
[](https://github.com/spack/spack/actions/workflows/bootstrap.yml)
[](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
[](https://codecov.io/gh/spack/spack)
[](https://github.com/spack/spack/actions/workflows/build-containers.yml)
[](https://spack.readthedocs.io)
[](https://slack.spack.io)

@@ -27,7 +26,7 @@ for examples and highlights.
To install spack and your first package, make sure you have Python.
Then:

    $ git clone -c feature.manyFiles=true https://github.com/spack/spack.git
    $ git clone https://github.com/spack/spack.git
    $ cd spack/bin
    $ ./spack install zlib
SECURITY.md: 24 changed lines

@@ -1,24 +0,0 @@
# Security Policy

## Supported Versions

We provide security updates for the following releases.
For more on Spack's release structure, see
[`README.md`](https://github.com/spack/spack#releases).

| Version | Supported          |
| ------- | ------------------ |
| develop | :white_check_mark: |
| 0.16.x  | :white_check_mark: |

## Reporting a Vulnerability

To report a vulnerability or other security
issue, email maintainers@spack.io.

You can expect to hear back within two days.
If your security issue is accepted, we will do
our best to release a fix within a week. If
fixing the issue will take longer than this,
we will discuss timeline options with you.
bin/spack: 23 changed lines

@@ -28,7 +28,6 @@ exit 1
from __future__ import print_function

import os
import os.path
import sys

min_python3 = (3, 5)
@@ -71,28 +70,6 @@ if "ruamel.yaml" in sys.modules:
if "ruamel" in sys.modules:
    del sys.modules["ruamel"]

# The following code is here to avoid failures when updating
# the develop version, due to spurious argparse.pyc files remaining
# in the libs/spack/external directory, see:
# https://github.com/spack/spack/pull/25376
# TODO: Remove in v0.18.0 or later
try:
    import argparse
except ImportError:
    argparse_pyc = os.path.join(spack_external_libs, 'argparse.pyc')
    if not os.path.exists(argparse_pyc):
        raise
    try:
        os.remove(argparse_pyc)
        import argparse  # noqa
    except Exception:
        msg = ('The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. '
               'Either delete it manually or ask some administrator to '
               'delete it for you.')
        print(msg.format(argparse_pyc))
        sys.exit(1)

import spack.main  # noqa

# Once we've set up the system path, run the spack main method
@@ -4,29 +4,4 @@ bootstrap:
  enable: true
  # Root directory for bootstrapping work. The software bootstrapped
  # by Spack is installed in a "store" subfolder of this root directory
  root: $user_cache_path/bootstrap
  # Methods that can be used to bootstrap software. Each method may or
  # may not be able to bootstrap all of the software that Spack needs,
  # depending on its type.
  sources:
  - name: 'github-actions'
    type: buildcache
    description: |
      Buildcache generated from a public workflow using Github Actions.
      The sha256 checksum of binaries is checked before installation.
    info:
      url: https://mirror.spack.io/bootstrap/github-actions/v0.1
      homepage: https://github.com/alalazo/spack-bootstrap-mirrors
      releases: https://github.com/alalazo/spack-bootstrap-mirrors/releases
  # This method is just Spack bootstrapping the software it needs from sources.
  # It has been added here so that users can selectively disable bootstrapping
  # from sources by "untrusting" it.
  - name: spack-install
    type: install
    description: |
      Specs built from sources by Spack. May take a long time.
  trusted:
    # By default we trust bootstrapping from sources and from binaries
    # produced on Github via the workflow
    github-actions: true
    spack-install: true
  root: ~/.spack/bootstrap
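These trust settings can also be toggled from the command line. For
example, mirroring the `spack bootstrap untrust` invocations used in the
CI workflows earlier in this diff, either source can be disabled:

```console
$ spack bootstrap untrust spack-install
$ spack bootstrap untrust github-actions
```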
@@ -42,8 +42,8 @@ config:
  # (i.e., ``$TMP`` or ``$TMPDIR``).
  #
  # Another option that prevents conflicts and potential permission issues is
  # to specify `$user_cache_path/stage`, which ensures each user builds in their
  # home directory.
  # to specify `~/.spack/stage`, which ensures each user builds in their home
  # directory.
  #
  # A more traditional path uses the value of `$spack/var/spack/stage`, which
  # builds directly inside Spack's instance without staging them in a
@@ -60,13 +60,13 @@ config:
  # identifies Spack staging to avoid accidentally wiping out non-Spack work.
  build_stage:
    - $tempdir/$user/spack-stage
    - $user_cache_path/stage
    - ~/.spack/stage
    # - $spack/var/spack/stage

  # Directory in which to run tests and store test results.
  # Tests will be stored in directories named by date/time and package
  # name/hash.
  test_stage: $user_cache_path/test
  test_stage: ~/.spack/test

  # Cache directory for already downloaded source tarballs and archived
  # repositories. This can be purged with `spack clean --downloads`.
@@ -75,7 +75,7 @@ config:

  # Cache directory for miscellaneous files, like the package index.
  # This can be purged with `spack clean --misc-cache`
  misc_cache: $user_cache_path/cache
  misc_cache: ~/.spack/cache

  # Timeout in seconds used for downloading sources etc. This only applies
@@ -134,7 +134,7 @@ config:
  # enabling locks.
  locks: true

  # The default url fetch method to use.
  # If set to 'curl', Spack will require curl on the user's system
  # If set to 'urllib', Spack will use python built-in libs to fetch
  url_fetch_method: urllib
@@ -160,10 +160,11 @@ config:
  # sufficiently for many specs.
  #
  # 'clingo': Uses a logic solver under the hood to solve DAGs with full
  #           backtracking and optimization for user preferences. Spack will
  #           try to bootstrap the logic solver, if not already available.
  #           backtracking and optimization for user preferences.
  #
  concretizer: clingo
  # 'clingo' currently requires the clingo ASP solver to be installed and
  #          built with python bindings. 'original' is built in.
  concretizer: original

  # How long to wait to lock the Spack installation database. This lock is used
@@ -190,8 +191,3 @@ config:
  # Set to 'false' to allow installation on filesystems that don't allow setgid bit
  # manipulation by unprivileged users (e.g. AFS)
  allow_sgid: true

  # Whether to set the terminal title to display status information during
  # building and installing packages. This gives information about Spack's
  # current progress as well as the current and total number of packages.
  terminal_title: false
@@ -31,13 +31,13 @@ colorized output with a flag

.. code-block:: console

   $ spack --color always find | less -R
   $ spack --color always | less -R

or an environment variable

.. code-block:: console

   $ SPACK_COLOR=always spack find | less -R
   $ SPACK_COLOR=always spack | less -R

--------------------------
Listing available packages

@@ -188,34 +188,6 @@ configuration a **spec**. In the commands above, ``mpileaks`` and
``mpileaks@3.0.4`` are both valid *specs*. We'll talk more about how
you can use them to customize an installation in :ref:`sec-specs`.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Reusing installed dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. warning::

   The ``--reuse`` option described here is experimental, and it will
   likely be replaced with a different option and configuration settings
   in the next Spack release.

By default, when you run ``spack install``, Spack tries to build a new
version of the package you asked for, along with updated versions of
its dependencies. This gets you the latest versions and configurations,
but it can result in unwanted rebuilds if you update Spack frequently.

If you want Spack to try hard to reuse existing installations as dependencies,
you can add the ``--reuse`` option:

.. code-block:: console

   $ spack install --reuse mpich

This will not do anything if ``mpich`` is already installed. If ``mpich``
is not installed, but dependencies like ``hwloc`` and ``libfabric`` are,
the ``mpich`` will be built with the installed versions, if possible.
You can use the :ref:`spack spec -I <cmd-spack-spec>` command to see what
will be reused and what will be built before you install.

.. _cmd-spack-uninstall:

^^^^^^^^^^^^^^^^^^^
@@ -785,7 +757,7 @@ The output is colored, and written in the style of a git diff. This means that you
can copy and paste it into a GitHub markdown as a code block with language "diff"
and it will render nicely! Here is an example:

.. code-block:: md
.. code-block:: markdown

   ```diff
   --- zlib@1.2.11/efzjziyc3dmb5h5u5azsthgbgog5mj7g
@@ -896,9 +868,8 @@ your path:
These commands will add appropriate directories to your ``PATH``,
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH`` according to the
:ref:`prefix inspections <customize-env-modifications>` defined in your
modules configuration.
When you no longer want to use a package, you can type unload or
unuse similarly:
modules configuration. When you no longer want to use a package, you
can type unload or unuse similarly:

.. code-block:: console

@@ -939,22 +910,6 @@ first ``libelf`` above, you would run:

   $ spack load /qmm4kso

To see which packages you have loaded into your environment, use
``spack find --loaded``:

.. code-block:: console

   $ spack find --loaded
   ==> 2 installed packages
   -- linux-debian7 / gcc@4.4.7 ------------------------------------
   libelf@0.8.13

   -- linux-debian7 / intel@15.0.0 ---------------------------------
   libelf@0.8.13

You can also use ``spack load --list`` to get the same output, but it
does not have the full set of query options that ``spack find`` offers.

We'll learn more about Spack's spec syntax in the next section.

@@ -1694,7 +1649,6 @@ and it will be added to the ``PYTHONPATH`` in your current shell:

Now ``import numpy`` will succeed for as long as you keep your current
session open.
The loaded packages can be checked using ``spack find --loaded``.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Loading Extensions via Modules
@@ -63,7 +63,6 @@ on these ideas for each distinct build system that Spack supports:
   build_systems/intelpackage
   build_systems/rocmpackage
   build_systems/custompackage
   build_systems/multiplepackage

For reference, the :py:mod:`Build System API docs <spack.build_systems>`
provide a list of build systems and methods/attributes that can be
@@ -112,44 +112,20 @@ phase runs:

.. code-block:: console

   $ autoreconf --install --verbose --force -I <aclocal-prefix>/share/aclocal

In case you need to add more arguments, override ``autoreconf_extra_args``
in your ``package.py`` on class scope like this:

.. code-block:: python

   autoreconf_extra_args = ["-Im4"]

   $ libtoolize
   $ aclocal
   $ autoreconf --install --verbose --force

All you need to do is add a few Autotools dependencies to the package.
Most stable releases will come with a ``configure`` script, but if you
check out a commit from the ``master`` branch, you would want to add:
check out a commit from the ``develop`` branch, you would want to add:

.. code-block:: python

   depends_on('autoconf', type='build', when='@master')
   depends_on('automake', type='build', when='@master')
   depends_on('libtool', type='build', when='@master')

It is typically redundant to list the ``m4`` macro processor package as a
dependency, since ``autoconf`` already depends on it.

"""""""""""""""""""""""""""""""
Using a custom autoreconf phase
"""""""""""""""""""""""""""""""

In some cases, it might be needed to replace the default implementation
of the autoreconf phase with one running a script interpreter. In this
example, the ``bash`` shell is used to run the ``autogen.sh`` script.

.. code-block:: python

   def autoreconf(self, spec, prefix):
       which('bash')('autogen.sh')

"""""""""""""""""""""""""""""""""""""""
patching configure or Makefile.in files
"""""""""""""""""""""""""""""""""""""""

   depends_on('autoconf', type='build', when='@develop')
   depends_on('automake', type='build', when='@develop')
   depends_on('libtool', type='build', when='@develop')
   depends_on('m4', type='build', when='@develop')

In some cases, developers might need to distribute a patch that modifies
one of the files used to generate ``configure`` or ``Makefile.in``.
@@ -159,57 +135,6 @@ create a new patch that directly modifies ``configure``. That way,
Spack can use the secondary patch and additional build system
dependencies aren't necessary.

""""""""""""""""""""""""""""
Old Autotools helper scripts
""""""""""""""""""""""""""""

Autotools based tarballs come with helper scripts such as ``config.sub`` and
``config.guess``. It is the responsibility of the developers to keep these files
up to date so that they run on every platform, but for very old software
releases this is impossible. In these cases Spack can help to replace these
files with newer ones, without having to add the heavy dependency on
``automake``.

Automatic helper script replacement is currently enabled by default on
``ppc64le`` and ``aarch64``, as these are the known cases where old scripts fail.
On these targets, ``AutotoolsPackage`` adds a build dependency on ``gnuconfig``,
which is a very light-weight package with newer versions of the helper files.
Spack then tries to run all the helper scripts it can find in the release, and
replaces them on failure with the helper scripts from ``gnuconfig``.

To opt out of this feature, use the following setting:

.. code-block:: python

   patch_config_files = False

To enable it conditionally on different architectures, define a property and
make the package depend on ``gnuconfig`` as a build dependency:

.. code-block:: python

   depends_on('gnuconfig', when='@1.0:')

   @property
   def patch_config_files(self):
       return self.spec.satisfies("@1.0:")

.. note::

   On some exotic architectures it is necessary to use system provided
   ``config.sub`` and ``config.guess`` files. In this case, the most
   transparent solution is to mark the ``gnuconfig`` package as external and
   non-buildable, with a prefix set to the directory containing the files:

   .. code-block:: yaml

      gnuconfig:
        buildable: false
        externals:
        - spec: gnuconfig@master
          prefix: /usr/share/configure_files/

""""""""""""""""
force_autoreconf
""""""""""""""""
@@ -399,29 +324,8 @@ options:

   --with-libfabric=</path/to/libfabric>

"""""""""""""""""""""""
The ``variant`` keyword
"""""""""""""""""""""""

When Spack variants and configure flags do not correspond one-to-one, the
``variant`` keyword can be passed to ``with_or_without`` and
``enable_or_disable``. For example:

.. code-block:: python

   variant('debug_tools', default=False)
   config_args += self.enable_or_disable('debug-tools', variant='debug_tools')

Or when one variant controls multiple flags:

.. code-block:: python

   variant('debug_tools', default=False)
   config_args += self.with_or_without('memchecker', variant='debug_tools')
   config_args += self.with_or_without('profiler', variant='debug_tools')

""""""""""""""""""""
Activation overrides
activation overrides
""""""""""""""""""""

Finally, the behavior of either ``with_or_without`` or
@@ -1,350 +0,0 @@

.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _multiplepackage:

----------------------
Multiple Build Systems
----------------------

Quite frequently, a package will change build systems from one version to the
next. For example, a small project that once used a single Makefile to build
may now require Autotools to handle the increased number of files that need to
be compiled. Or, a package that once used Autotools may switch to CMake for
Windows support. In this case, it becomes a bit more challenging to write a
single build recipe for this package in Spack.

There are several ways that this can be handled in Spack:

#. Subclass the new build system, and override phases as needed (preferred)
#. Subclass ``Package`` and implement ``install`` as needed
#. Create separate ``*-cmake``, ``*-autotools``, etc. packages for each build system
#. Rename the old package to ``*-legacy`` and create a new package
#. Move the old package to a ``legacy`` repository and create a new package
#. Drop older versions that only support the older build system

Of these options, 1 is preferred, and will be demonstrated in this
documentation. Options 3-5 have issues with concretization, so shouldn't be
used. Options 4-5 also don't support more than two build systems. Option 6 only
works if the old versions are no longer needed. Option 1 is preferred over 2
because it makes it easier to drop the old build system entirely.

The exact syntax of the package depends on which build systems you need to
support. Below are a couple of common examples.

^^^^^^^^^^^^^^^^^^^^^
Makefile -> Autotools
^^^^^^^^^^^^^^^^^^^^^

Let's say we have the following package:

.. code-block:: python

   class Foo(MakefilePackage):
       version("1.2.0", sha256="...")

       def edit(self, spec, prefix):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       def install(self, spec, prefix):
           install_tree(".", prefix)


The package subclasses from :ref:`makefilepackage`, which has three phases:

#. ``edit`` (does nothing by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

In this case, the ``install`` phase needed to be overridden because the
Makefile did not have an install target. We also modify the Makefile to use
Spack's compiler wrappers. The default ``build`` phase is not changed.
Starting with version 1.3.0, we want to use Autotools to build instead.
:ref:`autotoolspackage` has four phases:

#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

If the only version we need to support is 1.3.0, the package would look as
simple as:

.. code-block:: python

   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]


In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.

If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:

.. code-block:: python

   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def configure_args(self):
           return ["--enable-shared"]

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def autoreconf(self, spec, prefix):
           pass

       @when("@:1.2")
       def configure(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)


There are a few interesting things to note here:

* We added ``deprecated=True`` to version 1.2.0. This signifies that version
  1.2.0 is deprecated and shouldn't be used. However, if a user still relies
  on version 1.2.0, it's still there and builds just fine.
* We moved the contents of the ``edit`` phase to the ``patch`` function. Since
  ``AutotoolsPackage`` doesn't have an ``edit`` phase, the only way for this
  step to be executed is to move it to the ``patch`` function, which always
  gets run.
* The ``autoreconf`` and ``configure`` phases become no-ops. Since the old
  Makefile-based build system doesn't use these, we ignore these phases when
  building ``foo@1.2.0``.
* The ``@when`` decorator is used to override these phases only for older
  versions. The default methods are used for ``foo@1.3:``.

Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.
^^^^^^^^^^^^^^^^^^
Autotools -> CMake
^^^^^^^^^^^^^^^^^^

Let's say we have the following package:

.. code-block:: python

   class Bar(AutotoolsPackage):
       version("1.2.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]


The package subclasses from :ref:`autotoolspackage`, which has four phases:

#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.

Starting with version 1.3.0, we want to use CMake to build instead.
:ref:`cmakepackage` has three phases:

#. ``cmake`` (runs ``cmake ...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

If the only version we need to support is 1.3.0, the package would look as
simple as:

.. code-block:: python

   class Bar(CMakePackage):
       version("1.3.0", sha256="...")

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]


In this case, we use the default methods for each phase and only override
``cmake_args`` to specify additional flags to pass to ``cmake``.

If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:

.. code-block:: python

   class Bar(CMakePackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.2.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@:1.2")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())


There are a few interesting things to note here:

* We added ``deprecated=True`` to version 1.2.0. This signifies that version
  1.2.0 is deprecated and shouldn't be used. However, if a user still relies
  on version 1.2.0, it's still there and builds just fine.
* Since CMake and Autotools are so similar, we only need to override the
  ``cmake`` phase; we can use the default ``build`` and ``install`` phases.
* We override ``cmake`` to run ``./configure`` for older versions.
  ``configure_args`` remains the same.
* The ``@when`` decorator is used to override these phases only for older
  versions. The default methods are used for ``bar@1.3:``.

Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Multiple build systems for the same version
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

During the transition from one build system to another, developers often
support multiple build systems at the same time. Spack can only use a single
build system for a single version. To decide which build system to use for a
particular version, take the following things into account:

1. If the developers explicitly state that one build system is preferred over
   another, use that one.
2. If one build system is considered "experimental" while another is considered
   "stable", use the stable build system.
3. Otherwise, use the newer build system.

The developer preference for which build system to use can change over time as
a newer build system becomes stable/recommended.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Dropping support for old build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When older versions of a package don't support a newer build system, it can be
tempting to simply delete them from a package. This significantly reduces
package complexity and makes the build recipe much easier to maintain. However,
other packages or Spack users may rely on these older versions. The recommended
approach is to first support both build systems (as demonstrated above),
:ref:`deprecate <deprecate>` versions that rely on the old build system, and
remove those versions and any phases that needed to be overridden in the next
Spack release.
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Three or more build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^

In rare cases, a package may change build systems multiple times. For example,
a package may start with Makefiles, then switch to Autotools, then switch to
CMake. The same logic used above can be extended to any number of build systems.
For example:

.. code-block:: python

   class Baz(CMakePackage):
       version("1.4.0", sha256="...")  # CMake
       version("1.3.0", sha256="...")  # Autotools
       version("1.2.0", sha256="...")  # Makefile

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.3.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@1.3")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def cmake(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)
^^^^^^^^^^^^^^^^^^^
Additional examples
^^^^^^^^^^^^^^^^^^^

When writing new packages, it often helps to see examples of existing packages.
Here is an incomplete list of existing Spack packages that have changed build
systems before:

================ ===================== ================
Package          Previous Build System New Build System
================ ===================== ================
amber            custom                CMake
arpack-ng        Autotools             CMake
atk              Autotools             Meson
blast            None                  Autotools
dyninst          Autotools             CMake
evtgen           Autotools             CMake
fish             Autotools             CMake
gdk-pixbuf       Autotools             Meson
glib             Autotools             Meson
glog             Autotools             CMake
gmt              Autotools             CMake
gtkplus          Autotools             Meson
hpl              Makefile              Autotools
interproscan     Perl                  Maven
jasper           Autotools             CMake
kahip            SCons                 CMake
kokkos           Makefile              CMake
kokkos-kernels   Makefile              CMake
leveldb          Makefile              CMake
libdrm           Autotools             Meson
libjpeg-turbo    Autotools             CMake
mesa             Autotools             Meson
metis            None                  CMake
mpifileutils     Autotools             CMake
muparser         Autotools             CMake
mxnet            Makefile              CMake
nest             Autotools             CMake
neuron           Autotools             CMake
nsimd            CMake                 nsconfig
opennurbs        Makefile              CMake
optional-lite    None                  CMake
plasma           Makefile              CMake
preseq           Makefile              Autotools
protobuf         Autotools             CMake
py-pygobject     Autotools             Python
singularity      Autotools             Makefile
span-lite        None                  CMake
ssht             Makefile              CMake
string-view-lite None                  CMake
superlu          Makefile              CMake
superlu-dist     Makefile              CMake
uncrustify       Autotools             CMake
================ ===================== ================

Packages that support multiple build systems can be a bit confusing to write.
Don't hesitate to open an issue or draft pull request and ask for advice from
other Spack developers!
@@ -336,7 +336,7 @@ This would be translated to:

.. code-block:: python

   extends('python')
   depends_on('python@3.5:3', type=('build', 'run'))
   depends_on('python@3.5:3.999', type=('build', 'run'))


Many ``setup.py`` or ``setup.cfg`` files also contain information like::

@@ -568,7 +568,7 @@ check the ``METADATA`` file for lines like::

Lines that use ``Requires-Dist`` are similar to ``install_requires``.
Lines that use ``Provides-Extra`` are similar to ``extras_require``,
and you can add a variant for those dependencies. The ``~=1.11.0``
syntax is equivalent to ``1.11.0:1.11``.
syntax is equivalent to ``1.11.0:1.11.999``.

""""""""""
setuptools
@@ -259,16 +259,3 @@ and ld.so will ONLY search for dependencies in the ``RUNPATH`` of

the loading object.

DO NOT MIX the two options within the same install tree.

----------------------
``terminal_title``
----------------------

By setting this option to ``true``, Spack will update the terminal's title to
provide information about its current progress as well as the current and
total package numbers.

To work properly, this requires your terminal to reset its title after
Spack has finished its work, otherwise Spack's status information will
remain in the terminal's title indefinitely. Most terminals should already
be set up this way and clear Spack's status information.
@@ -402,15 +402,12 @@ Spack-specific variables

Spack understands several special variables. These are:

* ``$env``: name of the currently active :ref:`environment <environments>`
* ``$spack``: path to the prefix of this Spack installation
* ``$tempdir``: default system temporary directory (as specified in
  Python's `tempfile.tempdir
  <https://docs.python.org/2/library/tempfile.html#tempfile.tempdir>`_
  variable).
* ``$user``: name of the current user
* ``$user_cache_path``: user cache directory (``~/.spack`` unless
  :ref:`overridden <local-config-overrides>`)

Note that, as with shell variables, you can write these as ``$varname``
or with braces to distinguish the variable from surrounding characters:
@@ -565,39 +562,3 @@ built in and are not overridden by a configuration file. The

command line. ``dirty`` and ``install_tree`` come from the custom
scopes ``./my-scope`` and ``./my-scope-2``, and all other configuration
options come from the default configuration files that ship with Spack.

.. _local-config-overrides:

------------------------------
Overriding Local Configuration
------------------------------

Spack's ``system`` and ``user`` scopes provide ways for administrators and users to set
global defaults for all Spack instances, but for use cases where one wants a clean Spack
installation, these scopes can be undesirable. For example, users may want to opt out of
global system configuration, or they may want to ignore their own home directory
settings when running in a continuous integration environment.

Spack also, by default, keeps various caches and user data in ``~/.spack``, but
users may want to override these locations.

Spack provides three environment variables that allow you to override or opt out of
configuration locations:

* ``SPACK_USER_CONFIG_PATH``: Override the path to use for the
  ``user`` scope (``~/.spack`` by default).
* ``SPACK_SYSTEM_CONFIG_PATH``: Override the path to use for the
  ``system`` scope (``/etc/spack`` by default).
* ``SPACK_DISABLE_LOCAL_CONFIG``: set this environment variable to completely disable
  **both** the system and user configuration directories. Spack will only consider its
  own defaults and ``site`` configuration locations.

And one that allows you to move the default cache location:

* ``SPACK_USER_CACHE_PATH``: Override the default path to use for user data
  (misc_cache, tests, reports, etc.)

With these settings, if you want to isolate Spack in a CI environment, you can do this::

   export SPACK_DISABLE_LOCAL_CONFIG=true
   export SPACK_USER_CACHE_PATH=/tmp/spack
@@ -126,6 +126,9 @@ are currently supported are summarized in the table below:

   * - Ubuntu 18.04
     - ``ubuntu:18.04``
     - ``spack/ubuntu-bionic``
   * - CentOS 6
     - ``centos:6``
     - ``spack/centos6``
   * - CentOS 7
     - ``centos:7``
     - ``spack/centos7``
@@ -197,7 +200,7 @@ Setting Base Images

The ``images`` subsection is used to select both the image where
Spack builds the software and the image where the built software
is installed. This attribute can be set in different ways and
is installed. This attribute can be set in two different ways and
which one to use depends on the use case at hand.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -257,54 +260,10 @@ software is respectively built and installed:

   ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]

This is the simplest available method of selecting base images, and we advise
This method of selecting base images is the simplest of the two, and we advise
to use it whenever possible. There are cases though where using Spack official
images is not enough to fit production needs. In these situations users can
extend the recipe to start with the bootstrapping of Spack at a certain pinned
version or manually select which base image to start from in the recipe,
as we'll see next.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use a Bootstrap Stage for Spack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In some cases users may want to pin the commit sha that is used for Spack, to ensure later
reproducibility, or start from a fork of the official Spack repository to try a bugfix or
a feature in the early stage of development. This is possible by being just a little more
verbose when specifying information about Spack in the ``spack.yaml`` file:

.. code-block:: yaml

   images:
     os: amazonlinux:2
     spack:
       # URL of the Spack repository to be used in the container image
       url: <to-use-a-fork>
       # Either a commit sha, a branch name or a tag
       ref: <sha/tag/branch>
       # If true turn a branch name or a tag into the corresponding commit
       # sha at the time of recipe generation
       resolve_sha: <true/false>

``url`` specifies the URL from which to clone Spack and defaults to https://github.com/spack/spack.
The ``ref`` attribute can be either a commit sha, a branch name or a tag. The default value in
this case is to use the ``develop`` branch, but it may change in the future to point to the latest stable
release. Finally ``resolve_sha`` transforms branch names or tags into the corresponding commit
shas at the time of recipe generation, to allow for greater reproducibility of the results
at a later time.

The list of operating systems that can be used to bootstrap Spack can be
obtained with:

.. command-output:: spack containerize --list-os

.. note::

   The ``resolve_sha`` option uses ``git rev-parse`` under the hood and thus it requires
   to checkout the corresponding Spack repository in a temporary folder before generating
   the recipe. Recipe generation may take longer when this option is set to true because
   of this additional step.

images is not enough to fit production needs. In these situations users can manually
select which base image to start from in the recipe, as we'll see next.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use Custom Images Provided by Users
@@ -456,18 +415,6 @@ to customize the generation of container recipes:

     - Version of Spack used in the ``build`` stage
     - Valid tags for ``base:image``
     - Yes, if using constrained selection of base images
   * - ``images:spack:url``
     - Repository from which Spack is cloned
     - Any fork of Spack
     - No
   * - ``images:spack:ref``
     - Reference for the checkout of Spack
     - Either a commit sha, a branch name or a tag
     - No
   * - ``images:spack:resolve_sha``
     - Resolve branches and tags in ``spack.yaml`` to commits in the generated recipe
     - True or False (default: False)
     - No
   * - ``images:build``
     - Image to be used in the ``build`` stage
     - Any valid container image
@@ -338,6 +338,15 @@ Once all of the dependencies are installed, you can try building the documentation:

If you see any warning or error messages, you will have to correct those before
your PR is accepted.

.. note::

   There is also a ``run-doc-tests`` script in ``share/spack/qa``. The only
   difference between running this script and running ``make`` by hand is that
   the script will exit immediately if it encounters an error or warning. This
   is necessary for CI. If you made a lot of documentation changes, it is
   much quicker to run ``make`` by hand so that you can see all of the warnings
   at once.

If you are editing the documentation, you should obviously be running the
documentation tests. But even if you are simply adding a new package, your
changes could cause the documentation tests to fail:
@@ -210,6 +210,15 @@ Spec-related modules

   but compilers aren't fully integrated with the build process
   yet.

:mod:`spack.architecture`
  :func:`architecture.sys_type <spack.architecture.sys_type>` is used
  to determine the host architecture while building.

  .. warning::

     Not yet implemented. Should eventually have architecture
     descriptions for cross-compiling.

^^^^^^^^^^^^^^^^^
Build environment
^^^^^^^^^^^^^^^^^
@@ -732,17 +732,13 @@ Configuring environment views

The Spack Environment manifest file has a top-level keyword
``view``. Each entry under that heading is a view descriptor, headed
by a name. The view descriptor contains the root of the view, and
optionally the projections for the view, ``select`` and
``exclude`` lists for the view and link information via ``link`` and
``link_type``. For example, in the following manifest
optionally the projections for the view, and ``select`` and
``exclude`` lists for the view. For example, in the following manifest
file snippet we define a view named ``mpis``, rooted at
``/path/to/view`` in which all projections use the package name,
version, and compiler name to determine the path for a given
package. This view selects all packages that depend on MPI, and
excludes those built with the PGI compiler at version 18.5.
All the dependencies of each root spec in the environment will be linked
in the view due to the command ``link: all`` and the files in the view will
be symlinks to the spack install directories.

.. code-block:: yaml

@@ -755,16 +751,11 @@ be symlinks to the spack install directories.

       exclude: ['%pgi@18.5']
       projections:
         all: {name}/{version}-{compiler.name}
       link: all
       link_type: symlink

For more information on using view projections, see the section on
:ref:`adding_projections_to_views`. The default for the ``select`` and
``exclude`` values is to select everything and exclude nothing. The
default projection is the default view projection (``{}``). The ``link``
defaults to ``all`` but can also be ``roots`` when only the root specs
in the environment are desired in the view. The ``link_type`` defaults
to ``symlink`` but can also take the value of ``hardlink`` or ``copy``.
default projection is the default view projection (``{}``).

Any number of views may be defined under the ``view`` heading in a
Spack Environment.
@@ -9,16 +9,22 @@

Getting Started
===============

--------------------
System Prerequisites
--------------------
-------------
Prerequisites
-------------

Spack has the following minimum system requirements, which are assumed to
be present on the machine where Spack is run:
Spack has the following minimum requirements, which must be installed
before Spack is run:

.. csv-table:: System prerequisites for Spack
   :file: tables/system_prerequisites.csv
   :header-rows: 1
#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.9) to run Spack
#. A C/C++ compiler for building and the ``bash`` shell for Spack's compiler
   wrapper
#. The ``make`` executable for building
#. The ``tar``, ``gzip``, ``unzip``, ``bzip2``, ``xz`` and optionally ``zstd``
   executables for extracting source code
#. The ``patch`` command to apply patches
#. The ``git`` and ``curl`` commands for fetching
#. If using the ``gpg`` subcommand, ``gnupg2`` is required

These requirements can be easily installed on most modern Linux systems;
on macOS, XCode is required. Spack is designed to run on HPC
@@ -35,7 +41,7 @@ Getting Spack is easy. You can clone it from the `github repository

.. code-block:: console

   $ git clone -c feature.manyFiles=true https://github.com/spack/spack.git
   $ git clone https://github.com/spack/spack.git

This will create a directory called ``spack``.
@@ -84,140 +90,6 @@ sourcing time, ensuring future invocations of the ``spack`` command will

continue to use the same consistent python version regardless of changes in
the environment.

^^^^^^^^^^^^^^^^^^^^
Bootstrapping clingo
^^^^^^^^^^^^^^^^^^^^

Spack uses ``clingo`` under the hood to resolve optimal versions and variants of
dependencies when installing a package. Since ``clingo`` itself is a binary,
Spack has to install it on initial use, which is called bootstrapping.

Spack provides two ways of bootstrapping ``clingo``: from pre-built binaries
(default), or from sources. The fastest way to get started is to bootstrap from
pre-built binaries.

.. note::

   When bootstrapping from pre-built binaries, Spack currently requires
   ``patchelf`` on Linux and ``otool`` on macOS. If ``patchelf`` is not in the
   ``PATH``, Spack will build it from sources, and a C++ compiler is required.

The first time you concretize a spec, Spack will bootstrap in the background:

.. code-block:: console

   $ time spack spec zlib
   Input spec
   --------------------------------
   zlib

   Concretized
   --------------------------------
   zlib@1.2.11%gcc@7.5.0+optimize+pic+shared arch=linux-ubuntu18.04-zen

   real    0m20.023s
   user    0m18.351s
   sys     0m0.784s

After this command you'll see that ``clingo`` has been installed for Spack's own use:

.. code-block:: console

   $ spack find -b
   ==> Showing internal bootstrap store at "/root/.spack/bootstrap/store"
   ==> 3 installed packages
   -- linux-rhel5-x86_64 / gcc@9.3.0 -------------------------------
   clingo-bootstrap@spack  python@3.6

   -- linux-ubuntu18.04-zen / gcc@7.5.0 ----------------------------
   patchelf@0.13

Subsequent calls to the concretizer will then be much faster:

.. code-block:: console

   $ time spack spec zlib
   [ ... ]
   real    0m0.490s
   user    0m0.431s
   sys     0m0.041s


If for security concerns you cannot bootstrap ``clingo`` from pre-built
binaries, you have to mark this bootstrapping method as untrusted. This makes
Spack fall back to bootstrapping from sources:

.. code-block:: console

   $ spack bootstrap untrust github-actions
   ==> "github-actions" is now untrusted and will not be used for bootstrapping

You can verify that the new settings are effective with:

.. code-block:: console

   $ spack bootstrap list
   Name: github-actions UNTRUSTED

     Type: buildcache

     Info:
       url: https://mirror.spack.io/bootstrap/github-actions/v0.1
       homepage: https://github.com/alalazo/spack-bootstrap-mirrors
       releases: https://github.com/alalazo/spack-bootstrap-mirrors/releases

     Description:
       Buildcache generated from a public workflow using Github Actions.
       The sha256 checksum of binaries is checked before installation.


   Name: spack-install TRUSTED

     Type: install

     Description:
       Specs built from sources by Spack. May take a long time.

.. note::

   When bootstrapping from sources, Spack requires a full install of Python
   including header files (e.g. ``python3-dev`` on Debian), and a compiler
   with support for C++14 (GCC on Linux, Apple Clang on macOS) and static C++
   standard libraries on Linux.

Spack will build the required software on the first request to concretize a spec:

.. code-block:: console

   $ spack spec zlib
   [+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
   [+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
   [+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
   ==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
   [ ... ]
   zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell

"""""""""""""""""""
The Bootstrap Store
"""""""""""""""""""

All the tools Spack needs for its own functioning are installed in a separate store, which lives
under the ``${HOME}/.spack`` directory. The software installed there can be queried with:

.. code-block:: console

   $ spack find --bootstrap
   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
   ==> 3 installed packages
   -- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
   clingo-bootstrap@spack  python@3.6.9  re2c@1.2.1

If needed, the bootstrap store can also be cleaned with:

.. code-block:: console

   $ spack clean -b
   ==> Removing software in "/home/spack/.spack/bootstrap/store"
^^^^^^^^^^^^^^^^^^
Check Installation
@@ -246,6 +118,53 @@ environment*, especially for ``PATH``. Only software that comes with

the system, or that you know you wish to use with Spack, should be
included. This procedure will avoid many strange build errors.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Bootstrapping clingo
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Spack supports using clingo as an external solver to compute which software
needs to be installed. If you have a default compiler supporting C++14, Spack
can automatically bootstrap this tool from sources the first time it is
needed:

.. code-block:: console

   $ spack solve zlib
   [+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
   [+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
   [+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
   ==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
   [ ... ]
   ==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
   zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell

If you want to speed up bootstrapping, you may try to search for ``cmake`` and ``bison``
on your system:

.. code-block:: console

   $ spack external find cmake bison
   ==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
   bison@3.0.4  cmake@3.19.4

All the tools Spack needs for its own functioning are installed in a separate store, which lives
under the ``${HOME}/.spack`` directory. The software installed there can be queried with:

.. code-block:: console

   $ spack find --bootstrap
   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
   ==> 3 installed packages
   -- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
   clingo-bootstrap@spack  python@3.6.9  re2c@1.2.1

If needed, the bootstrap store can also be cleaned with:

.. code-block:: console

   $ spack clean -b
   ==> Removing software in "/home/spack/.spack/bootstrap/store"
^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Alternate Prefix
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -449,34 +368,6 @@ then inject those flags into the compiler command. Compiler flags

entered from the command line will be discussed in more detail in the
following section.

Some compilers also require additional environment configuration.
Examples include Intel's oneAPI and AMD's AOCC compiler suites,
which have custom scripts for loading environment variables and setting paths.
These variables should be specified in the ``environment`` section of the compiler
specification. The operations available to modify the environment are ``set``, ``unset``,
``prepend_path``, ``append_path``, and ``remove_path``. For example:

.. code-block:: yaml

   compilers:
   - compiler:
       modules: []
       operating_system: centos6
       paths:
         cc: /opt/intel/oneapi/compiler/latest/linux/bin/icx
         cxx: /opt/intel/oneapi/compiler/latest/linux/bin/icpx
         f77: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
         fc: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
       spec: oneapi@latest
       environment:
         set:
           MKL_ROOT: "/path/to/mkl/root"
         unset: # A list of environment variables to unset
         - CC
         prepend_path: # Similar for append|remove_path
           LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler
^^^^^^^^^^^^^^^^^^^^^^^
@@ -631,9 +522,8 @@ Fortran.

#. Run ``spack compiler find`` to locate Clang.

#. There are different ways to get ``gfortran`` on macOS. For example, you can
   install GCC with Spack (``spack install gcc``), with Homebrew (``brew install
   gcc``), or from a `DMG installer
   <https://github.com/fxcoudert/gfortran-for-macOS/releases>`_.
   install GCC with Spack (``spack install gcc``) or with Homebrew
   (``brew install gcc``).

#. The only thing left to do is to edit ``~/.spack/darwin/compilers.yaml`` to provide
   the path to ``gfortran``:
@@ -654,8 +544,7 @@ Fortran.

   If you used Spack to install GCC, you can get the installation prefix by
   ``spack location -i gcc`` (this will only work if you have a single version
   of GCC installed). Whereas for Homebrew, GCC is installed in
   ``/usr/local/Cellar/gcc/x.y.z``. With the DMG installer, the correct path
   will be ``/usr/local/gfortran``.
   ``/usr/local/Cellar/gcc/x.y.z``.

^^^^^^^^^^^^^^^^^^^^^
Compiler Verification
@@ -39,7 +39,7 @@ package:

.. code-block:: console

   $ git clone -c feature.manyFiles=true https://github.com/spack/spack.git
   $ git clone https://github.com/spack/spack.git
   $ cd spack/bin
   $ ./spack install libelf
@@ -213,18 +213,6 @@ location). The set ``my_custom_lmod_modules`` will install its lmod

modules to ``/path/to/install/custom/lmod/modules`` (and still install
its tcl modules, if any, to the default location).

By default, an architecture-specific directory is added to the root
directory. A module set may override that behavior by setting the
``arch_folder`` config value to ``False``.

.. code-block:: yaml

   modules:
     default:
       roots:
         tcl: /path/to/install/tcl/modules
     arch_folder: false

Obviously, having multiple module sets install modules to the default
location could be confusing to users of your modules. In the next
section, we will discuss enabling and disabling module types (module
@@ -461,36 +449,6 @@ that are already in the LMod hierarchy.

For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
See `this discussion on the LMod project <https://github.com/TACC/Lmod/issues/114>`_.

""""""""""""""""""""""
Select default modules
""""""""""""""""""""""

By default, when multiple modules of the same name share a directory,
the highest version number will be the default module. This behavior
of the ``module`` command can be overridden with a symlink named
``default`` to the desired default module. If you wish to configure
default modules with Spack, add a ``defaults`` key to your modules
configuration:

.. code-block:: yaml

   modules:
     my-module-set:
       tcl:
         defaults:
         - gcc@10.2.1
         - hdf5@1.2.10+mpi+hl%gcc

These defaults may be arbitrarily specific. For any package that
satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.

.. warning::
   If Spack is configured to generate multiple default packages in the
   same directory, the last modulefile to be generated will be the
   default module.

.. _customize-env-modifications:

"""""""""""""""""""""""""""""""""""
@@ -612,7 +612,6 @@ it executable, then runs it with some arguments.

   installer = Executable(self.stage.archive_file)
   installer('--prefix=%s' % prefix, 'arg1', 'arg2', 'etc.')

.. _deprecate:

^^^^^^^^^^^^^^^^^^^^^^^^
Deprecating old versions
@@ -695,23 +694,20 @@ example, ``py-sphinx-rtd-theme@0.1.10a0``. In this case, numbers are

always considered to be "newer" than letters. This is for consistency
with `RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_.

Spack versions may also be arbitrary non-numeric strings, for example
``@develop``, ``@master``, ``@local``.
Spack versions may also be arbitrary non-numeric strings; any string
here will suffice; for example, ``@develop``, ``@master``, ``@local``.
Versions are compared as follows. First, a version string is split into
multiple fields based on delimiters such as ``.``, ``-`` etc. Then
matching fields are compared using the rules below:

The order on versions is defined as follows. A version string is split
into a list of components based on delimiters such as ``.``, ``-`` etc.
Lists are then ordered lexicographically, where components are ordered
as follows:
#. The following develop-like strings are greater (newer) than all
   numbers and are ordered as ``develop > main > master > head > trunk``.

#. The following special strings are considered larger than any other
   numeric or non-numeric version component, and satisfy the following
   order between themselves: ``develop > main > master > head > trunk``.
#. Numbers are all less than the chosen develop-like strings above,
   and are sorted numerically.

#. Numbers are ordered numerically, are less than special strings, and
   larger than other non-numeric components.

#. All other non-numeric components are less than numeric components,
   and are ordered alphabetically.
#. All other non-numeric versions are less than numeric versions, and
   are sorted alphabetically.
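Either way the rules are phrased, they can be sanity-checked interactively. A
minimal sketch, assuming the ``Version`` class from ``spack.version`` (which
implements this ordering) is importable from a Spack checkout:

.. code-block:: python

   from spack.version import Version

   # Special strings order among themselves as documented...
   assert Version('develop') > Version('main') > Version('master')
   # ...and compare as newer than any purely numeric version.
   assert Version('develop') > Version('999.9')
   # Numeric components compare numerically, not lexicographically.
   assert Version('1.10') > Version('1.9')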
The logic behind this sort order is two-fold:
@@ -732,7 +728,7 @@ Version selection

When concretizing, many versions might match a user-supplied spec.
For example, the spec ``python`` matches all available versions of the
package ``python``. Similarly, ``python@3:`` matches all versions of
Python 3 and above. Given a set of versions that match a spec, Spack
Python3. Given a set of versions that match a spec, Spack
concretization uses the following priorities to decide which one to
use:
@@ -1422,60 +1418,6 @@ other similar operations:

   ).with_default('auto').with_non_feature_values('auto'),
   )

^^^^^^^^^^^^^^^^^^^^
Conditional Variants
^^^^^^^^^^^^^^^^^^^^

The variant directive accepts a ``when`` clause. The variant will only
be present on specs that otherwise satisfy the spec listed as the
``when`` clause. For example, the following class has a variant
``bar`` when it is at version 2.0 or higher.

.. code-block:: python

   class Foo(Package):
       ...
       variant('bar', default=False, when='@2.0:', description='help message')

The ``when`` clause follows the same syntax and accepts the same
values as the ``when`` argument of
:py:func:`spack.directives.depends_on`
^^^^^^^^^^^^^^^^^^^
Overriding Variants
^^^^^^^^^^^^^^^^^^^

Packages may override variants for several reasons, most often to
change the default from a variant defined in a parent class or to
change the conditions under which a variant is present on the spec.

When a variant is defined multiple times, whether in the same package
file or in a subclass and a superclass, the last definition is used
for all attributes **except** for the ``when`` clauses. The ``when``
clauses are accumulated through all invocations, and the variant is
present on the spec if any of the accumulated conditions are
satisfied.

For example, consider the following package:

.. code-block:: python

   class Foo(Package):
       ...
       variant('bar', default=False, when='@1.0', description='help1')
       variant('bar', default=True, when='platform=darwin', description='help2')
       ...

This package ``foo`` has a variant ``bar`` when the spec satisfies
either ``@1.0`` or ``platform=darwin``, but not for other platforms at
other versions. The default for this variant, when it is present, is
always ``True``, regardless of which condition of the variant is
satisfied. This allows packages to override variants in packages or
build system classes from which they inherit, by modifying the variant
values without modifying the ``when`` clause. It also allows a package
to implement ``or`` semantics for a variant ``when`` clause by
duplicating the variant definition.
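As a concrete illustration of the inheritance case, here is a minimal sketch
(the ``Foo``/``FooVendor`` names are hypothetical) in which a subclass flips
an inherited default while leaving the accumulated ``when`` condition intact:

.. code-block:: python

   class Foo(Package):
       ...
       variant('bar', default=False, when='@2.0:', description='enable bar')

   class FooVendor(Foo):
       ...
       # The last definition wins for the default and description, while the
       # ``when`` clauses of both definitions are accumulated.
       variant('bar', default=True, when='@2.0:', description='enable bar')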
------------------------------------
Resources (expanding extra tarballs)
------------------------------------
@@ -2120,7 +2062,7 @@ Version ranges
^^^^^^^^^^^^^^

Although some packages require a specific version for their dependencies,
most can be built with a range of versions. For example, if you are
writing a package for a legacy Python module that only works with Python
2.4 through 2.6, this would look like:

@@ -2129,9 +2071,9 @@ writing a package for a legacy Python module that only works with Python

   depends_on('python@2.4:2.6')

Version ranges in Spack are *inclusive*, so ``2.4:2.6`` means any version
greater than or equal to ``2.4`` and up to and including any ``2.6.x``. If
you want to specify that a package works with any version of Python 3 (or
higher), this would look like:
greater than or equal to ``2.4`` and up to and including ``2.6``. If you
want to specify that a package works with any version of Python 3, this
would look like:

.. code-block:: python

@@ -2142,30 +2084,29 @@ requires Python 2, you can similarly leave out the lower bound:

.. code-block:: python

   depends_on('python@:2')
   depends_on('python@:2.9')

Notice that we didn't use ``@:3``. Version ranges are *inclusive*, so
``@:3`` means "up to and including any 3.x version".
``@:3`` means "up to and including 3".

What if a package can only be built with Python 2.7? You might be
What if a package can only be built with Python 2.6? You might be
inclined to use:

.. code-block:: python

   depends_on('python@2.7')
   depends_on('python@2.6')

However, this would be wrong. Spack assumes that all version constraints
are exact, so it would try to install Python not at ``2.7.18``, but
exactly at ``2.7``, which is a non-existent version. The correct way to
specify this would be:
are absolute, so it would try to install Python at exactly ``2.6``. The
correct way to specify this would be:

.. code-block:: python

   depends_on('python@2.7.0:2.7')
   depends_on('python@2.6.0:2.6.999')

A spec can contain a version list of ranges and individual versions
separated by commas. For example, if you need Boost 1.59.0 or newer,
but there are known issues with 1.64.0, 1.65.0, and 1.66.0, you can say:
A spec can contain multiple version ranges separated by commas.
For example, if you need Boost 1.59.0 or newer, but there are known
issues with 1.64.0, 1.65.0, and 1.66.0, you can say:

.. code-block:: python

@@ -2882,7 +2823,7 @@ is equivalent to:

   depends_on('elpa+openmp', when='+openmp+elpa')

Constraints from nested context managers are also combined together, but they are rarely
Constraints from nested context managers are also added together, but they are rarely
needed or recommended.
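Since the hunk above only states the rule, here is a minimal sketch of the
nested case (a package fragment; the ``hdf5`` dependency and the version
condition are illustrative, not from the original text):

.. code-block:: python

   with when('+mpi'):
       with when('@2.0:'):
           # the two conditions combine, roughly equivalent to:
           #   depends_on('hdf5+mpi', when='@2.0: +mpi')
           depends_on('hdf5+mpi')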
.. _install-method:
@@ -4426,9 +4367,9 @@ The signature for ``cache_extra_test_sources`` is:

where ``srcs`` is a string or a list of strings corresponding to
the paths for the files and/or subdirectories, relative to the staged
source, that are to be copied to the corresponding relative test path
under the prefix. All of the contents within each subdirectory will
also be copied.
source, that are to be copied to the corresponding path relative to
``self.install_test_root``. All of the contents within each subdirectory
will also be copied.

For example, a package method for copying everything in the ``tests``
subdirectory plus the ``foo.c`` and ``bar.c`` files from ``examples``
@@ -4436,13 +4377,8 @@ can be implemented as shown below.

.. note::

   The method name ``copy_test_sources`` here is for illustration
   purposes. You are free to use a name that is more suited to your
   package.

The key to copying the files at build time for stand-alone testing
is use of the ``run_after`` directive, which ensures the associated
files are copied **after** the provided build stage.
The ``run_after`` directive ensures associated files are copied
**after** the package is installed by the build process.

.. code-block:: python

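   # The body of this example is elided in this diff hunk. A minimal sketch
   # of such a method, consistent with the surrounding description (the name
   # copy_test_sources is illustrative, per the note above):
   @run_after('install')
   def copy_test_sources(self):
       srcs = ['tests',
               join_path('examples', 'foo.c'),
               join_path('examples', 'bar.c')]
       self.cache_extra_test_sources(srcs)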
@@ -4459,13 +4395,18 @@ can be implemented as shown below.

In this case, the method copies the associated files from the build
stage **after** the software is installed to the package's metadata
directory. The result is the directory and files will be cached in
a special test subdirectory under the installation prefix.
paths under ``self.install_test_root`` as follows:

* ``join_path(self.install_test_root, 'tests')`` along with its files
  and subdirectories
* ``join_path(self.install_test_root, 'examples', 'foo.c')``
* ``join_path(self.install_test_root, 'examples', 'bar.c')``

These paths are **automatically copied** to the test stage directory
during stand-alone testing. The package's ``test`` method can access
them using the ``self.test_suite.current_test_cache_dir`` property.
In our example, the method would use the following paths to reference
the copy of each entry listed in ``srcs``, respectively:
where they are available to the package's ``test`` method through the
``self.test_suite.current_test_cache_dir`` property. In our example,
the method can access the directory and files using the following
paths:

* ``join_path(self.test_suite.current_test_cache_dir, 'tests')``
* ``join_path(self.test_suite.current_test_cache_dir, 'examples', 'foo.c')``
@@ -4473,8 +4414,9 @@ the copy of each entry listed in ``srcs``, respectively:

.. note::

   Library developers will want to build the associated tests
   against their **installed** libraries before running them.
   Library developers will want to build the associated tests under
   the ``self.test_suite.current_test_cache_dir`` and against their
   **installed** libraries before running them.

.. note::

@@ -4484,6 +4426,11 @@ the copy of each entry listed in ``srcs``, respectively:

   would be appropriate for ensuring the installed software continues
   to work as the underlying system evolves.

.. note::

   You are free to use a method name that is more suitable for
   your package.

.. _cache_custom_files:

"""""""""""""""""""
|
||||
@@ -4562,8 +4509,7 @@ can retrieve the expected output from ``examples/foo.out`` using:
|
||||
|
||||
def test(self):
|
||||
..
|
||||
filename = join_path(self.test_suite.current_test_cache_dir,
|
||||
'examples', 'foo.out')
|
||||
filename = join_path(self.install_test_root, 'examples', 'foo.out')
|
||||
expected = get_escaped_text_output(filename)
|
||||
..
|
||||
|
||||
@@ -4731,6 +4677,9 @@ directory paths are provided in the table below.
|
||||
* - Test Suite Stage Files
|
||||
- ``self.test_suite.stage``
|
||||
- ``join_path(self.test_suite.stage, 'results.txt')``
|
||||
* - Cached Build-time Files
|
||||
- ``self.install_test_root``
|
||||
- ``join_path(self.install_test_root, 'examples', 'foo.c')``
|
||||
* - Staged Cached Build-time Files
|
||||
- ``self.test_suite.current_test_cache_dir``
|
||||
- ``join_path(self.test_suite.current_test_cache_dir, 'examples', 'foo.c')``
|
||||
|
@@ -48,9 +48,9 @@ or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though

topics are outside the scope of this document.

Spack's pipelines are now making use of the
`trigger <https://docs.gitlab.com/ee/ci/yaml/#trigger>`_ syntax to run
`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
dynamically generated
`child pipelines <https://docs.gitlab.com/ee/ci/pipelines/parent_child_pipelines.html>`_.
`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.
@@ -335,7 +335,7 @@ merged YAML from all configuration files, use ``spack config get repos``:

   - ~/myrepo
   - $spack/var/spack/repos/builtin

Note that, unlike ``spack repo list``, this does not include the
namespace, which is read from each repo's ``repo.yaml``.

^^^^^^^^^^^^^^^^^^^^^
@@ -5,6 +5,3 @@ sphinx>=3.4,!=4.1.2

sphinxcontrib-programoutput
sphinx-rtd-theme
python-levenshtein
# Restrict to docutils <0.17 to workaround a list rendering issue in sphinx.
# https://stackoverflow.com/questions/67542699
docutils <0.17

@@ -17,7 +17,6 @@ spack:

  # Sphinx
  - "py-sphinx@3.4:4.1.1,4.1.3:"
  - py-sphinxcontrib-programoutput
  - py-docutils@:0.16
  - py-sphinx-rtd-theme
  # VCS
  - git
@@ -1,18 +0,0 @@

Name, Supported Versions, Notes, Requirement Reason
Python, 2.6/2.7/3.5-3.9, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
bash, , , Compiler wrappers
tar, , , Extract/create archives
gzip, , , Compress/Decompress archives
unzip, , , Compress/Decompress archives
bzip2, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories
hg, , Optional, Manage Software Repositories
Python header files, , Optional (e.g. ``python3-dev`` on Debian), Bootstrapping from sources
@@ -387,7 +387,7 @@ some nice features:

   Spack-built compiler can be given to an IDE without requiring the
   IDE to load that compiler's module.

Unfortunately, Spack's RPATH support does not work in every case. For example:
Unfortunately, Spack's RPATH support does not work in all cases. For example:

#. Software comes in many forms --- not just compiled ELF binaries,
   but also as interpreted code in Python, R, JVM bytecode, etc.

573 lib/spack/env/cc vendored
@@ -1,5 +1,4 @@

#!/bin/sh
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#!/bin/bash
#
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
@@ -21,33 +20,25 @@

# -Wl,-rpath arguments for dependency /lib directories.
#

# Reset IFS to the default: whitespace-separated lists. When we use
# other separators, we set and reset it.
unset IFS

# Separator for lists whose names end with `_list`.
# We pick the alarm bell character, which is highly unlikely to
# conflict with anything. This is a literal bell character (which
# we have to use since POSIX sh does not convert escape sequences
# like '\a' outside of the format argument of `printf`).
# NOTE: Depending on your editor this may look empty, but it is not.
readonly lsep=''

# This is an array of environment variables that need to be set before
# the script runs. They are set by routines in spack.build_environment
# as part of the package installation process.
readonly params="\
SPACK_ENV_PATH
SPACK_DEBUG_LOG_DIR
SPACK_DEBUG_LOG_ID
SPACK_COMPILER_SPEC
SPACK_CC_RPATH_ARG
SPACK_CXX_RPATH_ARG
SPACK_F77_RPATH_ARG
SPACK_FC_RPATH_ARG
SPACK_LINKER_ARG
SPACK_SHORT_SPEC
SPACK_SYSTEM_DIRS"
parameters=(
    SPACK_ENV_PATH
    SPACK_DEBUG_LOG_DIR
    SPACK_DEBUG_LOG_ID
    SPACK_COMPILER_SPEC
    SPACK_CC_RPATH_ARG
    SPACK_CXX_RPATH_ARG
    SPACK_F77_RPATH_ARG
    SPACK_FC_RPATH_ARG
    SPACK_TARGET_ARGS
    SPACK_DTAGS_TO_ADD
    SPACK_DTAGS_TO_STRIP
    SPACK_LINKER_ARG
    SPACK_SHORT_SPEC
    SPACK_SYSTEM_DIRS
)

# Optional parameters that aren't required to be set
@@ -67,157 +58,60 @@ SPACK_SYSTEM_DIRS"
|
||||
# Test command is used to unit test the compiler script.
|
||||
# SPACK_TEST_COMMAND
|
||||
|
||||
# die MESSAGE
|
||||
# Print a message and exit with error code 1.
|
||||
die() {
|
||||
echo "[spack cc] ERROR: $*"
|
||||
# die()
|
||||
# Prints a message and exits with error 1.
|
||||
function die {
|
||||
echo "$@"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# empty VARNAME
|
||||
# Return whether the variable VARNAME is unset or set to the empty string.
|
||||
empty() {
|
||||
eval "test -z \"\${$1}\""
|
||||
}
|
||||
# read input parameters into proper bash arrays.
|
||||
# SYSTEM_DIRS is delimited by :
|
||||
IFS=':' read -ra SPACK_SYSTEM_DIRS <<< "${SPACK_SYSTEM_DIRS}"
|
||||
|
||||
# setsep LISTNAME
|
||||
# Set the global variable 'sep' to the separator for a list with name LISTNAME.
|
||||
# There are three types of lists:
|
||||
# 1. regular lists end with _list and are separated by $lsep
|
||||
# 2. directory lists end with _dirs/_DIRS/PATH(S) and are separated by ':'
|
||||
# 3. any other list is assumed to be separated by spaces: " "
|
||||
setsep() {
|
||||
case "$1" in
|
||||
*_dirs|*_DIRS|*PATH|*PATHS)
|
||||
sep=':'
|
||||
;;
|
||||
*_list)
|
||||
sep="$lsep"
|
||||
;;
|
||||
*)
|
||||
sep=" "
|
||||
;;
|
||||
esac
|
||||
}
|
||||
# SPACK_<LANG>FLAGS and SPACK_LDLIBS are split by ' '
|
||||
IFS=' ' read -ra SPACK_FFLAGS <<< "$SPACK_FFLAGS"
|
||||
IFS=' ' read -ra SPACK_CPPFLAGS <<< "$SPACK_CPPFLAGS"
|
||||
IFS=' ' read -ra SPACK_CFLAGS <<< "$SPACK_CFLAGS"
|
||||
IFS=' ' read -ra SPACK_CXXFLAGS <<< "$SPACK_CXXFLAGS"
|
||||
IFS=' ' read -ra SPACK_LDFLAGS <<< "$SPACK_LDFLAGS"
|
||||
IFS=' ' read -ra SPACK_LDLIBS <<< "$SPACK_LDLIBS"
|
||||
|
||||
# prepend LISTNAME ELEMENT [SEP]
|
||||
#
|
||||
# Prepend ELEMENT to the list stored in the variable LISTNAME,
|
||||
# assuming the list is separated by SEP.
|
||||
# Handles empty lists and single-element lists.
|
||||
prepend() {
|
||||
varname="$1"
|
||||
elt="$2"
|
||||
|
||||
if empty "$varname"; then
|
||||
eval "$varname=\"\${elt}\""
|
||||
else
|
||||
# Get the appropriate separator for the list we're appending to.
|
||||
setsep "$varname"
|
||||
eval "$varname=\"\${elt}${sep}\${$varname}\""
|
||||
fi
|
||||
}
|
||||
|
||||
# append LISTNAME ELEMENT [SEP]
|
||||
#
|
||||
# Append ELEMENT to the list stored in the variable LISTNAME,
|
||||
# assuming the list is separated by SEP.
|
||||
# Handles empty lists and single-element lists.
|
||||
append() {
|
||||
varname="$1"
|
||||
elt="$2"
|
||||
|
||||
if empty "$varname"; then
|
||||
eval "$varname=\"\${elt}\""
|
||||
else
|
||||
# Get the appropriate separator for the list we're appending to.
|
||||
setsep "$varname"
|
||||
eval "$varname=\"\${$varname}${sep}\${elt}\""
|
||||
fi
|
||||
}
|
||||
|
||||
# extend LISTNAME1 LISTNAME2 [PREFIX]
|
||||
#
|
||||
# Append the elements stored in the variable LISTNAME2
|
||||
# to the list stored in LISTNAME1.
|
||||
# If PREFIX is provided, prepend it to each element.
|
||||
extend() {
|
||||
# Figure out the appropriate IFS for the list we're reading.
|
||||
setsep "$2"
|
||||
if [ "$sep" != " " ]; then
|
||||
IFS="$sep"
|
||||
fi
|
||||
eval "for elt in \${$2}; do append $1 \"$3\${elt}\"; done"
|
||||
unset IFS
|
||||
}
|
||||
|
||||
# preextend LISTNAME1 LISTNAME2 [PREFIX]
|
||||
#
|
||||
# Prepend the elements stored in the list at LISTNAME2
|
||||
# to the list at LISTNAME1, preserving order.
|
||||
# If PREFIX is provided, prepend it to each element.
|
||||
preextend() {
|
||||
# Figure out the appropriate IFS for the list we're reading.
|
||||
setsep "$2"
|
||||
if [ "$sep" != " " ]; then
|
||||
IFS="$sep"
|
||||
fi
|
||||
|
||||
# first, reverse the list to prepend
|
||||
_reversed_list=""
|
||||
eval "for elt in \${$2}; do prepend _reversed_list \"$3\${elt}\"; done"
|
||||
|
||||
# prepend reversed list to preextend in order
|
||||
IFS="${lsep}"
|
||||
for elt in $_reversed_list; do prepend "$1" "$3${elt}"; done
|
||||
unset IFS
|
||||
}
|
||||
|
||||
# system_dir PATH
|
||||
# test whether a path is a system directory
|
||||
system_dir() {
|
||||
IFS=':' # SPACK_SYSTEM_DIRS is colon-separated
|
||||
function system_dir {
|
||||
path="$1"
|
||||
for sd in $SPACK_SYSTEM_DIRS; do
|
||||
if [ "${path}" = "${sd}" ] || [ "${path}" = "${sd}/" ]; then
|
||||
for sd in "${SPACK_SYSTEM_DIRS[@]}"; do
|
||||
if [ "${path}" == "${sd}" ] || [ "${path}" == "${sd}/" ]; then
|
||||
# success if path starts with a system prefix
|
||||
unset IFS
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
unset IFS
|
||||
return 1 # fail if path starts no system prefix
|
||||
}
|
||||
|
||||
# Fail with a clear message if the input contains any bell characters.
|
||||
if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
|
||||
die "Compiler command line contains our separator ('${lsep}'). Cannot parse."
|
||||
fi
|
||||
|
||||
# ensure required variables are set
|
||||
for param in $params; do
|
||||
if eval "test -z \"\${${param}:-}\""; then
|
||||
for param in "${parameters[@]}"; do
|
||||
if [[ -z ${!param+x} ]]; then
|
||||
die "Spack compiler must be run from Spack! Input '$param' is missing."
|
||||
fi
|
||||
done
|
||||
|
||||
# Check if optional parameters are defined
|
||||
# If we aren't asking for debug flags, don't add them
|
||||
if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then
|
||||
if [[ -z ${SPACK_ADD_DEBUG_FLAGS+x} ]]; then
|
||||
SPACK_ADD_DEBUG_FLAGS="false"
|
||||
fi
|
||||
|
||||
# SPACK_ADD_DEBUG_FLAGS must be true/false/custom
|
||||
is_valid="false"
|
||||
for param in "true" "false" "custom"; do
|
||||
if [ "$param" = "$SPACK_ADD_DEBUG_FLAGS" ]; then
|
||||
if [ "$param" == "$SPACK_ADD_DEBUG_FLAGS" ]; then
|
||||
is_valid="true"
|
||||
fi
|
||||
done
|
||||
|
||||
# Exit with error if we are given an incorrect value
|
||||
if [ "$is_valid" = "false" ]; then
|
||||
die "SPACK_ADD_DEBUG_FLAGS, if defined, must be one of 'true', 'false', or 'custom'."
|
||||
if [ "$is_valid" == "false" ]; then
|
||||
die "SPACK_ADD_DEBUG_FLAGS, if defined, must be one of 'true' 'false' or 'custom'"
|
||||
fi
|
||||
|
||||
# Figure out the type of compiler, the language, and the mode so that
|
||||
@@ -234,7 +128,7 @@ fi
#    ld    link
#    ccld  compile & link

command="${0##*/}"
command=$(basename "$0")
comp="CC"
case "$command" in
    cpp)
@@ -269,7 +163,7 @@ case "$command" in
        lang_flags=F
        debug_flags="-g"
        ;;
    ld|ld.gold|ld.lld)
    ld)
        mode=ld
        ;;
    *)
@@ -280,7 +174,7 @@ esac
# If any of the arguments below are present, then the mode is vcheck.
# In vcheck mode, nothing is added in terms of extra search paths or
# libraries.
if [ -z "$mode" ] || [ "$mode" = ld ]; then
if [[ -z $mode ]] || [[ $mode == ld ]]; then
    for arg in "$@"; do
        case $arg in
            -v|-V|--version|-dumpversion)
@@ -292,16 +186,16 @@ if [ -z "$mode" ] || [ "$mode" = ld ]; then
fi

# Finish setting up the mode.
if [ -z "$mode" ]; then
if [[ -z $mode ]]; then
    mode=ccld
    for arg in "$@"; do
        if [ "$arg" = "-E" ]; then
        if [[ $arg == -E ]]; then
            mode=cpp
            break
        elif [ "$arg" = "-S" ]; then
        elif [[ $arg == -S ]]; then
            mode=as
            break
        elif [ "$arg" = "-c" ]; then
        elif [[ $arg == -c ]]; then
            mode=cc
            break
        fi
@@ -328,46 +222,42 @@ dtags_to_strip="${SPACK_DTAGS_TO_STRIP}"
linker_arg="${SPACK_LINKER_ARG}"

# Set up rpath variable according to language.
rpath="ERROR: RPATH ARG WAS NOT SET"
eval "rpath=\${SPACK_${comp}_RPATH_ARG:?${rpath}}"
eval rpath=\$SPACK_${comp}_RPATH_ARG

# Dump the mode and exit if the command is dump-mode.
if [ "$SPACK_TEST_COMMAND" = "dump-mode" ]; then
if [[ $SPACK_TEST_COMMAND == dump-mode ]]; then
    echo "$mode"
    exit
fi

# If, say, SPACK_CC is set but SPACK_FC is not, we want to know. Compilers do not
# *have* to set up Fortran executables, so we need to tell the user when a build is
# about to attempt to use them unsuccessfully.
if [ -z "$command" ]; then
    die "Compiler '$SPACK_COMPILER_SPEC' does not have a $language compiler configured."
# Check that at least one of the real commands was actually selected,
# otherwise we don't know what to execute.
if [[ -z $command ]]; then
    die "ERROR: Compiler '$SPACK_COMPILER_SPEC' does not support compiling $language programs."
fi

#
# Filter '.' and Spack environment directories out of PATH so that
# this script doesn't just call itself
#
new_dirs=""
IFS=':'
for dir in $PATH; do
IFS=':' read -ra env_path <<< "$PATH"
IFS=':' read -ra spack_env_dirs <<< "$SPACK_ENV_PATH"
spack_env_dirs+=("" ".")
export PATH=""
for dir in "${env_path[@]}"; do
    addpath=true
    for spack_env_dir in $SPACK_ENV_PATH; do
        case "${dir%%/}" in
            "$spack_env_dir"|'.'|'')
                addpath=false
                break
                ;;
        esac
    for env_dir in "${spack_env_dirs[@]}"; do
        if [[ "$dir" == "$env_dir" ]]; then
            addpath=false
            break
        fi
    done
    if [ $addpath = true ]; then
        append new_dirs "$dir"
    if $addpath; then
        export PATH="${PATH:+$PATH:}$dir"
    fi
done
unset IFS
export PATH="$new_dirs"

if [ "$mode" = vcheck ]; then
if [[ $mode == vcheck ]]; then
    exec "${command}" "$@"
fi
@@ -375,20 +265,16 @@ fi
# It doesn't work with -rpath.
# This variable controls whether they are added.
add_rpaths=true
if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
    if [ "${SPACK_SHORT_SPEC#*darwin}" != "${SPACK_SHORT_SPEC}" ]; then
        for arg in "$@"; do
            if [ "$arg" = "-r" ]; then
                if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
                    add_rpaths=false
                    break
                fi
            elif [ "$arg" = "-Wl,-r" ] && [ "$mode" = ccld ]; then
                add_rpaths=false
                break
            fi
        done
    fi
if [[ ($mode == ld || $mode == ccld) && "$SPACK_SHORT_SPEC" =~ "darwin" ]];
then
    for arg in "$@"; do
        if [[ ($arg == -r && $mode == ld) ||
              ($arg == -r && $mode == ccld) ||
              ($arg == -Wl,-r && $mode == ccld) ]]; then
            add_rpaths=false
            break
        fi
    done
fi

# Save original command for debug logging
@@ -411,22 +297,17 @@ input_command="$*"
# The libs variable is initialized here for completeness, and it is also
# used later to inject flags supplied via `ldlibs` on the command
# line. These come into the wrappers via SPACK_LDLIBS.

# The loop below breaks up the command line into these lists of components.
# The lists are all bell-separated to be as flexible as possible, as their
# contents may come from the command line, from ' '-separated lists,
# ':'-separated lists, etc.
include_dirs_list=""
lib_dirs_list=""
rpath_dirs_list=""
system_include_dirs_list=""
system_lib_dirs_list=""
system_rpath_dirs_list=""
isystem_system_include_dirs_list=""
isystem_include_dirs_list=""
libs_list=""
other_args_list=""

#
includes=()
libdirs=()
rpaths=()
system_includes=()
system_libdirs=()
system_rpaths=()
libs=()
other_args=()
isystem_system_includes=()
isystem_includes=()

while [ $# -ne 0 ]; do
@@ -446,32 +327,32 @@ while [ $# -ne 0 ]; do
            isystem_was_used=true
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            if system_dir "$arg"; then
                append isystem_system_include_dirs_list "$arg"
                isystem_system_includes+=("$arg")
            else
                append isystem_include_dirs_list "$arg"
                isystem_includes+=("$arg")
            fi
            ;;
        -I*)
            arg="${1#-I}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            if system_dir "$arg"; then
                append system_include_dirs_list "$arg"
                system_includes+=("$arg")
            else
                append include_dirs_list "$arg"
                includes+=("$arg")
            fi
            ;;
        -L*)
            arg="${1#-L}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            if system_dir "$arg"; then
                append system_lib_dirs_list "$arg"
                system_libdirs+=("$arg")
            else
                append lib_dirs_list "$arg"
                libdirs+=("$arg")
            fi
            ;;
        -l*)
            # -loopopt=0 is generated erroneously in autoconf <= 2.69,
            # and passed by ifx to the linker, which confuses it with a
            # library. Filter it out.
            # TODO: generalize filtering of args with an env var, so that
            # TODO: we do not have to special case this here.
@@ -482,76 +363,66 @@ while [ $# -ne 0 ]; do
            fi
            arg="${1#-l}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            append other_args_list "-l$arg"
            other_args+=("-l$arg")
            ;;
        -Wl,*)
            arg="${1#-Wl,}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            case "$arg" in
                -rpath=*) rp="${arg#-rpath=}" ;;
                --rpath=*) rp="${arg#--rpath=}" ;;
                -rpath,*) rp="${arg#-rpath,}" ;;
                --rpath,*) rp="${arg#--rpath,}" ;;
                -rpath|--rpath)
                    shift; arg="$1"
                    case "$arg" in
                        -Wl,*)
                            rp="${arg#-Wl,}"
                            ;;
                        *)
                            die "-Wl,-rpath was not followed by -Wl,*"
                            ;;
                    esac
                    ;;
                "$dtags_to_strip")
                    :  # We want to remove explicitly this flag
                    ;;
                *)
                    append other_args_list "-Wl,$arg"
                    ;;
            esac
            if [[ "$arg" = -rpath=* ]]; then
                rp="${arg#-rpath=}"
            elif [[ "$arg" = --rpath=* ]]; then
                rp="${arg#--rpath=}"
            elif [[ "$arg" = -rpath,* ]]; then
                rp="${arg#-rpath,}"
            elif [[ "$arg" = --rpath,* ]]; then
                rp="${arg#--rpath,}"
            elif [[ "$arg" =~ ^-?-rpath$ ]]; then
                shift; arg="$1"
                if [[ "$arg" != -Wl,* ]]; then
                    die "-Wl,-rpath was not followed by -Wl,*"
                fi
                rp="${arg#-Wl,}"
            elif [[ "$arg" = "$dtags_to_strip" ]] ; then
                :  # We want to remove explicitly this flag
            else
                other_args+=("-Wl,$arg")
            fi
            ;;
        -Xlinker,*)
            arg="${1#-Xlinker,}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi

            case "$arg" in
                -rpath=*) rp="${arg#-rpath=}" ;;
                --rpath=*) rp="${arg#--rpath=}" ;;
                -rpath|--rpath)
                    shift; arg="$1"
                    case "$arg" in
                        -Xlinker,*)
                            rp="${arg#-Xlinker,}"
                            ;;
                        *)
                            die "-Xlinker,-rpath was not followed by -Xlinker,*"
                            ;;
                    esac
                    ;;
                *)
                    append other_args_list "-Xlinker,$arg"
                    ;;
            esac
            if [[ "$arg" = -rpath=* ]]; then
                rp="${arg#-rpath=}"
            elif [[ "$arg" = --rpath=* ]]; then
                rp="${arg#--rpath=}"
            elif [[ "$arg" = -rpath ]] || [[ "$arg" = --rpath ]]; then
                shift; arg="$1"
                if [[ "$arg" != -Xlinker,* ]]; then
                    die "-Xlinker,-rpath was not followed by -Xlinker,*"
                fi
                rp="${arg#-Xlinker,}"
            else
                other_args+=("-Xlinker,$arg")
            fi
            ;;
        -Xlinker)
            if [ "$2" = "-rpath" ]; then
                if [ "$3" != "-Xlinker" ]; then
            if [[ "$2" == "-rpath" ]]; then
                if [[ "$3" != "-Xlinker" ]]; then
                    die "-Xlinker,-rpath was not followed by -Xlinker,*"
                fi
                shift 3;
                rp="$1"
            elif [ "$2" = "$dtags_to_strip" ]; then
            elif [[ "$2" = "$dtags_to_strip" ]] ; then
                shift  # We want to remove explicitly this flag
            else
                append other_args_list "$1"
                other_args+=("$1")
            fi
            ;;
        *)
            if [ "$1" = "$dtags_to_strip" ]; then
            if [[ "$1" = "$dtags_to_strip" ]] ; then
                :  # We want to remove explicitly this flag
            else
                append other_args_list "$1"
                other_args+=("$1")
            fi
            ;;
    esac
@@ -559,9 +430,9 @@ while [ $# -ne 0 ]; do
    # test rpaths against system directories in one place.
    if [ -n "$rp" ]; then
        if system_dir "$rp"; then
            append system_rpath_dirs_list "$rp"
            system_rpaths+=("$rp")
        else
            append rpath_dirs_list "$rp"
            rpaths+=("$rp")
        fi
    fi
    shift
@@ -574,15 +445,16 @@ done
# See the gmake manual on implicit rules for details:
# https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html
#
flags_list=""
flags=()

# Add debug flags
if [ "${SPACK_ADD_DEBUG_FLAGS}" = "true" ]; then
    extend flags_list debug_flags
if [ "${SPACK_ADD_DEBUG_FLAGS}" == "true" ]; then
    flags=("${flags[@]}" "${debug_flags}")

# If a custom flag is requested, derive from environment
elif [ "$SPACK_ADD_DEBUG_FLAGS" = "custom" ]; then
    extend flags_list SPACK_DEBUG_FLAGS
elif [ "$SPACK_ADD_DEBUG_FLAGS" == "custom" ]; then
    IFS=' ' read -ra SPACK_DEBUG_FLAGS <<< "$SPACK_DEBUG_FLAGS"
    flags=("${flags[@]}" "${SPACK_DEBUG_FLAGS[@]}")
fi

# Fortran flags come before CPPFLAGS
@@ -590,8 +462,7 @@ case "$mode" in
    cc|ccld)
        case $lang_flags in
            F)
                extend flags_list SPACK_FFLAGS
                ;;
                flags=("${flags[@]}" "${SPACK_FFLAGS[@]}") ;;
        esac
        ;;
esac
@@ -599,8 +470,7 @@ esac
# C preprocessor flags come before any C/CXX flags
case "$mode" in
    cpp|as|cc|ccld)
        extend flags_list SPACK_CPPFLAGS
        ;;
        flags=("${flags[@]}" "${SPACK_CPPFLAGS[@]}") ;;
esac

@@ -609,67 +479,67 @@ case "$mode" in
    cc|ccld)
        case $lang_flags in
            C)
                extend flags_list SPACK_CFLAGS
                ;;
                flags=("${flags[@]}" "${SPACK_CFLAGS[@]}") ;;
            CXX)
                extend flags_list SPACK_CXXFLAGS
                ;;
                flags=("${flags[@]}" "${SPACK_CXXFLAGS[@]}") ;;
        esac

        # prepend target args
        preextend flags_list SPACK_TARGET_ARGS
        flags=(${SPACK_TARGET_ARGS[@]} "${flags[@]}")
        ;;
esac

# Linker flags
case "$mode" in
    ld|ccld)
        extend flags_list SPACK_LDFLAGS
        ;;
        flags=("${flags[@]}" "${SPACK_LDFLAGS[@]}") ;;
esac

# On macOS insert headerpad_max_install_names linker flag
if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
    if [ "${SPACK_SHORT_SPEC#*darwin}" != "${SPACK_SHORT_SPEC}" ]; then
        case "$mode" in
            ld)
                append flags_list "-headerpad_max_install_names" ;;
            ccld)
                append flags_list "-Wl,-headerpad_max_install_names" ;;
        esac
    fi
if [[ ($mode == ld || $mode == ccld) && "$SPACK_SHORT_SPEC" =~ "darwin" ]];
then
    case "$mode" in
        ld)
            flags=("${flags[@]}" -headerpad_max_install_names) ;;
        ccld)
            flags=("${flags[@]}" "-Wl,-headerpad_max_install_names") ;;
    esac
fi

if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
    if [ "$add_rpaths" != "false" ]; then
IFS=':' read -ra rpath_dirs <<< "$SPACK_RPATH_DIRS"
if [[ $mode == ccld || $mode == ld ]]; then

    if [[ "$add_rpaths" != "false" ]] ; then
        # Append RPATH directories. Note that in the case of the
        # top-level package these directories may not exist yet. For dependencies
        # it is assumed that paths have already been confirmed.
        extend rpath_dirs_list SPACK_RPATH_DIRS
        rpaths=("${rpaths[@]}" "${rpath_dirs[@]}")
    fi

fi

if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
    extend lib_dirs_list SPACK_LINK_DIRS
IFS=':' read -ra link_dirs <<< "$SPACK_LINK_DIRS"
if [[ $mode == ccld || $mode == ld ]]; then
    libdirs=("${libdirs[@]}" "${link_dirs[@]}")
fi

# add RPATHs if we're in any linking mode
case "$mode" in
    ld|ccld)
        # Set extra RPATHs
        extend lib_dirs_list SPACK_COMPILER_EXTRA_RPATHS
        if [ "$add_rpaths" != "false" ]; then
            extend rpath_dirs_list SPACK_COMPILER_EXTRA_RPATHS
        IFS=':' read -ra extra_rpaths <<< "$SPACK_COMPILER_EXTRA_RPATHS"
        libdirs+=("${extra_rpaths[@]}")
        if [[ "$add_rpaths" != "false" ]] ; then
            rpaths+=("${extra_rpaths[@]}")
        fi

        # Set implicit RPATHs
        if [ "$add_rpaths" != "false" ]; then
            extend rpath_dirs_list SPACK_COMPILER_IMPLICIT_RPATHS
        IFS=':' read -ra implicit_rpaths <<< "$SPACK_COMPILER_IMPLICIT_RPATHS"
        if [[ "$add_rpaths" != "false" ]] ; then
            rpaths+=("${implicit_rpaths[@]}")
        fi

        # Add SPACK_LDLIBS to args
        for lib in $SPACK_LDLIBS; do
            append libs_list "${lib#-l}"
        for lib in "${SPACK_LDLIBS[@]}"; do
            libs+=("${lib#-l}")
        done
        ;;
esac
@@ -677,62 +547,63 @@ esac
#
# Finally, reassemble the command line.
#
args_list="$flags_list"

# Includes and system includes first
args=()

# flags assembled earlier
args+=("${flags[@]}")

# Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list include_dirs_list "-I"
extend args_list isystem_include_dirs_list "-isystem${lsep}"

case "$mode" in
    cpp|cc|as|ccld)
        if [ "$isystem_was_used" = "true" ]; then
            extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
        else
            extend args_list SPACK_INCLUDE_DIRS "-I"
        fi
        ;;
esac
for dir in "${includes[@]}"; do args+=("-I$dir"); done
for dir in "${isystem_includes[@]}"; do args+=("-isystem" "$dir"); done

extend args_list system_include_dirs_list -I
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
IFS=':' read -ra spack_include_dirs <<< "$SPACK_INCLUDE_DIRS"
if [[ $mode == cpp || $mode == cc || $mode == as || $mode == ccld ]]; then
    if [[ "$isystem_was_used" == "true" ]] ; then
        for dir in "${spack_include_dirs[@]}"; do args+=("-isystem" "$dir"); done
    else
        for dir in "${spack_include_dirs[@]}"; do args+=("-I$dir"); done
    fi
fi

for dir in "${system_includes[@]}"; do args+=("-I$dir"); done
for dir in "${isystem_system_includes[@]}"; do args+=("-isystem" "$dir"); done

# Library search paths
extend args_list lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
for dir in "${libdirs[@]}"; do args+=("-L$dir"); done
for dir in "${system_libdirs[@]}"; do args+=("-L$dir"); done

# RPATHs arguments
case "$mode" in
    ccld)
        if [ -n "$dtags_to_add" ] ; then
            append args_list "$linker_arg$dtags_to_add"
        fi
        extend args_list rpath_dirs_list "$rpath"
        extend args_list system_rpath_dirs_list "$rpath"
        if [ -n "$dtags_to_add" ] ; then args+=("$linker_arg$dtags_to_add") ; fi
        for dir in "${rpaths[@]}"; do args+=("$rpath$dir"); done
        for dir in "${system_rpaths[@]}"; do args+=("$rpath$dir"); done
        ;;
    ld)
        if [ -n "$dtags_to_add" ] ; then
            append args_list "$dtags_to_add"
        fi
        extend args_list rpath_dirs_list "-rpath${lsep}"
        extend args_list system_rpath_dirs_list "-rpath${lsep}"
        if [ -n "$dtags_to_add" ] ; then args+=("$dtags_to_add") ; fi
        for dir in "${rpaths[@]}"; do args+=("-rpath" "$dir"); done
        for dir in "${system_rpaths[@]}"; do args+=("-rpath" "$dir"); done
        ;;
esac

# Other arguments from the input command
extend args_list other_args_list
args+=("${other_args[@]}")

# Inject SPACK_LDLIBS, if supplied
extend args_list libs_list "-l"
for lib in "${libs[@]}"; do
    args+=("-l$lib");
done

full_command_list="$command"
extend full_command_list args_list
full_command=("$command" "${args[@]}")

# prepend the ccache binary if we're using ccache
if [ -n "$SPACK_CCACHE_BINARY" ]; then
    case "$lang_flags" in
        C|CXX)  # ccache only supports C languages
            prepend full_command_list "${SPACK_CCACHE_BINARY}"
            full_command=("${SPACK_CCACHE_BINARY}" "${full_command[@]}")
            # workaround for stage being a temp folder
            # see #3761#issuecomment-294352232
            export CCACHE_NOHASHDIR=yes
@@ -741,36 +612,22 @@ if [ -n "$SPACK_CCACHE_BINARY" ]; then
fi

# dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args
if [ -n "${SPACK_TEST_COMMAND=}" ]; then
    case "$SPACK_TEST_COMMAND" in
        dump-args)
            IFS="$lsep"
            for arg in $full_command_list; do
                echo "$arg"
            done
            unset IFS
            exit
            ;;
        dump-env-*)
            var=${SPACK_TEST_COMMAND#dump-env-}
            eval "printf '%s\n' \"\$0: \$var: \$$var\""
            ;;
        *)
            die "Unknown test command: '$SPACK_TEST_COMMAND'"
            ;;
    esac
if [[ $SPACK_TEST_COMMAND == dump-args ]]; then
    IFS="
" && echo "${full_command[*]}"
    exit
elif [[ -n $SPACK_TEST_COMMAND ]]; then
    die "ERROR: Unknown test command"
fi

#
# Write the input and output commands to debug logs if it's asked for.
#
if [ "$SPACK_DEBUG" = TRUE ]; then
if [[ $SPACK_DEBUG == TRUE ]]; then
    input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
    output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
    echo "[$mode] $command $input_command" >> "$input_log"
    echo "[$mode] ${full_command_list}" >> "$output_log"
    echo "[$mode] ${full_command[*]}" >> "$output_log"
fi

# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list
exec "${full_command[@]}"
1
lib/spack/env/ld.gold
vendored
@@ -1 +0,0 @@
cc
1
lib/spack/env/ld.lld
vendored
@@ -1 +0,0 @@
cc
4
lib/spack/external/__init__.py
vendored
@@ -11,7 +11,7 @@

* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.2 (commit 85757b6666422fca86aa882a769bf78b0f992f54)
* Version: 0.1.2 (commit 4dbf253daf37e4a008e4beb6489f347b4a35aed4)

argparse
--------
@@ -88,8 +88,6 @@
* Usage: Needed by pytest. Library with cross-python path,
  ini-parsing, io, code, and log facilities.
* Version: 1.4.34 (last version supporting Python 2.6)
* Note: This package has been modified:
  * https://github.com/pytest-dev/py/pull/186 was backported

pytest
------
13
lib/spack/external/archspec/README.md
vendored
@@ -49,19 +49,6 @@ $ tox
  congratulations :)
```

## Citing Archspec

If you are referencing `archspec` in a publication, please cite the following
paper:

* Massimiliano Culpo, Gregory Becker, Carlos Eduardo Arango Gutierrez, Kenneth
  Hoste, and Todd Gamblin.
  [**`archspec`: A library for detecting, labeling, and reasoning about
  microarchitectures**](https://tgamblin.github.io/pubs/archspec-canopie-hpc-2020.pdf).
  In *2nd International Workshop on Containers and New Orchestration Paradigms
  for Isolated Environments in HPC (CANOPIE-HPC'20)*, Online Event, November
  12, 2020.

## License

Archspec is distributed under the terms of both the MIT license and the
48
lib/spack/external/archspec/cpu/detect.py
vendored
@@ -206,26 +206,11 @@ def host():
    # Get a list of possible candidates for this micro-architecture
    candidates = compatible_microarchitectures(info)

    # Sorting criteria for candidates
    def sorting_fn(item):
        return len(item.ancestors), len(item.features)

    # Get the best generic micro-architecture
    generic_candidates = [c for c in candidates if c.vendor == "generic"]
    best_generic = max(generic_candidates, key=sorting_fn)

    # Filter the candidates to be descendant of the best generic candidate.
    # This is to avoid that the lack of a niche feature that can be disabled
    # from e.g. BIOS prevents detection of a reasonably performant architecture
    candidates = [c for c in candidates if c > best_generic]

    # If we don't have candidates, return the best generic micro-architecture
    if not candidates:
        return best_generic

    # Reverse sort of the depth for the inheritance tree among only targets we
    # can use. This gets the newest target we satisfy.
    return max(candidates, key=sorting_fn)
    return sorted(
        candidates, key=lambda t: (len(t.ancestors), len(t.features)), reverse=True
    )[0]
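For context on the hunk above: both the old and the new code pick the candidate that sits deepest in the microarchitecture inheritance tree, breaking ties by feature count. A small self-contained sketch of that two-level sort key; the `Uarch` tuples are made-up stand-ins for archspec's `Microarchitecture` objects, carrying only the two attributes the key inspects:

```python
from collections import namedtuple

# Hypothetical stand-ins for archspec Microarchitecture objects.
Uarch = namedtuple("Uarch", ["name", "ancestors", "features"])

candidates = [
    Uarch("x86_64", ancestors=(), features=frozenset()),
    Uarch("ivybridge",
          ancestors=("x86_64", "nehalem", "sandybridge"),
          features=frozenset({"sse2", "avx"})),
    Uarch("haswell",
          ancestors=("x86_64", "nehalem", "sandybridge", "ivybridge"),
          features=frozenset({"sse2", "avx", "avx2"})),
]

def sorting_fn(item):
    # Deeper inheritance tree wins; feature count breaks ties.
    return len(item.ancestors), len(item.features)

print(max(candidates, key=sorting_fn).name)  # haswell: the newest target satisfied
```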
def compatibility_check(architecture_family):
@@ -260,13 +245,7 @@ def compatibility_check_for_power(info, target):
    """Compatibility check for PPC64 and PPC64LE architectures."""
    basename = platform.machine()
    generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
    try:
        generation = int(generation_match.group(1))
    except AttributeError:
        # There might be no match under emulated environments. For instance
        # emulating a ppc64le with QEMU and Docker still reports the host
        # /proc/cpuinfo and not a Power
        generation = 0
    generation = int(generation_match.group(1))

    # We can use a target if it descends from our machine type and our
    # generation (9 for POWER9, etc) is at least its generation.
@@ -306,22 +285,3 @@ def compatibility_check_for_aarch64(info, target):
        and (target.vendor == vendor or target.vendor == "generic")
        and target.features.issubset(features)
    )


@compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target):
    """Compatibility check for riscv64 architectures."""
    basename = "riscv64"
    uarch = info.get("uarch")

    # sifive unmatched board
    if uarch == "sifive,u74-mc":
        uarch = "u74mc"
    # catch-all for unknown uarchs
    else:
        uarch = "riscv64"

    arch_root = TARGETS[basename]
    return (target == arch_root or arch_root in target.ancestors) and (
        target == uarch or target.vendor == "generic"
    )
@@ -173,12 +173,6 @@ def family(self):

        return roots.pop()

    @property
    def generic(self):
        """Returns the best generic architecture that is compatible with self"""
        generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
        return max(generics, key=lambda x: len(x.ancestors))

    def to_dict(self, return_list_of_items=False):
        """Returns a dictionary representation of this object.
@@ -2017,44 +2017,6 @@
      "features": [],
      "compilers": {
      }
    },
    "riscv64": {
      "from": [],
      "vendor": "generic",
      "features": [],
      "compilers": {
        "gcc": [
          {
            "versions": "7.1:",
            "flags" : "-march=rv64gc"
          }
        ],
        "clang": [
          {
            "versions": "9.0:",
            "flags" : "-march=rv64gc"
          }
        ]
      }
    },
    "u74mc": {
      "from": ["riscv64"],
      "vendor": "SiFive",
      "features": [],
      "compilers": {
        "gcc": [
          {
            "versions": "10.2:",
            "flags" : "-march=rv64gc -mtune=sifive-7-series"
          }
        ],
        "clang" : [
          {
            "versions": "12.0:",
            "flags" : "-march=rv64gc -mtune=sifive-7-series"
          }
        ]
      }
    }
  },
  "feature_aliases": {
91
lib/spack/external/ctest_log_parser.py
vendored
@@ -77,18 +77,52 @@
from six import StringIO
from six import string_types

class prefilter(object):
    """Make regular expressions faster with a simple prefiltering predicate.

    Some regular expressions seem to be much more costly than others. In
    most cases, we can evaluate a simple precondition, e.g.::

        lambda x: "error" in x

    to avoid evaluating expensive regexes on all lines in a file. This
    can reduce parse time for large files by orders of magnitude when
    evaluating lots of expressions.

    A ``prefilter`` object is designed to act like a regex, but
    ``search`` and ``match`` check the precondition before bothering to
    evaluate the regular expression.

    Note that ``match`` and ``search`` just return ``True`` and ``False``
    at the moment. Make them return a ``MatchObject`` or ``None`` if it
    becomes necessary.
    """
    def __init__(self, precondition, *patterns):
        self.patterns = [re.compile(p) for p in patterns]
        self.pre = precondition
        self.pattern = "\n    ".join(
            ('MERGED:',) + patterns)

    def search(self, text):
        return self.pre(text) and any(p.search(text) for p in self.patterns)

    def match(self, text):
        return self.pre(text) and any(p.match(text) for p in self.patterns)
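For context: the point of ``prefilter`` is that the cheap precondition rejects most lines before any regular expression runs at all. A small usage sketch against the class as defined above (the sample lines are invented):

```python
# (assumes the prefilter class above is in scope)
err = prefilter(
    lambda x: "error" in x,                 # cheap precondition
    r"([^:]+): error[ \t]*[0-9]+[ \t]*:",   # costly regexes, tried only
    r"([^ :]+) ?: (error|fatal error)",     #   when the precondition holds
)

print(err.search("foo.c: error 42: bad thing"))  # True
print(err.search("all 12 tests passed"))         # False; no regex evaluated
```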

_error_matches = [
    "^FAIL: ",
    "^FATAL: ",
    "^failed ",
    "FAILED",
    "Failed test",
    prefilter(
        lambda x: any(s in x for s in (
            'Error:', 'error', 'undefined reference', 'multiply defined')),
        "([^:]+): error[ \\t]*[0-9]+[ \\t]*:",
        "([^:]+): (Error:|error|undefined reference|multiply defined)",
        "([^ :]+) ?: (error|fatal error|catastrophic error)",
        "([^:]+)\\(([^\\)]+)\\) ?: (error|fatal error|catastrophic error)"),
    "^FAILED",
    "^[Bb]us [Ee]rror",
    "^[Ss]egmentation [Vv]iolation",
    "^[Ss]egmentation [Ff]ault",
    ":.*[Pp]ermission [Dd]enied",
    "[^ :]:[0-9]+: [^ \\t]",
    "[^:]: error[ \\t]*[0-9]+[ \\t]*:",
    "^Error ([0-9]+):",
    "^Fatal",
    "^[Ee]rror: ",
@@ -98,9 +132,6 @@
    "^cc[^C]*CC: ERROR File = ([^,]+), Line = ([0-9]+)",
    "^ld([^:])*:([ \\t])*ERROR([^:])*:",
    "^ild:([ \\t])*\\(undefined symbol\\)",
    "[^ :] : (error|fatal error|catastrophic error)",
    "[^:]: (Error:|error|undefined reference|multiply defined)",
    "[^:]\\([^\\)]+\\) ?: (error|fatal error|catastrophic error)",
    "^fatal error C[0-9]+:",
    ": syntax error ",
    "^collect2: ld returned 1 exit status",
@@ -109,7 +140,7 @@
    "^Unresolved:",
    "Undefined symbol",
    "^Undefined[ \\t]+first referenced",
    "^CMake Error",
    "^CMake Error.*:",
    ":[ \\t]cannot find",
    ":[ \\t]can't find",
    ": \\*\\*\\* No rule to make target [`'].*\\'. Stop",
@@ -123,7 +154,6 @@
    "ld: 0706-006 Cannot find or open library file: -l ",
    "ild: \\(argument error\\) can't find library argument ::",
    "^could not be found and will not be loaded.",
    "^WARNING: '.*' is missing on your system",
    "s:616 string too big",
    "make: Fatal error: ",
    "ld: 0711-993 Error occurred while writing to the output file:",
@@ -145,40 +175,44 @@
    "instantiated from ",
    "candidates are:",
    ": warning",
    ": WARNING",
    ": \\(Warning\\)",
    ": note",
    " ok",
    "Note:",
    "makefile:",
    "Makefile:",
    ":[ \\t]+Where:",
    "[^ :]:[0-9]+: Warning",
    "([^ :]+):([0-9]+): Warning",
    "------ Build started: .* ------",
]

#: Regexes to match file/line numbers in error/warning messages
_warning_matches = [
    "[^ :]:[0-9]+: warning:",
    "[^ :]:[0-9]+: note:",
    prefilter(
        lambda x: 'warning' in x,
        "([^ :]+):([0-9]+): warning:",
        "([^:]+): warning ([0-9]+):",
        "([^:]+): warning[ \\t]*[0-9]+[ \\t]*:",
        "([^ :]+) : warning",
        "([^:]+): warning"),
    prefilter(
        lambda x: 'note:' in x,
        "^([^ :]+):([0-9]+): note:"),
    prefilter(
        lambda x: any(s in x for s in ('Warning', 'Warnung')),
        "^(Warning|Warnung) ([0-9]+):",
        "^(Warning|Warnung)[ :]",
        "^cxx: Warning:",
        "([^ :]+):([0-9]+): (Warning|Warnung)",
        "^CMake Warning.*:"),
    "file: .* has no symbols",
    "^cc[^C]*CC: WARNING File = ([^,]+), Line = ([0-9]+)",
    "^ld([^:])*:([ \\t])*WARNING([^:])*:",
    "[^:]: warning [0-9]+:",
    "^\"[^\"]+\", line [0-9]+: [Ww](arning|arnung)",
    "[^:]: warning[ \\t]*[0-9]+[ \\t]*:",
    "^(Warning|Warnung) ([0-9]+):",
    "^(Warning|Warnung)[ :]",
    "WARNING: ",
    "[^ :] : warning",
    "[^:]: warning",
    "\", line [0-9]+\\.[0-9]+: [0-9]+-[0-9]+ \\([WI]\\)",
    "^cxx: Warning:",
    "file: .* has no symbols",
    "[^ :]:[0-9]+: (Warning|Warnung)",
    "\\([0-9]*\\): remark #[0-9]*",
    "\".*\", line [0-9]+: remark\\([0-9]*\\):",
    "cc-[0-9]* CC: REMARK File = .*, Line = [0-9]*",
    "^CMake Warning",
    "^\\[WARNING\\]",
]

@@ -309,7 +343,8 @@ def _profile_match(matches, exceptions, line, match_times, exc_times):

def _parse(lines, offset, profile):
    def compile(regex_array):
        return [re.compile(regex) for regex in regex_array]
        return [regex if isinstance(regex, prefilter) else re.compile(regex)
                for regex in regex_array]

    error_matches = compile(_error_matches)
    error_exceptions = compile(_error_exceptions)
6
lib/spack/external/py/_path/local.py
vendored
@@ -10,7 +10,7 @@
from py._path.common import iswin32, fspath
from stat import S_ISLNK, S_ISDIR, S_ISREG

from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
from os.path import abspath, normcase, normpath, isabs, exists, isdir, isfile, islink, dirname

if sys.version_info > (3,0):
    def map_as_list(func, iter):
@@ -801,10 +801,10 @@ def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
        if rootdir is None:
            rootdir = cls.get_temproot()

        nprefix = prefix.lower()
        nprefix = normcase(prefix)
        def parse_num(path):
            """ parse the number out of a path (if it matches the prefix) """
            nbasename = path.basename.lower()
            nbasename = normcase(path.basename)
            if nbasename.startswith(nprefix):
                try:
                    return int(nbasename[len(nprefix):])
@@ -656,12 +656,6 @@ def working_dir(dirname, **kwargs):
        os.chdir(orig_dir)


class CouldNotRestoreDirectoryBackup(RuntimeError):
    def __init__(self, inner_exception, outer_exception):
        self.inner_exception = inner_exception
        self.outer_exception = outer_exception


@contextmanager
def replace_directory_transaction(directory_name, tmp_root=None):
    """Moves a directory to a temporary space. If the operations executed
@@ -689,33 +683,32 @@ def replace_directory_transaction(directory_name, tmp_root=None):
    assert os.path.isabs(tmp_root)

    tmp_dir = tempfile.mkdtemp(dir=tmp_root)
    tty.debug('Temporary directory created [{0}]'.format(tmp_dir))
    tty.debug('TEMPORARY DIRECTORY CREATED [{0}]'.format(tmp_dir))

    shutil.move(src=directory_name, dst=tmp_dir)
    tty.debug('Directory moved [src={0}, dest={1}]'.format(directory_name, tmp_dir))
    tty.debug('DIRECTORY MOVED [src={0}, dest={1}]'.format(
        directory_name, tmp_dir
    ))

    try:
        yield tmp_dir
    except (Exception, KeyboardInterrupt, SystemExit) as inner_exception:
        # Try to recover the original directory, if this fails, raise a
        # composite exception.
        try:
            # Delete what was there, before copying back the original content
            if os.path.exists(directory_name):
                shutil.rmtree(directory_name)
            shutil.move(
                src=os.path.join(tmp_dir, directory_basename),
                dst=os.path.dirname(directory_name)
            )
        except Exception as outer_exception:
            raise CouldNotRestoreDirectoryBackup(inner_exception, outer_exception)
    except (Exception, KeyboardInterrupt, SystemExit) as e:
        # Delete what was there, before copying back the original content
        if os.path.exists(directory_name):
            shutil.rmtree(directory_name)
        shutil.move(
            src=os.path.join(tmp_dir, directory_basename),
            dst=os.path.dirname(directory_name)
        )
        tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))

        tty.debug('Directory recovered [{0}]'.format(directory_name))
        raise
        msg = 'the transactional move of "{0}" failed.'
        msg += '\n    ' + str(e)
        raise RuntimeError(msg.format(directory_name))
    else:
        # Otherwise delete the temporary directory
        shutil.rmtree(tmp_dir, ignore_errors=True)
        tty.debug('Temporary directory deleted [{0}]'.format(tmp_dir))
        shutil.rmtree(tmp_dir)
        tty.debug('TEMPORARY DIRECTORY DELETED [{0}]'.format(tmp_dir))


def hash_directory(directory, ignore=[]):
@@ -1855,18 +1848,3 @@ def keep_modification_time(*filenames):
    for f, mtime in mtimes.items():
        if os.path.exists(f):
            os.utime(f, (os.path.getatime(f), mtime))


@contextmanager
def temporary_dir(*args, **kwargs):
    """Create a temporary directory and cd's into it. Delete the directory
    on exit.

    Takes the same arguments as tempfile.mkdtemp()
    """
    tmp_dir = tempfile.mkdtemp(*args, **kwargs)
    try:
        with working_dir(tmp_dir):
            yield tmp_dir
    finally:
        remove_directory_contents(tmp_dir)
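For context, a minimal usage sketch of the ``temporary_dir`` helper defined above (the scratch file name is hypothetical, and this assumes the helper is importable from the module shown):

```python
import os

# (assumes temporary_dir, as defined above, is in scope)
with temporary_dir() as tmp:
    # The block runs with the fresh directory as the working directory.
    assert os.getcwd() == os.path.realpath(tmp)
    with open("scratch.txt", "w") as f:  # hypothetical scratch file
        f.write("discarded on exit")
# On exit the directory's contents are removed and the old cwd is restored.
```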
@@ -7,6 +7,7 @@

import functools
import inspect
import multiprocessing
import os
import re
import sys
@@ -30,6 +31,23 @@
ignore_modules = [r'^\.#', '~$']


# On macOS, Python 3.8 multiprocessing now defaults to the 'spawn' start
# method. Spack cannot currently handle this, so force the process to start
# using the 'fork' start method.
#
# TODO: This solution is not ideal, as the 'fork' start method can lead to
# crashes of the subprocess. Figure out how to make 'spawn' work.
#
# See:
# * https://github.com/spack/spack/pull/18124
# * https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods  # noqa: E501
# * https://bugs.python.org/issue33725
if sys.version_info >= (3,):  # novm
    fork_context = multiprocessing.get_context('fork')
else:
    fork_context = multiprocessing
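For context: code elsewhere is expected to spawn workers through ``fork_context`` instead of the bare ``multiprocessing`` module, so the start method is fixed in one place. A hedged usage sketch (the worker function is invented):

```python
import os

def _worker(queue):
    # Runs in the forked child process (hypothetical worker for illustration).
    queue.put(os.getpid())

if __name__ == "__main__":
    # fork_context exposes the same API as the multiprocessing module itself.
    queue = fork_context.Queue()
    proc = fork_context.Process(target=_worker, args=(queue,))
    proc.start()
    proc.join()
    print("child pid:", queue.get())
```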

def index_by(objects, *funcs):
    """Create a hierarchy of dictionaries by splitting the supplied
    set of objects on unique values of the supplied functions.
@@ -915,19 +933,3 @@ class Devnull(object):
    """
    def write(self, *_):
        pass


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
    replacing intervening elements with '...'. For example::

        elide_list([1,2,3,4,5,6], 4)

    gives::

        [1, 2, 3, '...', 6]
    """
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list
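The docstring's example, spelled out as a quick check against ``elide_list`` as defined above:

```python
# (assumes elide_list, as defined above, is in scope)
print(elide_list([1, 2, 3, 4, 5, 6], 4))   # [1, 2, 3, '...', 6]
print(elide_list([1, 2, 3], 4))            # [1, 2, 3] -- short lists unchanged
```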
@@ -9,7 +9,6 @@
import socket
import time
from datetime import datetime
from typing import Dict, Tuple  # novm

import llnl.util.tty as tty

@@ -37,126 +36,6 @@
true_fn = lambda: True


class OpenFile(object):
    """Record for keeping track of open lockfiles (with reference counting).

    There's really only one ``OpenFile`` per inode, per process, but we record the
    filehandle here as it's the thing we end up using in python code. You can get
    the file descriptor from the file handle if needed -- or we could make this track
    file descriptors as well in the future.
    """
    def __init__(self, fh):
        self.fh = fh
        self.refs = 0


class OpenFileTracker(object):
    """Track open lockfiles, to minimize number of open file descriptors.

    The ``fcntl`` locks that Spack uses are associated with an inode and a process.
    This is convenient, because if a process exits, it releases its locks.
    Unfortunately, this also means that if you close a file, *all* locks associated
    with that file's inode are released, regardless of whether the process has any
    other open file descriptors on it.

    Because of this, we need to track open lock files so that we only close them when
    a process no longer needs them. We do this by tracking each lockfile by its
    inode and process id. This has several nice properties:

    1. Tracking by pid ensures that, if we fork, we don't inadvertently track the parent
       process's lockfiles. ``fcntl`` locks are not inherited across forks, so we'll
       just track new lockfiles in the child.
    2. Tracking by inode ensures that references are counted per inode, and that we don't
       inadvertently close a file whose inode still has open locks.
    3. Tracking by both pid and inode ensures that we only open lockfiles the minimum
       number of times necessary for the locks we have.

    Note: as mentioned elsewhere, these locks aren't thread safe -- they're designed to
    work in Python and assume the GIL.
    """

    def __init__(self):
        """Create a new ``OpenFileTracker``."""
        self._descriptors = {}  # type: Dict[Tuple[int, int], OpenFile]

    def get_fh(self, path):
        """Get a filehandle for a lockfile.

        This routine will open writable files for read/write even if you're asking
        for a shared (read-only) lock. This is so that we can upgrade to an exclusive
        (write) lock later if requested.

        Arguments:
            path (str): path to lock file we want a filehandle for
        """
        # Open writable files as 'r+' so we can upgrade to write later
        os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'

        pid = os.getpid()
        open_file = None  # OpenFile object, if there is one
        stat = None       # stat result for the lockfile, if it exists

        try:
            # see whether we've seen this inode/pid before
            stat = os.stat(path)
            key = (stat.st_ino, pid)
            open_file = self._descriptors.get(key)

        except OSError as e:
            if e.errno != errno.ENOENT:  # only handle file not found
                raise

            # path does not exist -- fail if we won't be able to create it
            parent = os.path.dirname(path) or '.'
            if not os.access(parent, os.W_OK):
                raise CantCreateLockError(path)

        # if there was no already open file, we'll need to open one
        if not open_file:
            if stat and not os.access(path, os.W_OK):
                # we know path exists but not if it's writable. If it's read-only,
                # only open the file for reading (and fail if we're trying to get
                # an exclusive (write) lock on it)
                os_mode, fh_mode = os.O_RDONLY, 'r'

            fd = os.open(path, os_mode)
            fh = os.fdopen(fd, fh_mode)
            open_file = OpenFile(fh)

            # if we just created the file, we'll need to get its inode here
            if not stat:
                inode = os.fstat(fd).st_ino
                key = (inode, pid)

            self._descriptors[key] = open_file

        open_file.refs += 1
        return open_file.fh

    def release_fh(self, path):
        """Release a filehandle, only closing it if there are no more references."""
        try:
            inode = os.stat(path).st_ino
        except OSError as e:
            if e.errno != errno.ENOENT:  # only handle file not found
                raise
            inode = None  # this will not be in self._descriptors

        key = (inode, os.getpid())
        open_file = self._descriptors.get(key)
        assert open_file, "Attempted to close non-existing lock path: %s" % path

        open_file.refs -= 1
        if not open_file.refs:
            del self._descriptors[key]
            open_file.fh.close()
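For context, a short sketch of the reference-counting behavior the class above implements (the lock path is hypothetical, and this assumes ``OpenFile``/``OpenFileTracker`` as defined above are in scope along with the module's imports):

```python
tracker = OpenFileTracker()

fh1 = tracker.get_fh("/tmp/demo.lock")  # first use: opens the file, refs == 1
fh2 = tracker.get_fh("/tmp/demo.lock")  # same inode and pid: reused, refs == 2
assert fh1 is fh2                       # one handle per (inode, pid)

tracker.release_fh("/tmp/demo.lock")    # refs drop to 1; file stays open
tracker.release_fh("/tmp/demo.lock")    # refs reach 0; only now is it closed
```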

#: Open file descriptors for locks in this process. Used to prevent one process
#: from opening the same file many times for different byte range locks
file_tracker = OpenFileTracker()


def _attempts_str(wait_time, nattempts):
    # Don't print anything if we succeeded on the first try
    if nattempts <= 1:
@@ -177,8 +56,7 @@ class Lock(object):
    Note that this is for managing contention over resources *between*
    processes and not for managing contention between threads in a process: the
    functions of this object are not thread-safe. A process also must not
    maintain multiple locks on the same file (or, more specifically, on
    overlapping byte ranges in the same file).
    maintain multiple locks on the same file.
    """

    def __init__(self, path, start=0, length=0, default_timeout=None,
@@ -283,10 +161,25 @@ def _lock(self, op, timeout=None):

        # Create file and parent directories if they don't exist.
        if self._file is None:
            self._ensure_parent_directory()
            self._file = file_tracker.get_fh(self.path)
            parent = self._ensure_parent_directory()

        if op == fcntl.LOCK_EX and self._file.mode == 'r':
            # Open writable files as 'r+' so we can upgrade to write later
            os_mode, fd_mode = (os.O_RDWR | os.O_CREAT), 'r+'
            if os.path.exists(self.path):
                if not os.access(self.path, os.W_OK):
                    if op == fcntl.LOCK_SH:
                        # can still lock read-only files if we open 'r'
                        os_mode, fd_mode = os.O_RDONLY, 'r'
                    else:
                        raise LockROFileError(self.path)

            elif not os.access(parent, os.W_OK):
                raise CantCreateLockError(self.path)

            fd = os.open(self.path, os_mode)
            self._file = os.fdopen(fd, fd_mode)

        elif op == fcntl.LOCK_EX and self._file.mode == 'r':
            # Attempt to upgrade to write lock w/a read-only file.
            # If the file were writable, we'd have opened it 'r+'
            raise LockROFileError(self.path)
@@ -399,8 +292,7 @@ def _unlock(self):
        """
        fcntl.lockf(self._file, fcntl.LOCK_UN,
                    self._length, self._start, os.SEEK_SET)

        file_tracker.release_fh(self.path)
        self._file.close()
        self._file = None
        self._reads = 0
        self._writes = 0
@@ -33,7 +33,7 @@


# Use this to strip escape sequences
_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07')
_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h')
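For reference, a quick sketch of what the narrower pattern strips (the sample line is invented):

```python
import re

_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h')

line = "\x1b[1;31merror:\x1b[0m something failed"
print(_escape.sub("", line))  # -> "error: something failed"
```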

# control characters for enabling/disabling echo
#
@@ -323,7 +323,7 @@ def unwrap(self):
        if sys.version_info < (3,):
            self.file = open(self.file_like, 'w')
        else:
            self.file = open(self.file_like, 'w', encoding='utf-8')  # novm
            self.file = open(self.file_like, 'w', encoding='utf-8')
    else:
        self.file = StringIO()
    return self.file
@@ -4,7 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

#: major, minor, patch version for Spack, in a tuple
spack_version_info = (0, 17, 0)
spack_version_info = (0, 16, 2)

#: String containing Spack version joined with .'s
spack_version = '.'.join(str(v) for v in spack_version_info)
@@ -10,8 +10,6 @@

import os

import llnl.util.tty as tty

from spack.util.environment import EnvironmentModifications

from .analyzer_base import AnalyzerBase
@@ -45,7 +43,6 @@ def _read_environment_file(self, filename):
        to remove path prefixes specific to user systems.
        """
        if not os.path.exists(filename):
            tty.warn("No environment file available")
            return

        mods = EnvironmentModifications.from_sourcing_file(filename)
612
lib/spack/spack/architecture.py
Normal file
612
lib/spack/spack/architecture.py
Normal file
@@ -0,0 +1,612 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""
|
||||
This module contains all the elements that are required to create an
|
||||
architecture object. These include, the target processor, the operating system,
|
||||
and the architecture platform (i.e. cray, darwin, linux, etc) classes.
|
||||
|
||||
On a multiple architecture machine, the architecture spec field can be set to
|
||||
build a package against any target and operating system that is present on the
|
||||
platform. On Cray platforms or any other architecture that has different front
|
||||
and back end environments, the operating system will determine the method of
|
||||
compiler
|
||||
detection.
|
||||
|
||||
There are two different types of compiler detection:
|
||||
1. Through the $PATH env variable (front-end detection)
|
||||
2. Through the tcl module system. (back-end detection)
|
||||
|
||||
Depending on which operating system is specified, the compiler will be detected
|
||||
using one of those methods.
|
||||
|
||||
For platforms such as linux and darwin, the operating system is autodetected
|
||||
and the target is set to be x86_64.
|
||||
|
||||
The command line syntax for specifying an architecture is as follows:
|
||||
|
||||
target=<Target name> os=<OperatingSystem name>
|
||||
|
||||
If the user wishes to use the defaults, either target or os can be left out of
|
||||
the command line and Spack will concretize using the default. These defaults
|
||||
are set in the 'platforms/' directory which contains the different subclasses
|
||||
for platforms. If the machine has multiple architectures, the user can
|
||||
also enter front-end, or fe or back-end or be. These settings will concretize
|
||||
to their respective front-end and back-end targets and operating systems.
|
||||
Additional platforms can be added by creating a subclass of Platform
|
||||
and adding it inside the platform directory.
|
||||
|
||||
Platforms are an abstract class that are extended by subclasses. If the user
|
||||
wants to add a new type of platform (such as cray_xe), they can create a
|
||||
subclass and set all the class attributes such as priority, front_target,
|
||||
back_target, front_os, back_os. Platforms also contain a priority class
|
||||
attribute. A lower number signifies higher priority. These numbers are
|
||||
arbitrarily set and can be changed though often there isn't much need unless a
|
||||
new platform is added and the user wants that to be detected first.
|
||||
|
||||
Targets are created inside the platform subclasses. Most architecture
|
||||
(like linux, and darwin) will have only one target (x86_64) but in the case of
|
||||
Cray machines, there is both a frontend and backend processor. The user can
|
||||
specify which targets are present on front-end and back-end architecture
|
||||
|
||||
Depending on the platform, operating systems are either auto-detected or are
|
||||
set. The user can set the front-end and back-end operating setting by the class
|
||||
attributes front_os and back_os. The operating system as described earlier,
|
||||
will be responsible for compiler detection.
|
||||
"""
import contextlib
import functools
import warnings

import six

import archspec.cpu

import llnl.util.lang as lang
import llnl.util.tty as tty

import spack.compiler
import spack.compilers
import spack.config
import spack.error as serr
import spack.paths
import spack.util.classes
import spack.util.executable
import spack.version
from spack.util.spack_yaml import syaml_dict


class NoPlatformError(serr.SpackError):
    def __init__(self):
        super(NoPlatformError, self).__init__(
            "Could not determine a platform for this machine.")


def _ensure_other_is_target(method):
    """Decorator to be used in dunder methods taking a single argument to
    ensure that the argument is an instance of ``Target`` too.
    """
    @functools.wraps(method)
    def _impl(self, other):
        if isinstance(other, six.string_types):
            other = Target(other)

        if not isinstance(other, Target):
            return NotImplemented

        return method(self, other)

    return _impl
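In practice this means the comparison dunders on Target accept plain strings as well as Target instances; a quick illustrative sketch:

# Illustrative only: the decorator wraps a string operand in Target(...)
# before the wrapped method runs.
t = Target('x86_64')
assert t == 'x86_64'        # the string is coerced to Target('x86_64')
assert (t == 42) is False   # unsupported types return NotImplemented,
                            # so Python falls back to identity comparison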

class Target(object):
    def __init__(self, name, module_name=None):
        """Target models microarchitectures and their compatibility.

        Args:
            name (str or Microarchitecture): micro-architecture of the
                target
            module_name (str): optional module name to get access to the
                current target. This is typically used on machines
                like Cray (e.g. craype-compiler)
        """
        if not isinstance(name, archspec.cpu.Microarchitecture):
            name = archspec.cpu.TARGETS.get(
                name, archspec.cpu.generic_microarchitecture(name)
            )
        self.microarchitecture = name
        self.module_name = module_name

    @property
    def name(self):
        return self.microarchitecture.name

    @_ensure_other_is_target
    def __eq__(self, other):
        return self.microarchitecture == other.microarchitecture and \
            self.module_name == other.module_name

    def __ne__(self, other):
        # This method is necessary as long as we support Python 2. In Python 3
        # __ne__ defaults to the implementation below
        return not self == other

    @_ensure_other_is_target
    def __lt__(self, other):
        # TODO: In the future it would be convenient to say
        # TODO: `spec.architecture.target < other.architecture.target`
        # TODO: and change the semantic of the comparison operators

        # This is needed to sort deterministically specs in a list.
        # It doesn't implement a total ordering semantic.
        return self.microarchitecture.name < other.microarchitecture.name

    def __hash__(self):
        return hash((self.name, self.module_name))

    @staticmethod
    def from_dict_or_value(dict_or_value):
        # A string here represents a generic target (like x86_64 or ppc64) or
        # a custom micro-architecture
        if isinstance(dict_or_value, six.string_types):
            return Target(dict_or_value)

        # TODO: From a dict we actually retrieve much more information than
        # TODO: just the name. We can use that information to reconstruct an
        # TODO: "old" micro-architecture or check the current definition.
        target_info = dict_or_value
        return Target(target_info['name'])

    def to_dict_or_value(self):
        """Returns a dict or a value representing the current target.

        String values are used to keep backward compatibility with generic
        targets, like e.g. x86_64 or ppc64. More specific micro-architectures
        will return a dictionary which contains information on the name,
        features, vendor, generation and parents of the current target.
        """
        # Generic targets represent either an architecture
        # family (like x86_64) or a custom micro-architecture
        if self.microarchitecture.vendor == 'generic':
            return str(self)

        return syaml_dict(
            self.microarchitecture.to_dict(return_list_of_items=True)
        )

    def __repr__(self):
        cls_name = self.__class__.__name__
        fmt = cls_name + '({0}, {1})'
        return fmt.format(repr(self.microarchitecture),
                          repr(self.module_name))

    def __str__(self):
        return str(self.microarchitecture)

    def __contains__(self, cpu_flag):
        return cpu_flag in self.microarchitecture

    def optimization_flags(self, compiler):
        """Returns the flags needed to optimize for this target using
        the compiler passed as argument.

        Args:
            compiler (spack.spec.CompilerSpec or spack.compiler.Compiler): object that
                contains both the name and the version of the compiler we want to use
        """
        # Mixed toolchains are not supported yet
        import spack.compilers
        if isinstance(compiler, spack.compiler.Compiler):
            if spack.compilers.is_mixed_toolchain(compiler):
                msg = ('microarchitecture specific optimizations are not '
                       'supported yet on mixed compiler toolchains [check'
                       ' {0.name}@{0.version} for further details]')
                warnings.warn(msg.format(compiler))
                return ''

        # Try to check if the current compiler comes with a version number or
        # has an unexpected suffix. If so, treat it as a compiler with a
        # custom spec.
        compiler_version = compiler.version
        version_number, suffix = archspec.cpu.version_components(
            compiler.version
        )
        if not version_number or suffix not in ('', 'apple'):
            # Try to deduce the underlying version of the compiler, regardless
            # of its name in compilers.yaml. Depending on where this function
            # is called we might get either a CompilerSpec or a fully fledged
            # compiler object.
            import spack.spec
            if isinstance(compiler, spack.spec.CompilerSpec):
                compiler = spack.compilers.compilers_for_spec(compiler).pop()
            try:
                compiler_version = compiler.real_version
            except spack.util.executable.ProcessError as e:
                # log this and just return compiler.version instead
                tty.debug(str(e))

        return self.microarchitecture.optimization_flags(
            compiler.name, str(compiler_version)
        )
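A sketch of how this method is typically used, assuming archspec knows the 'haswell' microarchitecture and a matching GCC entry exists in compilers.yaml; the version string is made up:

# Illustrative only: ask a target which flags a given compiler needs.
import spack.spec

flags = Target('haswell').optimization_flags(spack.spec.CompilerSpec('gcc@9.3.0'))
# For recent GCC releases archspec typically reports something like
# '-march=haswell -mtune=haswell'.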

@lang.lazy_lexicographic_ordering
class Platform(object):
    """Abstract class that each type of Platform will subclass.
    Concrete subclasses are instantiated and returned by the
    detection logic below.
    """

    # Subclass sets number. Controls detection order
    priority = None  # type: int

    #: binary formats used on this platform; used by relocation logic
    binary_formats = ['elf']

    front_end = None  # type: str
    back_end = None  # type: str
    default = None  # type: str  # The default back end target.

    front_os = None  # type: str
    back_os = None  # type: str
    default_os = None  # type: str

    reserved_targets = ['default_target', 'frontend', 'fe', 'backend', 'be']
    reserved_oss = ['default_os', 'frontend', 'fe', 'backend', 'be']

    def __init__(self, name):
        self.targets = {}
        self.operating_sys = {}
        self.name = name

    def add_target(self, name, target):
        """Used by the platform specific subclass to list available targets.
        Raises an error if the platform specifies a name
        that is reserved by spack as an alias.
        """
        if name in Platform.reserved_targets:
            raise ValueError(
                "%s is a spack reserved alias "
                "and cannot be the name of a target" % name)
        self.targets[name] = target

    def target(self, name):
        """This is a getter method for the target dictionary
        that handles defaulting based on the values provided by default,
        front-end, and back-end. This can be overwritten
        by a subclass for which we want to provide further aliasing options.
        """
        # TODO: Check if we can avoid using strings here
        name = str(name)
        if name == 'default_target':
            name = self.default
        elif name == 'frontend' or name == 'fe':
            name = self.front_end
        elif name == 'backend' or name == 'be':
            name = self.back_end

        return self.targets.get(name, None)

    def add_operating_system(self, name, os_class):
        """Add the operating_system class object into the
        platform.operating_sys dictionary.
        """
        if name in Platform.reserved_oss:
            raise ValueError(
                "%s is a spack reserved alias "
                "and cannot be the name of an OS" % name)
        self.operating_sys[name] = os_class

    def operating_system(self, name):
        if name == 'default_os':
            name = self.default_os
        if name == 'frontend' or name == "fe":
            name = self.front_os
        if name == 'backend' or name == 'be':
            name = self.back_os

        return self.operating_sys.get(name, None)
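Both getters resolve Spack's reserved aliases before the dictionary lookup, so (assuming the detected platform defines its front-end values) the following pairs refer to the same objects; an illustrative sketch:

# Illustrative only.
plat = platform()   # the detected host platform (defined later in this file)
assert plat.target('fe') is plat.target(plat.front_end)
assert plat.operating_system('default_os') is plat.operating_system(plat.default_os)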

    @classmethod
    def setup_platform_environment(cls, pkg, env):
        """Subclass can override this method if it requires any
        platform-specific build environment modifications.
        """

    @classmethod
    def detect(cls):
        """Subclass is responsible for implementing this method.
        Returns True if the Platform class detects that it is the
        current platform, and False if it's not.
        """
        raise NotImplementedError()

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return self.name

    def _cmp_iter(self):
        yield self.name
        yield self.default
        yield self.front_end
        yield self.back_end
        yield self.default_os
        yield self.front_os
        yield self.back_os

        def targets():
            for t in sorted(self.targets.values()):
                yield t._cmp_iter
        yield targets

        def oses():
            for o in sorted(self.operating_sys.values()):
                yield o._cmp_iter
        yield oses


@lang.lazy_lexicographic_ordering
class OperatingSystem(object):
    """Base class for operating systems, extended by subclasses for the
    specifics. The operating system contains the compiler-finding logic:
    instead of calling two separate methods to find compilers, we call a
    find_compilers method on each operating system.
    """

    def __init__(self, name, version):
        self.name = name.replace('-', '_')
        self.version = str(version).replace('-', '_')

    def __str__(self):
        return "%s%s" % (self.name, self.version)

    def __repr__(self):
        return self.__str__()

    def _cmp_iter(self):
        yield self.name
        yield self.version

    def to_dict(self):
        return syaml_dict([
            ('name', self.name),
            ('version', self.version)
        ])
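A small sketch of the normalization and serialization behavior; the name and version here are made up:

rhel = OperatingSystem('rhel', '7-2')
str(rhel)        # -> 'rhel7_2': dashes are normalized to underscores
rhel.to_dict()   # -> syaml_dict([('name', 'rhel'), ('version', '7_2')])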


@lang.lazy_lexicographic_ordering
class Arch(object):
    """Architecture is now a class to help with setting attributes.

    TODO: refactor so that we don't need this class.
    """

    def __init__(self, plat=None, os=None, target=None):
        self.platform = plat
        if plat and os:
            os = self.platform.operating_system(os)
        self.os = os
        if plat and target:
            target = self.platform.target(target)
        self.target = target

        # Hooks for parser to use when platform is set after target or os
        self.target_string = None
        self.os_string = None

    @property
    def concrete(self):
        return all((self.platform is not None,
                    isinstance(self.platform, Platform),
                    self.os is not None,
                    isinstance(self.os, OperatingSystem),
                    self.target is not None, isinstance(self.target, Target)))

    def __str__(self):
        if self.platform or self.os or self.target:
            if self.platform.name == 'darwin':
                os_name = self.os.name if self.os else "None"
            else:
                os_name = str(self.os)

            return (str(self.platform) + "-" +
                    os_name + "-" + str(self.target))
        else:
            return ''

    def __contains__(self, string):
        return string in str(self)

    # TODO: make this unnecessary: don't include an empty arch on *every* spec.
    def __nonzero__(self):
        return (self.platform is not None or
                self.os is not None or
                self.target is not None)
    __bool__ = __nonzero__

    def _cmp_iter(self):
        if isinstance(self.platform, Platform):
            yield self.platform.name
        else:
            yield self.platform

        if isinstance(self.os, OperatingSystem):
            yield self.os.name
        else:
            yield self.os

        if isinstance(self.target, Target):
            yield self.target.microarchitecture
        else:
            yield self.target

    def to_dict(self):
        str_or_none = lambda v: str(v) if v else None
        d = syaml_dict([
            ('platform', str_or_none(self.platform)),
            ('platform_os', str_or_none(self.os)),
            ('target', self.target.to_dict_or_value())])
        return syaml_dict([('arch', d)])

    def to_spec(self):
        """Convert this Arch to an anonymous Spec with architecture defined."""
        spec = spack.spec.Spec()
        spec.architecture = spack.spec.ArchSpec(str(self))
        return spec

    @staticmethod
    def from_dict(d):
        spec = spack.spec.ArchSpec.from_dict(d)
        return arch_for_spec(spec)


@lang.memoized
def get_platform(platform_name):
    """Returns a platform object that corresponds to the given name."""
    platform_list = all_platforms()
    for p in platform_list:
        if platform_name.replace("_", "").lower() == p.__name__.lower():
            return p()


def verify_platform(platform_name):
    """Determines whether or not the platform with the given name is supported
    in Spack. For more information, see the 'spack.platforms' submodule.
    """
    platform_name = platform_name.replace("_", "").lower()
    platform_names = [p.__name__.lower() for p in all_platforms()]

    if platform_name not in platform_names:
        tty.die("%s is not a supported platform; supported platforms are %s" %
                (platform_name, platform_names))


def arch_for_spec(arch_spec):
    """Transforms the given architecture spec into an architecture object."""
    arch_spec = spack.spec.ArchSpec(arch_spec)
    assert arch_spec.concrete

    arch_plat = get_platform(arch_spec.platform)
    if not (arch_plat.operating_system(arch_spec.os) and
            arch_plat.target(arch_spec.target)):
        raise ValueError(
            "Can't recreate arch for spec %s on current arch %s; "
            "spec architecture is too different" % (arch_spec, sys_type()))

    return Arch(arch_plat, arch_spec.os, arch_spec.target)


@lang.memoized
def _all_platforms():
    mod_path = spack.paths.platform_path
    return spack.util.classes.list_classes("spack.platforms", mod_path)


@lang.memoized
def _platform():
    """Detects the platform for this machine.

    Gathers a list of all available subclasses of platforms and sorts the
    list according to their priority. Priority is an arbitrarily set number.
    Detects the platform either using uname or a file path (/opt/cray...).
    """
    # Try to create a Platform object using the config file FIRST
    platform_list = _all_platforms()
    platform_list.sort(key=lambda a: a.priority)

    for platform_cls in platform_list:
        if platform_cls.detect():
            return platform_cls()


#: The "real" platform of the host running Spack. This should not be changed
#: by any method and is here as a convenient way to refer to the host platform.
real_platform = _platform

#: The current platform used by Spack. May be swapped by the use_platform
#: context manager.
platform = _platform

#: The list of all platform classes. May be swapped by the use_platform
#: context manager.
all_platforms = _all_platforms


@lang.memoized
def default_arch():
    """Default ``Arch`` object for this machine.

    See ``sys_type()``.
    """
    return Arch(platform(), 'default_os', 'default_target')


def sys_type():
    """Print out the "default" platform-os-target tuple for this machine.

    On machines with only one target OS/target, prints out the
    platform-os-target for the frontend. For machines with a frontend
    and a backend, prints the default backend.

    TODO: replace with use of more explicit methods to get *all* the
    backends, as client code should really be aware of cross-compiled
    architectures.

    """
    return str(default_arch())


@lang.memoized
def compatible_sys_types():
    """Returns a list of all the systypes compatible with the current host."""
    compatible_archs = []
    current_host = archspec.cpu.host()
    compatible_targets = [current_host] + current_host.ancestors
    for target in compatible_targets:
        arch = Arch(platform(), 'default_os', target)
        compatible_archs.append(str(arch))
    return compatible_archs
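For illustration, on a hypothetical Skylake Linux host this walks the archspec ancestor chain, so the result would look roughly like the list sketched below; the exact entries depend entirely on the machine:

# Illustrative output only (host-dependent):
# ['linux-ubuntu20.04-skylake',
#  'linux-ubuntu20.04-broadwell',
#  ...
#  'linux-ubuntu20.04-x86_64']
print(compatible_sys_types())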


class _PickleableCallable(object):
    """Class used to pickle a callable that may substitute either
    _platform or _all_platforms. Lambda or nested functions are
    not pickleable.
    """
    def __init__(self, return_value):
        self.return_value = return_value

    def __call__(self):
        return self.return_value


@contextlib.contextmanager
def use_platform(new_platform):
    global platform, all_platforms

    msg = '"{0}" must be an instance of Platform'
    assert isinstance(new_platform, Platform), msg.format(new_platform)

    original_platform_fn, original_all_platforms_fn = platform, all_platforms

    try:
        platform = _PickleableCallable(new_platform)
        all_platforms = _PickleableCallable([type(new_platform)])

        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
        spack.compilers._cache_config_files = []

        yield new_platform

    finally:
        platform, all_platforms = original_platform_fn, original_all_platforms_fn

        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
        spack.compilers._cache_config_files = []
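A sketch of the intended use, e.g. in tests that must pretend to run on another platform; 'linux' is assumed here to be one of the available platform classes:

with use_platform(get_platform('linux')):
    # Inside the block Spack behaves as if the host platform were linux.
    assert str(platform()) == 'linux'
# On exit, the detected host platform and the cleared caches are restored.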
@@ -37,16 +37,12 @@ def _search_duplicate_compilers(error_cls):
"""
import collections
import itertools
import re

from six.moves.urllib.request import urlopen

try:
    from collections.abc import Sequence  # novm
except ImportError:
    from collections import Sequence


#: Map an audit tag to a list of callables implementing checks
CALLBACKS = {}

@@ -265,45 +261,6 @@ def _search_duplicate_specs_in_externals(error_cls):
    kwargs=('pkgs',)
)

#: Sanity checks on linting
# This can take some time, so it's run separately from packages
package_https_directives = AuditClass(
    group='packages-https',
    tag='PKG-HTTPS-DIRECTIVES',
    description='Sanity checks on https checks of package urls, etc.',
    kwargs=('pkgs',)
)


@package_https_directives
def _linting_package_file(pkgs, error_cls):
    """Check for correctness of links"""
    import llnl.util.lang

    import spack.repo
    import spack.spec

    errors = []
    for pkg_name in pkgs:
        pkg = spack.repo.get(pkg_name)

        # Does the homepage have http, and if so, does https work?
        if pkg.homepage.startswith('http://'):
            https = re.sub("http", "https", pkg.homepage, 1)
            try:
                response = urlopen(https)
            except Exception as e:
                msg = 'Error with attempting https for "{0}": '
                errors.append(error_cls(msg.format(pkg.name), [str(e)]))
                continue

            if response.getcode() == 200:
                msg = 'Package "{0}" uses http but has a valid https endpoint.'
                errors.append(msg.format(pkg.name))

    return llnl.util.lang.dedupe(errors)
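New checks join the same audit group through the decorator shown above; a hypothetical sketch (the check body, name, and message are made up for illustration):

@package_https_directives
def _example_homepage_present(pkgs, error_cls):
    """Flag packages whose homepage attribute is empty (illustrative)."""
    import spack.repo

    errors = []
    for pkg_name in pkgs:
        pkg = spack.repo.get(pkg_name)
        if not pkg.homepage:
            errors.append(error_cls('Package "{0}" has no homepage'.format(pkg_name), []))
    return errors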


@package_directives
def _unknown_variants_in_directives(pkgs, error_cls):

@@ -4,9 +4,11 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import codecs
import glob
import hashlib
import json
import os
import re
import shutil
import sys
import tarfile
@@ -27,9 +29,7 @@
import spack.database as spack_db
import spack.fetch_strategy as fs
import spack.hash_types as ht
import spack.hooks.sbang
import spack.mirror
import spack.platforms
import spack.relocate as relocate
import spack.util.file_cache as file_cache
import spack.util.gpg
@@ -45,25 +45,6 @@
_build_cache_keys_relative_path = '_pgp'


class FetchCacheError(Exception):
    """Error thrown when fetching the cache failed, usually a composite error list."""
    def __init__(self, errors):
        if not isinstance(errors, list):
            raise TypeError("Expected a list of errors")
        self.errors = errors
        if len(errors) > 1:
            msg = "    Error {0}: {1}: {2}"
            self.message = "Multiple errors during fetching:\n"
            self.message += "\n".join((
                msg.format(i + 1, err.__class__.__name__, str(err))
                for (i, err) in enumerate(errors)
            ))
        else:
            err = errors[0]
            self.message = "{0}: {1}".format(err.__class__.__name__, str(err))
        super(FetchCacheError, self).__init__(self.message)
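Callers collect low-level fetch failures and raise them as one composite error; an illustrative sketch with made-up messages:

errors = [
    RuntimeError('Unable to read index hash'),
    RuntimeError('Unable to read index'),
]
raise FetchCacheError(errors)
# -> "Multiple errors during fetching:" followed by one numbered line per error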


class BinaryCacheIndex(object):
    """
    The BinaryCacheIndex tracks what specs are available on (usually remote)
@@ -241,16 +222,11 @@ def find_built_spec(self, spec):
        ]
        """
        self.regenerate_spec_cache()
        return self.find_by_hash(spec.dag_hash())

    def find_by_hash(self, find_hash):
        """Same as find_built_spec but uses the hash of a spec.

        Args:
            find_hash (str): hash of the spec to search
        """
        find_hash = spec.dag_hash()
        if find_hash not in self._mirrors_for_spec:
            return None

        return self._mirrors_for_spec[find_hash]

    def update_spec(self, spec, found_list):
@@ -315,22 +291,14 @@ def update(self):
        # Otherwise the concrete spec cache should not need to be updated at
        # all.

        fetch_errors = []
        all_methods_failed = True

        for cached_mirror_url in self._local_index_cache:
            cache_entry = self._local_index_cache[cached_mirror_url]
            cached_index_hash = cache_entry['index_hash']
            cached_index_path = cache_entry['index_path']
            if cached_mirror_url in configured_mirror_urls:
                # May need to fetch the index and update the local caches
                try:
                    needs_regen = self._fetch_and_cache_index(
                        cached_mirror_url, expect_hash=cached_index_hash)
                    all_methods_failed = False
                except FetchCacheError as fetch_error:
                    needs_regen = False
                    fetch_errors.extend(fetch_error.errors)
                needs_regen = self._fetch_and_cache_index(
                    cached_mirror_url, expect_hash=cached_index_hash)
                # The need to regenerate implies a need to clear as well.
                spec_cache_clear_needed |= needs_regen
                spec_cache_regenerate_needed |= needs_regen
@@ -357,12 +325,7 @@ def update(self):
        for mirror_url in configured_mirror_urls:
            if mirror_url not in self._local_index_cache:
                # Need to fetch the index and update the local caches
                try:
                    needs_regen = self._fetch_and_cache_index(mirror_url)
                    all_methods_failed = False
                except FetchCacheError as fetch_error:
                    fetch_errors.extend(fetch_error.errors)
                    needs_regen = False
                needs_regen = self._fetch_and_cache_index(mirror_url)
                # Generally speaking, a new mirror wouldn't imply the need to
                # clear the spec cache, so leave it as is.
                if needs_regen:
@@ -370,9 +333,7 @@ def update(self):

        self._write_local_index_cache()

        if all_methods_failed:
            raise FetchCacheError(fetch_errors)
        elif spec_cache_regenerate_needed:
        if spec_cache_regenerate_needed:
            self.regenerate_spec_cache(clear_existing=spec_cache_clear_needed)

    def _fetch_and_cache_index(self, mirror_url, expect_hash=None):
@@ -391,8 +352,6 @@ def _fetch_and_cache_index(self, mirror_url, expect_hash=None):
            True if this function thinks the concrete spec cache,
            ``_mirrors_for_spec``, should be regenerated. Returns False
            otherwise.
        Throws:
            FetchCacheError: a composite exception.
        """
        index_fetch_url = url_util.join(
            mirror_url, _build_cache_relative_path, 'index.json')
@@ -402,19 +361,14 @@ def _fetch_and_cache_index(self, mirror_url, expect_hash=None):
        old_cache_key = None
        fetched_hash = None

        errors = []

        # Fetch the hash first so we can check if we actually need to fetch
        # the index itself.
        try:
            _, _, fs = web_util.read_from_url(hash_fetch_url)
            fetched_hash = codecs.getreader('utf-8')(fs).read()
        except (URLError, web_util.SpackWebError) as url_err:
            errors.append(
                RuntimeError("Unable to read index hash {0} due to {1}: {2}".format(
                    hash_fetch_url, url_err.__class__.__name__, str(url_err)
                ))
            )
            tty.debug('Unable to read index hash {0}'.format(
                hash_fetch_url), url_err, 1)

        # The only case where we'll skip attempting to fetch the buildcache
        # index from the mirror is when we already have a hash for this
@@ -441,23 +395,24 @@ def _fetch_and_cache_index(self, mirror_url, expect_hash=None):
            _, _, fs = web_util.read_from_url(index_fetch_url)
            index_object_str = codecs.getreader('utf-8')(fs).read()
        except (URLError, web_util.SpackWebError) as url_err:
            errors.append(
                RuntimeError("Unable to read index {0} due to {1}: {2}".format(
                    index_fetch_url, url_err.__class__.__name__, str(url_err)
                ))
            )
            raise FetchCacheError(errors)
            tty.debug('Unable to read index {0}'.format(index_fetch_url),
                      url_err, 1)
            # We failed to fetch the index, even though we decided it was
            # necessary. However, regenerating the spec cache won't produce
            # anything different than what it has already, so return False.
            return False

        locally_computed_hash = compute_hash(index_object_str)

        if fetched_hash is not None and locally_computed_hash != fetched_hash:
            msg = ('Computed hash ({0}) did not match remote ({1}), '
                   'indicating error in index transmission').format(
                       locally_computed_hash, expect_hash)
            errors.append(RuntimeError(msg))
            msg_tmpl = ('Computed hash ({0}) did not match remote ({1}), '
                        'indicating error in index transmission')
            tty.error(msg_tmpl.format(locally_computed_hash, expect_hash))
            # We somehow got an index that doesn't match the remote one, maybe
            # the next time we try we'll be successful.
            raise FetchCacheError(errors)
            # the next time we try we'll be successful. Regardless, we're not
            # updating our index cache with this, so don't regenerate the spec
            # cache either.
            return False

        url_hash = compute_hash(mirror_url)

@@ -613,16 +568,6 @@ def get_buildfile_manifest(spec):
    # Used by make_package_relative to determine binaries to change.
    for root, dirs, files in os.walk(spec.prefix, topdown=True):
        dirs[:] = [d for d in dirs if d not in blacklist]

        # Directories may need to be relocated too.
        for directory in dirs:
            dir_path_name = os.path.join(root, directory)
            rel_path_name = os.path.relpath(dir_path_name, spec.prefix)
            if os.path.islink(dir_path_name):
                link = os.readlink(dir_path_name)
                if os.path.isabs(link) and link.startswith(spack.store.layout.root):
                    data['link_to_relocate'].append(rel_path_name)

        for filename in files:
            path_name = os.path.join(root, filename)
            m_type, m_subtype = relocate.mime_type(path_name)
@@ -670,8 +615,9 @@ def write_buildinfo_file(spec, workdir, rel=False):
        prefix_to_hash[str(d.prefix)] = d.dag_hash()

    # Create buildinfo data and write it to disk
    import spack.hooks.sbang as sbang
    buildinfo = {}
    buildinfo['sbang_install_path'] = spack.hooks.sbang.sbang_install_path()
    buildinfo['sbang_install_path'] = sbang.sbang_install_path()
    buildinfo['relative_rpaths'] = rel
    buildinfo['buildpath'] = spack.store.layout.root
    buildinfo['spackprefix'] = spack.paths.prefix
@@ -762,14 +708,14 @@ def generate_package_index(cache_prefix):
    """Create the build cache index page.

    Creates (or replaces) the "index.json" page at the location given in
    cache_prefix. This page contains a link for each binary package (.yaml or
    .json) under cache_prefix.
    cache_prefix. This page contains a link for each binary package (.yaml)
    under cache_prefix.
    """
    try:
        file_list = (
            entry
            for entry in web_util.list_url(cache_prefix)
            if entry.endswith('.yaml') or entry.endswith('spec.json'))
            if entry.endswith('.yaml'))
    except KeyError as inst:
        msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
        tty.warn(msg)
@@ -783,33 +729,28 @@ def generate_package_index(cache_prefix):
        tty.warn(msg)
        return

    tty.debug('Retrieving spec descriptor files from {0} to build index'.format(
    tty.debug('Retrieving spec.yaml files from {0} to build index'.format(
        cache_prefix))

    all_mirror_specs = {}

    for file_path in file_list:
        try:
            spec_url = url_util.join(cache_prefix, file_path)
            tty.debug('fetching {0}'.format(spec_url))
            _, _, spec_file = web_util.read_from_url(spec_url)
            spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
            # Need full spec.json name or this gets confused with index.json.
            if spec_url.endswith('.json'):
                spec_dict = sjson.load(spec_file_contents)
                s = Spec.from_json(spec_file_contents)
            elif spec_url.endswith('.yaml'):
                spec_dict = syaml.load(spec_file_contents)
                s = Spec.from_yaml(spec_file_contents)
            yaml_url = url_util.join(cache_prefix, file_path)
            tty.debug('fetching {0}'.format(yaml_url))
            _, _, yaml_file = web_util.read_from_url(yaml_url)
            yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
            spec_dict = syaml.load(yaml_contents)
            s = Spec.from_yaml(yaml_contents)
            all_mirror_specs[s.dag_hash()] = {
                'spec_url': spec_url,
                'yaml_url': yaml_url,
                'spec': s,
                'num_deps': len(list(s.traverse(root=False))),
                'binary_cache_checksum': spec_dict['binary_cache_checksum'],
                'buildinfo': spec_dict['buildinfo'],
            }
        except (URLError, web_util.SpackWebError) as url_err:
            tty.error('Error reading specfile: {0}'.format(file_path))
            tty.error('Error reading spec.yaml: {0}'.format(file_path))
            tty.error(url_err)

    sorted_specs = sorted(all_mirror_specs.keys(),
@@ -835,7 +776,7 @@ def generate_package_index(cache_prefix):
            # full hash. If the full hash we have for any deps does not
            # match what those deps have themselves, then we need to splice
            # this spec with those deps, and push this spliced spec
            # (spec.json file) back to the mirror, as well as update the
            # (spec.yaml file) back to the mirror, as well as update the
            # all_mirror_specs dictionary with this spliced spec.
            to_splice = []
            for dep in s.dependencies():
@@ -853,25 +794,25 @@ def generate_package_index(cache_prefix):
                    s = s.splice(true_dep, True)

                # Push this spliced spec back to the mirror
                spliced_spec_dict = s.to_dict(hash=ht.full_hash)
                spliced_yaml = s.to_dict(hash=ht.full_hash)
                for key in ['binary_cache_checksum', 'buildinfo']:
                    spliced_spec_dict[key] = spec_record[key]
                    spliced_yaml[key] = spec_record[key]

                temp_json_path = os.path.join(tmpdir, 'spliced.spec.json')
                with open(temp_json_path, 'w') as fd:
                    fd.write(sjson.dump(spliced_spec_dict))
                temp_yaml_path = os.path.join(tmpdir, 'spliced.spec.yaml')
                with open(temp_yaml_path, 'w') as fd:
                    fd.write(syaml.dump(spliced_yaml))

                spliced_spec_url = spec_record['spec_url']
                spliced_yaml_url = spec_record['yaml_url']
                web_util.push_to_url(
                    temp_json_path, spliced_spec_url, keep_original=False)
                    temp_yaml_path, spliced_yaml_url, keep_original=False)
                tty.debug('  spliced and wrote {0}'.format(
                    spliced_spec_url))
                    spliced_yaml_url))
                spec_record['spec'] = s

            db.add(s, None)
            db.mark(s, 'in_buildcache', True)

    # Now that we have fixed any old specfiles that might have had the wrong
    # Now that we have fixed any old spec yamls that might have had the wrong
    # full hash for their dependencies, we can generate the index, compute
    # the hash, and push those files to the mirror.
    index_json_path = os.path.join(db_root_dir, 'index.json')
@@ -1007,27 +948,19 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    # need to copy the spec file so the build cache can be downloaded
    # without concretizing with the current spack packages
    # and preferences

    spec_file = spack.store.layout.spec_file_path(spec)
    specfile_name = tarball_name(spec, '.spec.json')
    specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name))
    deprecated_specfile_path = specfile_path.replace('.spec.json', '.spec.yaml')
    spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.realpath(
        os.path.join(cache_prefix, specfile_name))

    remote_specfile_path = url_util.join(
        outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
    remote_specfile_path_deprecated = url_util.join(
        outdir, os.path.relpath(deprecated_specfile_path,
                                os.path.realpath(tmpdir)))

    # If force and exists, overwrite. Otherwise raise exception on collision.
    if force:
        if web_util.url_exists(remote_specfile_path):
    if web_util.url_exists(remote_specfile_path):
        if force:
            web_util.remove_url(remote_specfile_path)
        if web_util.url_exists(remote_specfile_path_deprecated):
            web_util.remove_url(remote_specfile_path_deprecated)
    elif (web_util.url_exists(remote_specfile_path) or
          web_util.url_exists(remote_specfile_path_deprecated)):
        raise NoOverwriteException(url_util.format(remote_specfile_path))
        else:
            raise NoOverwriteException(url_util.format(remote_specfile_path))

    # make a copy of the install directory to work with
    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
@@ -1075,23 +1008,15 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # add sha256 checksum to spec.json

    # add sha256 checksum to spec.yaml
    with open(spec_file, 'r') as inputfile:
        content = inputfile.read()
        if spec_file.endswith('.yaml'):
            spec_dict = yaml.load(content)
        elif spec_file.endswith('.json'):
            spec_dict = sjson.load(content)
        else:
            raise ValueError(
                '{0} not a valid spec file type (json or yaml)'.format(
                    spec_file))
        spec_dict = yaml.load(content)
    bchecksum = {}
    bchecksum['hash_algorithm'] = 'sha256'
    bchecksum['hash'] = checksum
    spec_dict['binary_cache_checksum'] = bchecksum
    # Add original install prefix relative to layout root to spec.json.
    # Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
    buildinfo = {}
    buildinfo['relative_prefix'] = os.path.relpath(
@@ -1100,7 +1025,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    spec_dict['buildinfo'] = buildinfo

    with open(specfile_path, 'w') as outfile:
        outfile.write(sjson.dump(spec_dict))
        outfile.write(syaml.dump(spec_dict))

    # sign the tarball and spec file with gpg
    if not unsigned:
@@ -1210,7 +1135,7 @@ def make_package_relative(workdir, spec, allow_root):
        orig_path_names.append(os.path.join(prefix, filename))
        cur_path_names.append(os.path.join(workdir, filename))

    platform = spack.platforms.by_name(spec.platform)
    platform = spack.architecture.get_platform(spec.platform)
    if 'macho' in platform.binary_formats:
        relocate.make_macho_binaries_relative(
            cur_path_names, orig_path_names, old_layout_root)
@@ -1244,6 +1169,8 @@ def relocate_package(spec, allow_root):
    """
    Relocate the given package
    """
    import spack.hooks.sbang as sbang

    workdir = str(spec.prefix)
    buildinfo = read_buildinfo_file(workdir)
    new_layout_root = str(spack.store.layout.root)
@@ -1282,8 +1209,7 @@ def relocate_package(spec, allow_root):
    prefix_to_prefix_bin = OrderedDict({})

    if old_sbang_install_path:
        install_path = spack.hooks.sbang.sbang_install_path()
        prefix_to_prefix_text[old_sbang_install_path] = install_path
        prefix_to_prefix_text[old_sbang_install_path] = sbang.sbang_install_path()

    prefix_to_prefix_text[old_prefix] = new_prefix
    prefix_to_prefix_bin[old_prefix] = new_prefix
@@ -1297,7 +1223,7 @@ def relocate_package(spec, allow_root):
    # now a POSIX script that lives in the install prefix. Old packages
    # will have the old sbang location in their shebangs.
    orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(old_spack_prefix)
    new_sbang = spack.hooks.sbang.sbang_shebang_line()
    new_sbang = sbang.sbang_shebang_line()
    prefix_to_prefix_text[orig_sbang] = new_sbang

    tty.debug("Relocating package from",
@@ -1321,7 +1247,7 @@ def is_backup_file(file):
    ]
    # If the buildcache was not created with relativized rpaths
    # do the relocation of path in binaries
    platform = spack.platforms.by_name(spec.platform)
    platform = spack.architecture.get_platform(spec.platform)
    if 'macho' in platform.binary_formats:
        relocate.relocate_macho_binaries(files_to_relocate,
                                         old_layout_root,
@@ -1380,26 +1306,15 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_is_json = True
    deprecated_yaml_name = tarball_name(spec, '.spec.yaml')
    deprecated_yaml_path = os.path.join(tmpdir, deprecated_yaml_name)
    json_name = tarball_name(spec, '.spec.json')
    json_path = os.path.join(tmpdir, json_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)

    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    # some buildcache tarfiles use bzip2 compression
    if not os.path.exists(tarfile_path):
        tarfile_name = tarball_name(spec, '.tar.bz2')
        tarfile_path = os.path.join(tmpdir, tarfile_name)

    if os.path.exists(json_path):
        specfile_path = json_path
    elif os.path.exists(deprecated_yaml_path):
        specfile_is_json = False
        specfile_path = deprecated_yaml_path
    else:
        raise ValueError('Cannot find spec file for {0}.'.format(tmpdir))

    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
@@ -1422,10 +1337,7 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        if specfile_is_json:
            spec_dict = sjson.load(content)
        else:
            spec_dict = syaml.load(content)
        spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']

    # if the checksums don't match don't install
@@ -1441,30 +1353,42 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
    buildinfo = spec_dict.get('buildinfo', {})
    old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
    rel = buildinfo.get('relative_rpaths')
    # if the original relative prefix and new relative prefix differ the
    # directory layout has changed and the buildcache cannot be installed
    # if it was created with relative rpaths
    info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
    tty.debug(info %
              (old_relative_prefix, new_relative_prefix, rel))
    # if (old_relative_prefix != new_relative_prefix and (rel)):
    #     shutil.rmtree(tmpdir)
    #     msg = "Package tarball was created from an install "
    #     msg += "prefix with a different directory layout. "
    #     msg += "It cannot be relocated because it "
    #     msg += "uses relative rpaths."
    #     raise NewLayoutException(msg)

    # Extract the tarball into the store root, presumably on the same filesystem.
    # The directory created is the base directory name of the old prefix.
    # Moving the old prefix name to the new prefix location should preserve
    # hard links and symbolic links.
    extract_tmp = os.path.join(spack.store.layout.root, '.tmp')
    mkdirp(extract_tmp)
    extracted_dir = os.path.join(extract_tmp,
                                 old_relative_prefix.split(os.path.sep)[-1])

    # extract the tarball in a temp directory
    with closing(tarfile.open(tarfile_path, 'r')) as tar:
        try:
            tar.extractall(path=extract_tmp)
        except Exception as e:
            shutil.rmtree(extracted_dir)
            raise e
    try:
        shutil.move(extracted_dir, spec.prefix)
    except Exception as e:
        shutil.rmtree(extracted_dir)
        raise e
        tar.extractall(path=tmpdir)
    # get the parent directory of the file .spack/binary_distribution;
    # this should be the directory unpacked from the tarball, whose
    # name is unknown because the prefix naming is unknown
    bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
    workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
    tty.debug('workdir %s' % workdir)
    # install_tree copies hardlinks
    # create a temporary tarfile from prefix and extract it to workdir;
    # tarfile preserves hardlinks
    temp_tarfile_name = tarball_name(spec, '.tar')
    temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
        tar.add(name='%s' % workdir,
                arcname='.')
    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
        tar.extractall(spec.prefix)
    os.remove(temp_tarfile_path)

    # cleanup
    os.remove(tarfile_path)
    os.remove(specfile_path)

@@ -1490,39 +1414,27 @@ def try_direct_fetch(spec, full_hash_match=False, mirrors=None):
    """
    Try to find the spec directly on the configured mirrors
    """
    deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_name = tarball_name(spec, '.spec.json')
    specfile_is_json = True
    specfile_name = tarball_name(spec, '.spec.yaml')
    lenient = not full_hash_match
    found_specs = []
    spec_full_hash = spec.full_hash()

    for mirror in spack.mirror.MirrorCollection(mirrors=mirrors).values():
        buildcache_fetch_url_yaml = url_util.join(
            mirror.fetch_url, _build_cache_relative_path, deprecated_specfile_name)
        buildcache_fetch_url_json = url_util.join(
        buildcache_fetch_url = url_util.join(
            mirror.fetch_url, _build_cache_relative_path, specfile_name)

        try:
            _, _, fs = web_util.read_from_url(buildcache_fetch_url_json)
            _, _, fs = web_util.read_from_url(buildcache_fetch_url)
            fetched_spec_yaml = codecs.getreader('utf-8')(fs).read()
        except (URLError, web_util.SpackWebError, HTTPError) as url_err:
            try:
                _, _, fs = web_util.read_from_url(buildcache_fetch_url_yaml)
                specfile_is_json = False
            except (URLError, web_util.SpackWebError, HTTPError) as url_err_y:
                tty.debug('Did not find {0} on {1}'.format(
                    specfile_name, buildcache_fetch_url_json), url_err)
                tty.debug('Did not find {0} on {1}'.format(
                    specfile_name, buildcache_fetch_url_yaml), url_err_y)
                continue
        specfile_contents = codecs.getreader('utf-8')(fs).read()
            tty.debug('Did not find {0} on {1}'.format(
                specfile_name, buildcache_fetch_url), url_err)
            continue

        # read the spec from the build cache file. All specs in build caches
        # are concrete (as they are built) so we need to mark this spec
        # concrete on read-in.
        if specfile_is_json:
            fetched_spec = Spec.from_json(specfile_contents)
        else:
            fetched_spec = Spec.from_yaml(specfile_contents)
        fetched_spec = Spec.from_yaml(fetched_spec_yaml)
        fetched_spec._mark_concrete()

        # Do not recompute the full hash for the fetched spec, instead just
@@ -1550,7 +1462,7 @@ def get_mirrors_for_spec(spec=None, full_hash_match=False,
            is included in the results.
        mirrors_to_check (dict): Optionally override the configured mirrors
            with the mirrors in this dictionary.
        index_only (bool): Do not attempt direct fetching of ``spec.json``
        index_only (bool): Do not attempt direct fetching of ``spec.yaml``
            files from remote mirrors, only consider the indices.

    Return:
@@ -1600,9 +1512,6 @@ def update_cache_and_get_specs():
    possible, so this method will also attempt to initialize and update the
    local index cache (essentially a no-op if it has been done already and
    nothing has changed on the configured mirrors.)

    Throws:
        FetchCacheError
    """
    binary_index.update()
    return binary_index.get_all_built_specs()
@@ -1750,91 +1659,57 @@ def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
        pkg_name, pkg_version, pkg_hash, pkg_full_hash))
    tty.debug(spec.tree())

    # Try to retrieve the specfile directly, based on the known
    # Try to retrieve the .spec.yaml directly, based on the known
    # format of the name, in order to determine if the package
    # needs to be rebuilt.
    cache_prefix = build_cache_prefix(mirror_url)
    specfile_is_json = True
    specfile_name = tarball_name(spec, '.spec.json')
    deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(cache_prefix, specfile_name)
    deprecated_specfile_path = os.path.join(cache_prefix,
                                            deprecated_specfile_name)
    spec_yaml_file_name = tarball_name(spec, '.spec.yaml')
    file_path = os.path.join(cache_prefix, spec_yaml_file_name)

    result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
        spec.short_spec, '' if rebuild_on_errors else 'not ')

    try:
        _, _, spec_file = web_util.read_from_url(specfile_path)
        _, _, yaml_file = web_util.read_from_url(file_path)
        yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
    except (URLError, web_util.SpackWebError) as url_err:
        try:
            _, _, spec_file = web_util.read_from_url(deprecated_specfile_path)
            specfile_is_json = False
        except (URLError, web_util.SpackWebError) as url_err_y:
            err_msg = [
                'Unable to determine whether {0} needs rebuilding,',
                ' caught exception attempting to read from {1} or {2}.',
            ]
            tty.error(''.join(err_msg).format(
                spec.short_spec,
                specfile_path,
                deprecated_specfile_path))
            tty.debug(url_err)
            tty.debug(url_err_y)
            tty.warn(result_of_error)
            return rebuild_on_errors

    spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
    if not spec_file_contents:
        tty.error('Reading {0} returned nothing'.format(
            specfile_path if specfile_is_json else deprecated_specfile_path))
        err_msg = [
            'Unable to determine whether {0} needs rebuilding,',
            ' caught exception attempting to read from {1}.',
        ]
        tty.error(''.join(err_msg).format(spec.short_spec, file_path))
        tty.debug(url_err)
        tty.warn(result_of_error)
        return rebuild_on_errors

    spec_dict = (sjson.load(spec_file_contents)
                 if specfile_is_json else syaml.load(spec_file_contents))
    if not yaml_contents:
        tty.error('Reading {0} returned nothing'.format(file_path))
        tty.warn(result_of_error)
        return rebuild_on_errors

    try:
        nodes = spec_dict['spec']['nodes']
    except KeyError:
        # Prior node dict format omitted 'nodes' key
        nodes = spec_dict['spec']
    spec_yaml = syaml.load(yaml_contents)

    yaml_spec = spec_yaml['spec']
    name = spec.name

    # In the old format:
    # The "spec" key represents a list of objects, each with a single
    # The "spec" key in the yaml is a list of objects, each with a single
    # key that is the package name. While the list usually just contains
    # a single object, we iterate over the list looking for the object
    # with the name of this concrete spec as a key, out of an abundance
    # of caution.
    # In format version 2:
    # ['spec']['nodes'] is still a list of objects, but with a
    # multitude of keys. The list will commonly contain many objects, and in the
    # case of build specs, it is highly likely that the same name will occur
    # once as the actual package, and then again as the build provenance of that
    # same package. Hence format version 2 matches on the dag hash, not name.
    if nodes and 'name' not in nodes[0]:
        # old style
        cached_pkg_specs = [item[name] for item in nodes if name in item]
    elif nodes and spec_dict['spec']['_meta']['version'] == 2:
        cached_pkg_specs = [item for item in nodes
                            if item[ht.dag_hash.name] == spec.dag_hash()]
    cached_pkg_specs = [item[name] for item in yaml_spec if name in item]
    cached_target = cached_pkg_specs[0] if cached_pkg_specs else None

    # If either the full_hash didn't exist in the specfile, or it
    # If either the full_hash didn't exist in the .spec.yaml file, or it
    # did, but didn't match the one we computed locally, then we should
    # just rebuild. This can be simplified once the dag_hash and the
    # full_hash become the same thing.
    rebuild = False

    if not cached_target:
        reason = 'did not find spec in specfile contents'
        rebuild = True
    elif ht.full_hash.name not in cached_target:
        reason = 'full_hash was missing from remote specfile'
    if not cached_target or 'full_hash' not in cached_target:
        reason = 'full_hash was missing from remote spec.yaml'
        rebuild = True
    else:
        full_hash = cached_target[ht.full_hash.name]
        full_hash = cached_target['full_hash']
        if full_hash != pkg_full_hash:
            reason = 'hash mismatch, remote = {0}, local = {1}'.format(
                full_hash, pkg_full_hash)
@@ -1896,23 +1771,24 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None,

def _download_buildcache_entry(mirror_root, descriptions):
    for description in descriptions:
        description_url = os.path.join(mirror_root, description['url'])
        path = description['path']
        mkdirp(path)
        fail_if_missing = description['required']
        for url in description['url']:
            description_url = os.path.join(mirror_root, url)
            stage = Stage(
                description_url, name="build_cache", path=path, keep=True)
            try:
                stage.fetch()
                break
            except fs.FetchError as e:
                tty.debug(e)
        else:

        mkdirp(path)

        stage = Stage(
            description_url, name="build_cache", path=path, keep=True)

        try:
            stage.fetch()
        except fs.FetchError as e:
            tty.debug(e)
            if fail_if_missing:
                tty.error('Failed to download required url {0}'.format(
                    description_url))
                return False

    return True
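For the list-of-URLs variant of the loop above, the descriptions structure would look roughly like the sketch below: 'url' is a list of candidate relative paths tried in order, and 'required' controls whether a total miss is fatal. All values here are made up for illustration:

descriptions = [{
    'url': ['build_cache/linux-x86_64/pkg.spec.json',   # tried first
            'build_cache/linux-x86_64/pkg.spec.yaml'],  # fallback
    'path': '/tmp/downloads',
    'required': True,
}]
_download_buildcache_entry('https://mirror.example.com', descriptions)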


@@ -2,15 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function

import contextlib
import fnmatch
import functools
import json
import os
import os.path
import re
import sys

try:
@@ -24,403 +17,17 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty

import spack.binary_distribution
import spack.architecture
import spack.config
import spack.detection
import spack.environment
import spack.main
import spack.modules
import spack.paths
import spack.platforms
import spack.repo
import spack.spec
import spack.store
import spack.user_environment
import spack.user_environment as uenv
import spack.util.executable
import spack.util.path
from spack.util.environment import EnvironmentModifications

#: "spack buildcache" command, initialized lazily
_buildcache_cmd = None

#: Map a bootstrapper type to the corresponding class
_bootstrap_methods = {}


def _bootstrapper(type):
    """Decorator to register classes implementing bootstrapping
    methods.

    Args:
        type (str): string identifying the class
    """
    def _register(cls):
        _bootstrap_methods[type] = cls
        return cls
    return _register
|
||||
|
||||
|
||||
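The `_bootstrapper` decorator above is a plain registry pattern: each bootstrapper class declares the `type` it handles, and `_make_bootstrapper` (defined further down) dispatches on that key. A minimal, self-contained sketch of the same pattern, using hypothetical names:

.. code-block:: python

    # Minimal sketch of the registry pattern used above (hypothetical names).
    _methods = {}

    def _register_method(type):
        def _register(cls):
            _methods[type] = cls
            return cls
        return _register

    @_register_method(type='demo')
    class _DemoBootstrapper(object):
        def __init__(self, conf):
            self.conf = conf

    # Dispatch mirrors _make_bootstrapper: look up the type, instantiate.
    bootstrapper = _methods['demo']({'name': 'demo-source'})
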
def _try_import_from_store(module, abstract_spec_str):
    """Return True if the module can be imported from an already
    installed spec, False otherwise.

    Args:
        module: Python module to be imported
        abstract_spec_str: abstract spec that may provide the module
    """
    bincache_platform = spack.platforms.real_host()
    if str(bincache_platform) == 'cray':
        bincache_platform = spack.platforms.linux.Linux()
        with spack.platforms.use_platform(bincache_platform):
            abstract_spec_str = str(spack.spec.Spec(abstract_spec_str))

    # We have to run as part of this python interpreter
    abstract_spec_str += ' ^' + spec_for_current_python()

    installed_specs = spack.store.db.query(abstract_spec_str, installed=True)

    for candidate_spec in installed_specs:
        lib_spd = candidate_spec['python'].package.default_site_packages_dir
        lib64_spd = lib_spd.replace('lib/', 'lib64/')
        module_paths = [
            os.path.join(candidate_spec.prefix, lib_spd),
            os.path.join(candidate_spec.prefix, lib64_spd)
        ]
        sys.path.extend(module_paths)

        try:
            _fix_ext_suffix(candidate_spec)
            if _python_import(module):
                msg = ('[BOOTSTRAP MODULE {0}] The installed spec "{1}/{2}" '
                       'provides the "{0}" Python module').format(
                    module, abstract_spec_str, candidate_spec.dag_hash()
                )
                tty.debug(msg)
                return True
        except Exception as e:
            msg = ('unexpected error while trying to import module '
                   '"{0}" from spec "{1}" [error="{2}"]')
            tty.warn(msg.format(module, candidate_spec, str(e)))
        else:
            msg = "Spec {0} did not provide module {1}"
            tty.warn(msg.format(candidate_spec, module))

        sys.path = sys.path[:-2]

    return False

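The core mechanism in `_try_import_from_store` is ordinary `sys.path` manipulation: extend the path with a candidate's site-packages directories, attempt the import, and roll the appended entries back on failure. A standalone sketch of just that step (the directory below is hypothetical):

.. code-block:: python

    import importlib
    import sys

    def probe_import(module, candidate_dirs):
        sys.path.extend(candidate_dirs)
        try:
            importlib.import_module(module)
            return True
        except ImportError:
            # Roll back exactly the entries that were appended
            del sys.path[-len(candidate_dirs):]
            return False

    probe_import('clingo', ['/opt/bootstrap/lib/python3.8/site-packages'])
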
def _fix_ext_suffix(candidate_spec):
    """Fix the external suffixes of Python extensions on the fly for
    platforms that may need it

    Args:
        candidate_spec (Spec): installed spec with a Python module
            to be checked.
    """
    # Here we map target families to the patterns expected
    # by pristine CPython. Only architectures with known issues
    # are included. Known issues:
    #
    # [RHEL + ppc64le]: https://github.com/spack/spack/issues/25734
    #
    _suffix_to_be_checked = {
        'ppc64le': {
            'glob': '*.cpython-*-powerpc64le-linux-gnu.so',
            're': r'.cpython-[\w]*-powerpc64le-linux-gnu.so',
            'fmt': r'{module}.cpython-{major}{minor}m-powerpc64le-linux-gnu.so'
        }
    }

    # If the current architecture is not problematic return
    generic_target = archspec.cpu.host().family
    if str(generic_target) not in _suffix_to_be_checked:
        return

    # If there's no EXT_SUFFIX (Python < 3.5) or the suffix matches
    # the expectations, return since the package is surely good
    ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
    if ext_suffix is None:
        return

    expected = _suffix_to_be_checked[str(generic_target)]
    if fnmatch.fnmatch(ext_suffix, expected['glob']):
        return

    # If we are here it means the current interpreter expects different names
    # than pristine CPython. So:
    # 1. Find what we have installed
    # 2. Create symbolic links for the other names, if they're not there already

    # Check if standard names are installed and if we have to create
    # links for this interpreter
    standard_extensions = fs.find(candidate_spec.prefix, expected['glob'])
    link_names = [re.sub(expected['re'], ext_suffix, s) for s in standard_extensions]
    for file_name, link_name in zip(standard_extensions, link_names):
        if os.path.exists(link_name):
            continue
        os.symlink(file_name, link_name)

    # Check if this interpreter installed something and we have to create
    # links for a standard CPython interpreter
    non_standard_extensions = fs.find(candidate_spec.prefix, '*' + ext_suffix)
    for abs_path in non_standard_extensions:
        directory, filename = os.path.split(abs_path)
        module = filename.split('.')[0]
        link_name = os.path.join(directory, expected['fmt'].format(
            module=module, major=sys.version_info[0], minor=sys.version_info[1])
        )
        if os.path.exists(link_name):
            continue
        os.symlink(abs_path, link_name)

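The rename step above boils down to a regular-expression substitution from the suffix pristine CPython would use to the suffix the running interpreter reports. An isolated illustration, with a made-up file name and suffix pair:

.. code-block:: python

    import re

    # Pattern and suffix mirror the ppc64le entry above; file name is made up.
    pristine_re = r'.cpython-[\w]*-powerpc64le-linux-gnu.so'
    interpreter_suffix = '.cpython-38-powerpc64le-linux-gnu.so'

    installed = 'clingo.cpython-38m-powerpc64le-linux-gnu.so'
    link_name = re.sub(pristine_re, interpreter_suffix, installed)
    print(link_name)  # clingo.cpython-38-powerpc64le-linux-gnu.so
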
def _executables_in_store(executables, abstract_spec_str):
    """Return True if at least one of the executables can be retrieved from
    a spec in store, False otherwise.

    The different executables must provide the same functionality and are
    "alternate" to each other, i.e. the function will exit True on the first
    executable found.

    Args:
        executables: list of executables to be searched
        abstract_spec_str: abstract spec that may provide the executable
    """
    executables_str = ', '.join(executables)
    msg = "[BOOTSTRAP EXECUTABLES {0}] Try installed specs with query '{1}'"
    tty.debug(msg.format(executables_str, abstract_spec_str))
    installed_specs = spack.store.db.query(abstract_spec_str, installed=True)
    if installed_specs:
        for concrete_spec in installed_specs:
            bin_dir = concrete_spec.prefix.bin
            # If we have a "bin" directory and it contains
            # the executables we are looking for
            if (os.path.exists(bin_dir) and os.path.isdir(bin_dir) and
                    spack.util.executable.which_string(*executables, path=bin_dir)):
                spack.util.environment.path_put_first('PATH', [bin_dir])
                return True
    return False

@_bootstrapper(type='buildcache')
class _BuildcacheBootstrapper(object):
    """Install the software needed during bootstrapping from a buildcache."""
    def __init__(self, conf):
        self.name = conf['name']
        self.url = conf['info']['url']

    @staticmethod
    def _spec_and_platform(abstract_spec_str):
        """Return the spec object and platform we need to use when
        querying the buildcache.

        Args:
            abstract_spec_str: abstract spec string we are looking for
        """
        # This import is local since it is needed only on Cray
        import spack.platforms.linux

        # Try to install from an unsigned binary cache
        abstract_spec = spack.spec.Spec(abstract_spec_str)
        # On Cray we want to use Linux binaries if available from mirrors
        bincache_platform = spack.platforms.real_host()
        if str(bincache_platform) == 'cray':
            bincache_platform = spack.platforms.Linux()
            with spack.platforms.use_platform(bincache_platform):
                abstract_spec = spack.spec.Spec(abstract_spec_str)
        return abstract_spec, bincache_platform

    def _read_metadata(self, package_name):
        """Return metadata about the given package."""
        json_filename = '{0}.json'.format(package_name)
        json_path = os.path.join(
            spack.paths.share_path, 'bootstrap', self.name, json_filename
        )
        with open(json_path) as f:
            data = json.load(f)
        return data

    def _install_by_hash(self, pkg_hash, pkg_sha256, index, bincache_platform):
        global _buildcache_cmd

        if _buildcache_cmd is None:
            _buildcache_cmd = spack.main.SpackCommand('buildcache')

        index_spec = next(x for x in index if x.dag_hash() == pkg_hash)
        # Reconstruct the compiler that we need to use for bootstrapping
        compiler_entry = {
            "modules": [],
            "operating_system": str(index_spec.os),
            "paths": {
                "cc": "/dev/null",
                "cxx": "/dev/null",
                "f77": "/dev/null",
                "fc": "/dev/null"
            },
            "spec": str(index_spec.compiler),
            "target": str(index_spec.target.family)
        }
        with spack.platforms.use_platform(bincache_platform):
            with spack.config.override(
                    'compilers', [{'compiler': compiler_entry}]
            ):
                spec_str = '/' + pkg_hash
                install_args = [
                    'install',
                    '--sha256', pkg_sha256,
                    '--only-root',
                    '-a', '-u', '-o', '-f', spec_str
                ]
                _buildcache_cmd(*install_args, fail_on_error=False)

    def _install_and_test(
            self, abstract_spec, bincache_platform, bincache_data, test_fn
    ):
        # Ensure we see only the buildcache being used to bootstrap
        with spack.config.override(self.mirror_scope):
            # This index is currently needed to get the compiler used to build some
            # specs that we know by dag hash.
            spack.binary_distribution.binary_index.regenerate_spec_cache()
            index = spack.binary_distribution.update_cache_and_get_specs()

            if not index:
                raise RuntimeError("The binary index is empty")

            for item in bincache_data['verified']:
                candidate_spec = item['spec']
                # This will be None for things that don't depend on python
                python_spec = item.get('python', None)
                # Skip specs which are not compatible
                if not abstract_spec.satisfies(candidate_spec):
                    continue

                if python_spec is not None and python_spec not in abstract_spec:
                    continue

                for pkg_name, pkg_hash, pkg_sha256 in item['binaries']:
                    # TODO: undo installations that didn't complete?
                    self._install_by_hash(
                        pkg_hash, pkg_sha256, index, bincache_platform
                    )

                if test_fn():
                    return True
        return False

    @property
    def mirror_scope(self):
        return spack.config.InternalConfigScope(
            'bootstrap_buildcache', {'mirrors:': {self.name: self.url}}
        )

    def try_import(self, module, abstract_spec_str):
        test_fn = functools.partial(_try_import_from_store, module, abstract_spec_str)
        if test_fn():
            return True

        tty.info("Bootstrapping {0} from pre-built binaries".format(module))
        abstract_spec, bincache_platform = self._spec_and_platform(
            abstract_spec_str + ' ^' + spec_for_current_python()
        )
        data = self._read_metadata(module)
        return self._install_and_test(
            abstract_spec, bincache_platform, data, test_fn
        )

    def try_search_path(self, executables, abstract_spec_str):
        test_fn = functools.partial(
            _executables_in_store, executables, abstract_spec_str
        )
        if test_fn():
            return True

        abstract_spec, bincache_platform = self._spec_and_platform(
            abstract_spec_str
        )
        tty.info("Bootstrapping {0} from pre-built binaries".format(abstract_spec.name))
        data = self._read_metadata(abstract_spec.name)
        return self._install_and_test(
            abstract_spec, bincache_platform, data, test_fn
        )


@_bootstrapper(type='install')
class _SourceBootstrapper(object):
    """Install the software needed during bootstrapping from sources."""
    def __init__(self, conf):
        self.conf = conf

    @staticmethod
    def try_import(module, abstract_spec_str):
        if _try_import_from_store(module, abstract_spec_str):
            return True

        tty.info("Bootstrapping {0} from sources".format(module))

        # If we compile code from sources detecting a few build tools
        # might reduce compilation time by a fair amount
        _add_externals_if_missing()

        # Try to build and install from sources
        with spack_python_interpreter():
            # Add hint to use frontend operating system on Cray
            if str(spack.platforms.host()) == 'cray':
                abstract_spec_str += ' os=fe'

            concrete_spec = spack.spec.Spec(
                abstract_spec_str + ' ^' + spec_for_current_python()
            )

            if module == 'clingo':
                # TODO: remove when the old concretizer is deprecated
                concrete_spec._old_concretize(deprecation_warning=False)
            else:
                concrete_spec.concretize()

        msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
        tty.debug(msg.format(module, abstract_spec_str))

        # Install the spec that should make the module importable
        concrete_spec.package.do_install(fail_fast=True)

        return _try_import_from_store(module, abstract_spec_str=abstract_spec_str)

    def try_search_path(self, executables, abstract_spec_str):
        if _executables_in_store(executables, abstract_spec_str):
            return True

        # If we compile code from sources detecting a few build tools
        # might reduce compilation time by a fair amount
        _add_externals_if_missing()

        # Add hint to use frontend operating system on Cray
        if str(spack.platforms.host()) == 'cray':
            abstract_spec_str += ' os=fe'

        concrete_spec = spack.spec.Spec(abstract_spec_str)
        concrete_spec.concretize()

        msg = "[BOOTSTRAP GnuPG] Try installing '{0}' from sources"
        tty.debug(msg.format(abstract_spec_str))
        concrete_spec.package.do_install()
        return _executables_in_store(executables, abstract_spec_str)


def _make_bootstrapper(conf):
    """Return a bootstrap object built according to the
    configuration argument
    """
    btype = conf['type']
    return _bootstrap_methods[btype](conf)


def _source_is_trusted(conf):
    trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
    if name not in trusted:
        return False
    return trusted[name]

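Taken together, the two helpers above mean that each entry of `bootstrap:sources` carries at least a `name` and a `type`, and is consulted only when the `bootstrap:trusted` map flags that name. A hedged sketch of the data they consume (the dictionaries below only mimic what `spack.config.get` would return; the URL is illustrative):

.. code-block:: python

    sources = [
        {'name': 'github-actions', 'type': 'buildcache',
         'info': {'url': 'https://example.com/bootstrap-mirror'}},
        {'name': 'spack-install', 'type': 'install'},
    ]
    trusted = {'github-actions': True}

    def source_is_trusted(conf):
        return trusted.get(conf['name'], False)

    usable = [s for s in sources if source_is_trusted(s)]  # first entry only
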
def spec_for_current_python():
    """For bootstrapping purposes we are just interested in the Python
@@ -447,7 +54,7 @@ def spack_python_interpreter():
    which Spack is currently running as the only Python external spec
    available.
    """
    python_prefix = sys.exec_prefix
    python_prefix = os.path.dirname(os.path.dirname(sys.executable))
    external_python = spec_for_current_python()

    entry = {
@@ -461,106 +68,72 @@ def spack_python_interpreter():
    yield


def ensure_module_importable_or_raise(module, abstract_spec=None):
    """Make the requested module available for import, or raise.

    This function tries to import a Python module in the current interpreter
    using, in order, the methods configured in bootstrap.yaml.

    If none of the methods succeed, an exception is raised. The function exits
    on first success.

    Args:
        module (str): module to be imported in the current interpreter
        abstract_spec (str): abstract spec that might provide the module. If not
            given it defaults to "module"

    Raises:
        ImportError: if the module couldn't be imported
    """
    # If we can import it already, that's great
    tty.debug("[BOOTSTRAP MODULE {0}] Try importing from Python".format(module))
    if _python_import(module):
        return

    abstract_spec = abstract_spec or module
    source_configs = spack.config.get('bootstrap:sources', [])

    errors = {}

    for current_config in source_configs:
        if not _source_is_trusted(current_config):
            msg = ('[BOOTSTRAP MODULE {0}] Skipping source "{1}" since it is '
                   'not trusted').format(module, current_config['name'])
            tty.debug(msg)
            continue

        b = _make_bootstrapper(current_config)
        try:
            if b.try_import(module, abstract_spec):
                return
        except Exception as e:
            msg = '[BOOTSTRAP MODULE {0}] Unexpected error "{1}"'
            tty.debug(msg.format(module, str(e)))
            errors[current_config['name']] = e

    # We couldn't import in any way, so raise an import error
    msg = 'cannot bootstrap the "{0}" Python module'.format(module)
    if abstract_spec:
        msg += ' from spec "{0}"'.format(abstract_spec)
    msg += ' due to the following failures:\n'
    for method in errors:
        err = errors[method]
        msg += "    '{0}' raised {1}: {2}\n".format(
            method, err.__class__.__name__, str(err))
    msg += '    Please run `spack -d spec zlib` for more verbose error messages'
    raise ImportError(msg)

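Usage mirrors `ensure_clingo_importable_or_raise`, defined later in this file; callers simply wrap the entry point and let the ImportError carry the aggregated failure report:

.. code-block:: python

    try:
        ensure_module_importable_or_raise(
            module='clingo', abstract_spec='clingo-bootstrap@spack+python')
        import clingo  # safe: the call above returned
    except ImportError as e:
        print('bootstrap failed: {0}'.format(e))
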
def ensure_executables_in_path_or_raise(executables, abstract_spec):
    """Ensure that some executables are in path or raise.

    Args:
        executables (list): list of executables to be searched in the PATH,
            in order. The function exits on the first one found.
        abstract_spec (str): abstract spec that provides the executables

    Raises:
        RuntimeError: if the executables cannot be ensured to be in PATH
    """
    if spack.util.executable.which_string(*executables):
        return

    executables_str = ', '.join(executables)
    source_configs = spack.config.get('bootstrap:sources', [])
    for current_config in source_configs:
        if not _source_is_trusted(current_config):
            msg = ('[BOOTSTRAP EXECUTABLES {0}] Skipping source "{1}" since it is '
                   'not trusted').format(executables_str, current_config['name'])
            tty.debug(msg)
            continue

        b = _make_bootstrapper(current_config)
        try:
            if b.try_search_path(executables, abstract_spec):
                return
        except Exception as e:
            msg = '[BOOTSTRAP EXECUTABLES {0}] Unexpected error "{1}"'
            tty.debug(msg.format(executables_str, str(e)))

    # We couldn't find the executables in any way, so raise an error
    msg = 'cannot bootstrap any of the {0} executables'.format(executables_str)
    if abstract_spec:
        msg += ' from spec "{0}"'.format(abstract_spec)
    raise RuntimeError(msg)

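The executable-oriented entry point is used the same way; `ensure_gpg_in_path_or_raise`, further down, is the canonical caller:

.. code-block:: python

    # The first executable found in PATH wins; otherwise each trusted
    # bootstrapping method is tried in order before raising RuntimeError.
    ensure_executables_in_path_or_raise(
        executables=['gpg2', 'gpg'], abstract_spec='gnupg@2.3:')
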
def _python_import(module):
def make_module_available(module, spec=None, install=False):
    """Ensure module is importable"""
    # If we already can import it, that's great
    try:
        __import__(module)
        return
    except ImportError:
        return False
    return True
        pass

    # If it's already installed, use it
    # Search by spec
    spec = spack.spec.Spec(spec or module)

    # We have to run as part of this python
    # We can constrain by a shortened version in place of a version range
    # because this spec is only used for querying or as a placeholder to be
    # replaced by an external that already has a concrete version. This syntax
    # is not sufficient when concretizing without an external, as it will
    # concretize to python@X.Y instead of python@X.Y.Z
    python_requirement = '^' + spec_for_current_python()
    spec.constrain(python_requirement)
    installed_specs = spack.store.db.query(spec, installed=True)

    for ispec in installed_specs:
        lib_spd = ispec['python'].package.default_site_packages_dir
        lib64_spd = lib_spd.replace('lib/', 'lib64/')
        module_paths = [
            os.path.join(ispec.prefix, lib_spd),
            os.path.join(ispec.prefix, lib64_spd)
        ]
        try:
            sys.path.extend(module_paths)
            __import__(module)
            return
        except ImportError:
            tty.warn("Spec %s did not provide module %s" % (ispec, module))
            sys.path = sys.path[:-2]

    def _raise_error(module_name, module_spec):
        error_msg = 'cannot import module "{0}"'.format(module_name)
        if module_spec:
            error_msg += ' from spec "{0}"'.format(module_spec)
        raise ImportError(error_msg)

    if not install:
        _raise_error(module, spec)

    with spack_python_interpreter():
        # We will install for ourselves, using this python if needed
        # Concretize the spec
        spec.concretize()
    spec.package.do_install()

    lib_spd = spec['python'].package.default_site_packages_dir
    lib64_spd = lib_spd.replace('lib/', 'lib64/')
    module_paths = [
        os.path.join(spec.prefix, lib_spd),
        os.path.join(spec.prefix, lib64_spd)
    ]
    try:
        sys.path.extend(module_paths)
        __import__(module)
        return
    except ImportError:
        sys.path = sys.path[:-2]
        _raise_error(module, spec)


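`_python_import` is the small probe everything above builds on; a standalone equivalent behaves like this:

.. code-block:: python

    def python_import(module):
        try:
            __import__(module)
        except ImportError:
            return False
        return True

    assert python_import('os')                       # stdlib: importable
    assert not python_import('no_such_module_xyz')   # hypothetical: missing
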
def get_executable(exe, spec=None, install=False):
@@ -574,8 +147,7 @@ def get_executable(exe, spec=None, install=False):
    When ``install`` is True, Spack will use the python used to run Spack as an
    external. The ``install`` option should only be used with packages that
    install quickly (when using external python) or are guaranteed by Spack
    organization to be in a binary mirror (clingo).
    """
    organization to be in a binary mirror (clingo)."""
    # Search the system first
    runner = spack.util.executable.which(exe)
    if runner:
@@ -592,9 +164,7 @@ def get_executable(exe, spec=None, install=False):
            ret = spack.util.executable.Executable(exe_path[0])
            envmod = EnvironmentModifications()
            for dep in ispec.traverse(root=True, order='post'):
                envmod.extend(
                    spack.user_environment.environment_modifications_for_spec(dep)
                )
                envmod.extend(uenv.environment_modifications_for_spec(dep))
            ret.add_default_envmod(envmod)
            return ret
    else:
@@ -623,9 +193,7 @@ def _raise_error(executable, exe_spec):
        ret = spack.util.executable.Executable(exe_path[0])
        envmod = EnvironmentModifications()
        for dep in spec.traverse(root=True, order='post'):
            envmod.extend(
                spack.user_environment.environment_modifications_for_spec(dep)
            )
            envmod.extend(uenv.environment_modifications_for_spec(dep))
        ret.add_default_envmod(envmod)
        return ret

@@ -637,12 +205,8 @@ def _bootstrap_config_scopes():
    config_scopes = [
        spack.config.InternalConfigScope('_builtin', spack.config.config_defaults)
    ]
    configuration_paths = (
        spack.config.configuration_defaults_path,
        ('bootstrap', _config_path())
    )
    for name, path in configuration_paths:
        platform = spack.platforms.host().name
    for name, path in spack.config.configuration_paths:
        platform = spack.architecture.platform().name
        platform_scope = spack.config.ConfigScope(
            '/'.join([name, platform]), os.path.join(path, platform)
        )
@@ -654,62 +218,18 @@ def _bootstrap_config_scopes():
    return config_scopes


def _add_compilers_if_missing():
    arch = spack.spec.ArchSpec.frontend_arch()
    if not spack.compilers.compilers_for_arch(arch):
        new_compilers = spack.compilers.find_new_compilers()
        if new_compilers:
            spack.compilers.add_compilers_to_config(new_compilers, init_config=False)


def _add_externals_if_missing():
    search_list = [
        # clingo
        spack.repo.path.get('cmake'),
        spack.repo.path.get('bison'),
        # GnuPG
        spack.repo.path.get('gawk')
    ]
    detected_packages = spack.detection.by_executable(search_list)
    spack.detection.update_configuration(detected_packages, scope='bootstrap')


@contextlib.contextmanager
def ensure_bootstrap_configuration():
    bootstrap_store_path = store_path()
    user_configuration = _read_and_sanitize_configuration()
    with spack.environment.no_active_environment():
        with spack.platforms.use_platform(spack.platforms.real_host()):
            with spack.repo.use_repositories(spack.paths.packages_path):
                with spack.store.use_store(bootstrap_store_path):
                    # Default configuration scopes excluding command line
                    # and builtin but accounting for platform specific scopes
                    config_scopes = _bootstrap_config_scopes()
                    with spack.config.use_configuration(*config_scopes):
                        # We may need to compile code from sources, so ensure we have
                        # compilers for the current platform before switching parts.
                        _add_compilers_if_missing()
                        spack.config.set('bootstrap', user_configuration['bootstrap'])
                        spack.config.set('config', user_configuration['config'])
                        with spack.modules.disable_modules():
                            with spack_python_interpreter():
                                yield


def _read_and_sanitize_configuration():
    """Read the user configuration that needs to be reused for bootstrapping
    and remove the entries that should not be copied over.
    """
    # Read the "config" section but pop the install tree (the entry will not be
    # considered due to the use_store context manager, so it will be confusing
    # to have it in the configuration).
    config_yaml = spack.config.get('config')
    config_yaml.pop('install_tree', None)
    user_configuration = {
        'bootstrap': spack.config.get('bootstrap'),
        'config': config_yaml
    }
    return user_configuration
    with spack.architecture.use_platform(spack.architecture.real_platform()):
        with spack.repo.use_repositories(spack.paths.packages_path):
            with spack.store.use_store(bootstrap_store_path):
                # Default configuration scopes excluding command line
                # and builtin but accounting for platform specific scopes
                config_scopes = _bootstrap_config_scopes()
                with spack.config.use_configuration(*config_scopes):
                    with spack_python_interpreter():
                        yield


def store_path():
@@ -720,69 +240,34 @@ def store_path():
              'Use "spack bootstrap enable" to enable it')
        raise RuntimeError(msg)

    return _store_path()


def _root_path():
    """Root of all the bootstrap related folders"""
    return spack.config.get(
        'bootstrap:root', spack.paths.default_user_bootstrap_path
    bootstrap_root_path = spack.config.get(
        'bootstrap:root', spack.paths.user_bootstrap_path
    )


def _store_path():
    bootstrap_root_path = _root_path()
    return spack.util.path.canonicalize_path(
    bootstrap_store_path = spack.util.path.canonicalize_path(
        os.path.join(bootstrap_root_path, 'store')
    )
    return bootstrap_store_path


def _config_path():
    bootstrap_root_path = _root_path()
    return spack.util.path.canonicalize_path(
        os.path.join(bootstrap_root_path, 'config')
    )
def clingo_root_spec():
    # Construct the root spec that will be used to bootstrap clingo
    spec_str = 'clingo-bootstrap@spack+python'


def _root_spec(spec_str):
    """Add a proper compiler and target to a spec used during bootstrapping.

    Args:
        spec_str (str): spec to be bootstrapped. Must be without compiler and target.
    """
    # Add a proper compiler hint to the root spec. We use GCC for
    # everything but macOS.
    if str(spack.platforms.host()) == 'darwin':
    if str(spack.architecture.platform()) == 'darwin':
        spec_str += ' %apple-clang'
    else:
        spec_str += ' %gcc'

    target = archspec.cpu.host().family
    spec_str += ' target={0}'.format(target)
    # Add hint to use frontend operating system on Cray
    if str(spack.architecture.platform()) == 'cray':
        spec_str += ' os=fe'

    tty.debug('[BOOTSTRAP ROOT SPEC] {0}'.format(spec_str))
    return spec_str
    # Add the generic target
    generic_target = archspec.cpu.host().family
    spec_str += ' target={0}'.format(str(generic_target))

    tty.debug('[BOOTSTRAP ROOT SPEC] clingo: {0}'.format(spec_str))

def clingo_root_spec():
    """Return the root spec used to bootstrap clingo"""
    return _root_spec('clingo-bootstrap@spack+python')


def ensure_clingo_importable_or_raise():
    """Ensure that the clingo module is available for import."""
    ensure_module_importable_or_raise(
        module='clingo', abstract_spec=clingo_root_spec()
    )


def gnupg_root_spec():
    """Return the root spec used to bootstrap GnuPG"""
    return _root_spec('gnupg@2.3:')


def ensure_gpg_in_path_or_raise():
    """Ensure gpg or gpg2 are in the PATH or raise."""
    ensure_executables_in_path_or_raise(
        executables=['gpg2', 'gpg'], abstract_spec=gnupg_root_spec(),
    )
    return spack.spec.Spec(spec_str)
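For concreteness: on a typical x86_64 Linux host, `_root_spec('clingo-bootstrap@spack+python')` would produce a string like `clingo-bootstrap@spack+python %gcc target=x86_64`; the exact target family is host-dependent and comes from archspec. A hedged sketch of that computation:

.. code-block:: python

    def root_spec(spec_str, host_platform, target_family):
        compiler = '%apple-clang' if host_platform == 'darwin' else '%gcc'
        return '{0} {1} target={2}'.format(spec_str, compiler, target_family)

    print(root_spec('clingo-bootstrap@spack+python', 'linux', 'x86_64'))
    # clingo-bootstrap@spack+python %gcc target=x86_64
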
@@ -49,6 +49,7 @@
from llnl.util.tty.color import cescape, colorize
from llnl.util.tty.log import MultiProcessFd

import spack.architecture as arch
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
@@ -56,12 +57,10 @@
import spack.main
import spack.package
import spack.paths
import spack.platforms
import spack.repo
import spack.schema.environment
import spack.store
import spack.subprocess_context
import spack.user_environment
import spack.util.path
from spack.error import NoHeadersError, NoLibrariesError
from spack.util.cpus import cpus_available
@@ -70,8 +69,8 @@
    env_flag,
    filter_system_paths,
    get_path,
    inspect_path,
    is_system_path,
    preserve_environment,
    system_dirs,
    validate,
)
@@ -147,14 +146,6 @@ def __call__(self, *args, **kwargs):
        return super(MakeExecutable, self).__call__(*args, **kwargs)


def _on_cray():
    host_platform = spack.platforms.host()
    host_os = host_platform.operating_system('default_os')
    on_cray = str(host_platform) == 'cray'
    using_cnl = re.match(r'cnl\d+', str(host_os))
    return on_cray, using_cnl


def clean_environment():
    # Stuff in here sanitizes the build environment to eliminate
    # anything the user has set that may interfere. We apply it immediately
@@ -178,9 +169,6 @@ def clean_environment():

    env.unset('CMAKE_PREFIX_PATH')

    # Affects GNU make, can e.g. indirectly inhibit enabling parallel build
    env.unset('MAKEFLAGS')

    # Avoid that libraries of build dependencies get hijacked.
    env.unset('LD_PRELOAD')
    env.unset('DYLD_INSERT_LIBRARIES')
@@ -189,7 +177,9 @@ def clean_environment():
    # interference with Spack dependencies.
    # CNL requires these variables to be set (or at least some of them,
    # depending on the CNL version).
    on_cray, using_cnl = _on_cray()
    hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
    on_cray = str(hostarch.platform) == 'cray'
    using_cnl = re.match(r'cnl\d+', str(hostarch.os))
    if on_cray and not using_cnl:
        env.unset('CRAY_LD_LIBRARY_PATH')
        for varname in os.environ.keys():
@@ -232,7 +222,7 @@ def clean_environment():
            if '/macports/' in p:
                env.remove_path('PATH', p)

    return env
    env.apply_modifications()


def set_compiler_environment_variables(pkg, env):
@@ -765,77 +755,72 @@ def setup_package(pkg, dirty, context='build'):

    set_module_variables_for_package(pkg)

    # Keep track of env changes from packages separately, since we want to
    # issue warnings when packages make "suspicious" modifications.
    env_base = EnvironmentModifications() if dirty else clean_environment()
    env_mods = EnvironmentModifications()
    env = EnvironmentModifications()

    if not dirty:
        clean_environment()

    # setup compilers for build contexts
    need_compiler = context == 'build' or (context == 'test' and
                                           pkg.test_requires_compiler)
    if need_compiler:
        set_compiler_environment_variables(pkg, env_mods)
        set_wrapper_variables(pkg, env_mods)
        set_compiler_environment_variables(pkg, env)
        set_wrapper_variables(pkg, env)

    env_mods.extend(modifications_from_dependencies(
    env.extend(modifications_from_dependencies(
        pkg.spec, context, custom_mods_only=False))

    # architecture specific setup
    platform = spack.platforms.by_name(pkg.spec.architecture.platform)
    target = platform.target(pkg.spec.architecture.target)
    platform.setup_platform_environment(pkg, env_mods)
    pkg.architecture.platform.setup_platform_environment(pkg, env)

    if context == 'build':
        pkg.setup_build_environment(env_mods)
        pkg.setup_build_environment(env)

        if (not dirty) and (not env_mods.is_unset('CPATH')):
        if (not dirty) and (not env.is_unset('CPATH')):
            tty.debug("A dependency has updated CPATH, this may lead pkg-"
                      "config to assume that the package is part of the system"
                      " includes and omit it when invoked with '--cflags'.")
    elif context == 'test':
        env_mods.extend(
            inspect_path(
                pkg.spec.prefix,
                spack.user_environment.prefix_inspections(pkg.spec.platform),
                exclude=is_system_path
            )
        )
        pkg.setup_run_environment(env_mods)
        env_mods.prepend_path('PATH', '.')
        pkg.setup_run_environment(env)
        env.prepend_path('PATH', '.')

    # First apply the clean environment changes
    env_base.apply_modifications()
    # Loading modules, in particular if they are meant to be used outside
    # of Spack, can change environment variables that are relevant to the
    # build of packages. To avoid a polluted environment, preserve the
    # value of a few, selected, environment variables
    # With the current ordering of environment modifications, this is strictly
    # unnecessary. Modules affecting these variables will be overwritten anyway
    with preserve_environment('CC', 'CXX', 'FC', 'F77'):
        # All module loads that otherwise would belong in previous
        # functions have to occur after the env object has its
        # modifications applied. Otherwise the environment modifications
        # could undo module changes, such as unsetting LD_LIBRARY_PATH
        # after a module changes it.
        if need_compiler:
            for mod in pkg.compiler.modules:
                # Fixes issue https://github.com/spack/spack/issues/3153
                if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
                    load_module("cce")
                load_module(mod)

    # Load modules on an already clean environment, just before applying Spack's
    # own environment modifications. This ensures Spack controls CC/CXX/... variables.
    if need_compiler:
        for mod in pkg.compiler.modules:
            load_module(mod)

    # kludge to handle cray libsci being automatically loaded by PrgEnv
    # modules on cray platform. Module unload does no damage when
    # unnecessary
    on_cray, _ = _on_cray()
    if on_cray:
        # kludge to handle cray libsci being automatically loaded by PrgEnv
        # modules on cray platform. Module unload does no damage when
        # unnecessary
        module('unload', 'cray-libsci')

    if target.module_name:
        load_module(target.module_name)
    if pkg.architecture.target.module_name:
        load_module(pkg.architecture.target.module_name)

    load_external_modules(pkg)
    load_external_modules(pkg)

    implicit_rpaths = pkg.compiler.implicit_rpaths()
    if implicit_rpaths:
        env_mods.set('SPACK_COMPILER_IMPLICIT_RPATHS',
                     ':'.join(implicit_rpaths))
        env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
                ':'.join(implicit_rpaths))

    # Make sure nothing's strange about the Spack environment.
    validate(env_mods, tty.warn)
    env_mods.apply_modifications()

    # Return all env modifications we controlled (excluding module related ones)
    env_base.extend(env_mods)
    return env_base
    validate(env, tty.warn)
    env.apply_modifications()


def _make_runnable(pkg, env):
@@ -1023,8 +1008,8 @@ def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,

    if not kwargs.get('fake', False):
        kwargs['unmodified_env'] = os.environ.copy()
        kwargs['env_modifications'] = setup_package(
            pkg, dirty=kwargs.get('dirty', False), context=context)
        setup_package(pkg, dirty=kwargs.get('dirty', False),
                      context=context)
    return_value = function(pkg, kwargs)
    child_pipe.send(return_value)

@@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import itertools
import os
import os.path
import stat
@@ -13,8 +14,6 @@
import llnl.util.tty as tty
from llnl.util.filesystem import force_remove, working_dir

from spack.build_environment import InstallError
from spack.directives import depends_on
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable

@@ -55,24 +54,9 @@ class AutotoolsPackage(PackageBase):
    #: This attribute is used in UI queries that need to know the build
    #: system base class
    build_system_class = 'AutotoolsPackage'

    @property
    def patch_config_files(self):
        """
        Whether or not to update old ``config.guess`` and ``config.sub`` files
        distributed with the tarball. This currently only applies to
        ``ppc64le:``, ``aarch64:``, and ``riscv64`` target architectures. The
        substitutes are taken from the ``gnuconfig`` package, which is
        automatically added as a build dependency for these architectures. In
        case system versions of these config files are required, the
        ``gnuconfig`` package can be marked external with a prefix pointing to
        the directory containing the system ``config.guess`` and ``config.sub``
        files.
        """
        return (self.spec.satisfies('target=ppc64le:')
                or self.spec.satisfies('target=aarch64:')
                or self.spec.satisfies('target=riscv64:'))

    #: Whether or not to update ``config.guess`` and ``config.sub`` on old
    #: architectures
    patch_config_files = True
    #: Whether or not to update ``libtool``
    #: (currently only for Arm/Clang/Fujitsu compilers)
    patch_libtool = True
@@ -99,10 +83,6 @@ def patch_config_files(self):
    #: after the installation. If True instead it installs them.
    install_libtool_archives = False

    depends_on('gnuconfig', type='build', when='target=ppc64le:')
    depends_on('gnuconfig', type='build', when='target=aarch64:')
    depends_on('gnuconfig', type='build', when='target=riscv64:')

    @property
    def _removed_la_files_log(self):
        """File containing the list of removed libtool archives"""
@@ -124,10 +104,12 @@ def _do_patch_config_files(self):
        """Some packages ship with older config.guess/config.sub files and
        need to have these updated when installed on a newer architecture.
        In particular, config.guess fails for PPC64LE for versions prior
        to a 2013-06-10 build date (automake 1.13.4) and for ARM (aarch64) and
        RISC-V (riscv64).
        to a 2013-06-10 build date (automake 1.13.4) and for ARM (aarch64).
        """
        if not self.patch_config_files:
        if not self.patch_config_files or (
                not self.spec.satisfies('target=ppc64le:') and
                not self.spec.satisfies('target=aarch64:')
        ):
            return

        # TODO: Expand this to select the 'config.sub'-compatible architecture
@@ -137,8 +119,6 @@ def _do_patch_config_files(self):
            config_arch = 'ppc64le'
        elif self.spec.satisfies('target=aarch64:'):
            config_arch = 'aarch64'
        elif self.spec.satisfies('target=riscv64:'):
            config_arch = 'riscv64'
        else:
            config_arch = 'local'

@@ -158,69 +138,39 @@ def runs_ok(script_abs_path):

            return True

        # Get the list of files that need to be patched
        to_be_patched = fs.find(self.stage.path, files=['config.sub', 'config.guess'])
        # Compute the list of files that need to be patched
        search_dir = self.stage.path
        to_be_patched = fs.find(
            search_dir, files=['config.sub', 'config.guess'], recursive=True
        )
        to_be_patched = [f for f in to_be_patched if not runs_ok(f)]

        # If there are no files to be patched, return early
        if not to_be_patched:
            return

        # Otherwise, require `gnuconfig` to be a build dependency
        self._require_build_deps(
            pkgs=['gnuconfig'],
            spec=self.spec,
            err="Cannot patch config files")
        # Directories where to search for files to be copied
        # over the failing ones
        good_file_dirs = ['/usr/share']
        if 'automake' in self.spec:
            good_file_dirs.insert(0, self.spec['automake'].prefix)

        # Get the config files we need to patch (config.sub / config.guess).
        # List of files to be found in the directories above
        to_be_found = list(set(os.path.basename(f) for f in to_be_patched))
        gnuconfig = self.spec['gnuconfig']
        gnuconfig_dir = gnuconfig.prefix

        # An external gnuconfig may not have a prefix.
        if gnuconfig_dir is None:
            raise InstallError("Spack could not find substitutes for GNU config "
                               "files because no prefix is available for the "
                               "`gnuconfig` package. Make sure you set a prefix "
                               "path instead of modules for external `gnuconfig`.")

        candidates = fs.find(gnuconfig_dir, files=to_be_found, recursive=False)

        # For external packages the user may have specified an incorrect prefix,
        # otherwise the installation is just corrupt.
        if not candidates:
            msg = ("Spack could not find `config.guess` and `config.sub` "
                   "files in the `gnuconfig` prefix `{0}`. This means the "
                   "`gnuconfig` package is broken").format(gnuconfig_dir)
            if gnuconfig.external:
                msg += (" or the `gnuconfig` package prefix is misconfigured as"
                        " an external package")
            raise InstallError(msg)

        # Filter working substitutes
        candidates = [f for f in candidates if runs_ok(f)]
        substitutes = {}
        for candidate in candidates:
            config_file = os.path.basename(candidate)
            substitutes[config_file] = candidate
            to_be_found.remove(config_file)
        for directory in good_file_dirs:
            candidates = fs.find(directory, files=to_be_found, recursive=True)
            candidates = [f for f in candidates if runs_ok(f)]
            for name, good_files in itertools.groupby(
                    candidates, key=os.path.basename
            ):
                substitutes[name] = next(good_files)
                to_be_found.remove(name)

        # Check that we found everything we needed
        if to_be_found:
            msg = """\
Spack could not find working replacements for the following autotools config
files: {0}.

To resolve this problem, please try the following:
1. Try to rebuild with `patch_config_files = False` in the package `{1}`, to
   rule out that Spack tries to replace config files not used by the build.
2. Verify that the `gnuconfig` package is up-to-date.
3. On some systems you need to use system-provided `config.guess` and `config.sub`
   files. In this case, mark `gnuconfig` as a non-buildable external package,
   and set the prefix to the directory containing the `config.guess` and
   `config.sub` files.
"""
            raise InstallError(msg.format(', '.join(to_be_found), self.name))
            msg = 'Failed to find suitable substitutes for {0}'
            raise RuntimeError(msg.format(', '.join(to_be_found)))

        # Copy the good files over the bad ones
        for abs_path in to_be_patched:
@@ -302,41 +252,17 @@ def delete_configure_to_force_update(self):
        if self.force_autoreconf:
            force_remove(self.configure_abs_path)

    def _require_build_deps(self, pkgs, spec, err):
        """Require `pkgs` to be direct build dependencies of `spec`. Raises a
        RuntimeError with a helpful error message when any dep is missing."""

        build_deps = [d.name for d in spec.dependencies(deptype='build')]
        missing_deps = [x for x in pkgs if x not in build_deps]

        if not missing_deps:
            return

        # Raise an exception on missing deps.
        msg = ("{0}: missing dependencies: {1}.\n\nPlease add "
               "the following lines to the package:\n\n"
               .format(err, ", ".join(missing_deps)))

        for dep in missing_deps:
            msg += ("    depends_on('{0}', type='build', when='@{1}')\n"
                    .format(dep, spec.version))

        msg += "\nUpdate the version (when='@{0}') as needed.".format(spec.version)
        raise RuntimeError(msg)

    def autoreconf(self, spec, prefix):
        """Not needed usually, configure should be already there"""

        # If configure exists nothing needs to be done
        if os.path.exists(self.configure_abs_path):
            return

        # Else try to regenerate it, which requires a few build dependencies
        self._require_build_deps(
            pkgs=['autoconf', 'automake', 'libtool'],
            spec=spec,
            err="Cannot generate configure")

        # Else try to regenerate it
        autotools = ['m4', 'autoconf', 'automake', 'libtool']
        missing = [x for x in autotools if x not in spec]
        if missing:
            msg = 'Cannot generate configure: missing dependencies {0}'
            raise RuntimeError(msg.format(missing))
        tty.msg('Configure script not found: trying to generate it')
        tty.warn('*********************************************************')
        tty.warn('* If the default procedure fails, consider implementing *')
@@ -447,15 +373,14 @@ def _activate_or_not(
            name,
            activation_word,
            deactivation_word,
            activation_value=None,
            variant=None
            activation_value=None
    ):
        """This function contains the current implementation details of
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.with_or_without` and
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.enable_or_disable`.

        Args:
            name (str): name of the option that is being activated or not
            name (str): name of the variant that is being processed
            activation_word (str): the default activation word ('with' in the
                case of ``with_or_without``)
            deactivation_word (str): the default deactivation word ('without'
@@ -467,8 +392,6 @@ def _activate_or_not(

                The special value 'prefix' can also be assigned and will return
                ``spec[name].prefix`` as activation parameter.
            variant (str): name of the variant that is being processed
                (if different from option name)

        Examples:

@@ -478,7 +401,6 @@ def _activate_or_not(

            variant('foo', values=('x', 'y'), description='')
            variant('bar', default=True, description='')
            variant('ba_z', default=True, description='')

        calling this function like:

@@ -488,13 +410,12 @@ def _activate_or_not(
                'foo', 'with', 'without', activation_value='prefix'
            )
            _activate_or_not('bar', 'with', 'without')
            _activate_or_not('ba-z', 'with', 'without', variant='ba_z')

        will generate the following configuration options:

        .. code-block:: console

            --with-x=<prefix-to-x> --without-y --with-bar --with-ba-z
            --with-x=<prefix-to-x> --without-y --with-bar

        for ``<spec-name> foo=x +bar``

@@ -511,37 +432,32 @@ def _activate_or_not(
        if activation_value == 'prefix':
            activation_value = lambda x: spec[x].prefix

        variant = variant or name

        # Defensively check that the name passed as argument is among
        # the variants
        if variant not in self.variants:
        if name not in self.variants:
            msg = '"{0}" is not a variant of "{1}"'
            raise KeyError(msg.format(variant, self.name))
            raise KeyError(msg.format(name, self.name))

        # Create a list of pairs. Each pair includes a configuration
        # option and whether or not that option is activated
        variant_desc, _ = self.variants[variant]
        if set(variant_desc.values) == set((True, False)):
        if set(self.variants[name].values) == set((True, False)):
            # BoolValuedVariant carries information about a single option.
            # Nonetheless, for uniformity of treatment we'll package them
            # in an iterable of one element.
            condition = '+{name}'.format(name=variant)
            condition = '+{name}'.format(name=name)
            options = [(name, condition in spec)]
        else:
            condition = '{variant}={value}'
            condition = '{name}={value}'
            # "feature_values" is used to track values which correspond to
            # features which can be enabled or disabled as understood by the
            # package's build system. It excludes values which have special
            # meanings and do not correspond to features (e.g. "none")
            feature_values = getattr(
                variant_desc.values, 'feature_values', None
            ) or variant_desc.values
                self.variants[name].values, 'feature_values', None
            ) or self.variants[name].values

            options = [
                (value,
                 condition.format(variant=variant,
                                  value=value) in spec)
                (value, condition.format(name=name, value=value) in spec)
                for value in feature_values
            ]

@@ -569,7 +485,7 @@ def _default_generator(is_activated):
            args.append(line_generator(activated))
        return args

    def with_or_without(self, name, activation_value=None, variant=None):
    def with_or_without(self, name, activation_value=None):
        """Inspects a variant and returns the arguments that activate
        or deactivate the selected feature(s) for the configure options.

@@ -595,10 +511,9 @@ def with_or_without(self, name, activation_value=None, variant=None):
        Returns:
            list of arguments to configure
        """
        return self._activate_or_not(name, 'with', 'without', activation_value,
                                     variant)
        return self._activate_or_not(name, 'with', 'without', activation_value)

    def enable_or_disable(self, name, activation_value=None, variant=None):
    def enable_or_disable(self, name, activation_value=None):
        """Same as
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.with_or_without`
        but substitute ``with`` with ``enable`` and ``without`` with ``disable``.
@@ -616,7 +531,7 @@ def enable_or_disable(self, name, activation_value=None, variant=None):
            list of arguments to configure
        """
        return self._activate_or_not(
            name, 'enable', 'disable', activation_value, variant
            name, 'enable', 'disable', activation_value
        )

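To make the mapping concrete: for a hypothetical package declaring `variant('foo', values=('x', 'y'))`, a spec with `foo=x` yields `--with-x --without-y` (plus a value when `activation_value` is set). A toy sketch of the flag generation, not the real implementation:

.. code-block:: python

    def with_or_without_sketch(active_values, all_values):
        args = []
        for value in all_values:
            word = 'with' if value in active_values else 'without'
            args.append('--{0}-{1}'.format(word, value))
        return args

    print(with_or_without_sketch({'x'}, ['x', 'y']))  # ['--with-x', '--without-y']
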
    run_after('install')(PackageBase._run_default_install_time_test_callbacks)
@@ -646,6 +561,3 @@ def remove_libtool_archives(self):
        fs.mkdirp(os.path.dirname(self._removed_la_files_log))
        with open(self._removed_la_files_log, mode='w') as f:
            f.write('\n'.join(libtool_files))

    # On macOS, force rpaths for shared library IDs and remove duplicate rpaths
    run_after('install')(PackageBase.apply_macos_rpath_fixups)

@@ -254,9 +254,9 @@ def define_from_variant(self, cmake_var, variant=None):

        .. code-block:: python

            [self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
             self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
             self.define_from_variant('SWR')]
            [define_from_variant('BUILD_SHARED_LIBS', 'shared'),
             define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
             define_from_variant('SWR')]

        will generate the following configuration options:

@@ -12,7 +12,7 @@ class CudaPackage(PackageBase):
|
||||
"""Auxiliary class which contains CUDA variant, dependencies and conflicts
|
||||
and is meant to unify and facilitate its usage.
|
||||
|
||||
Maintainers: ax3l, Rombur, davidbeckingsale
|
||||
Maintainers: ax3l, Rombur
|
||||
"""
|
||||
|
||||
    # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
@@ -87,45 +87,28 @@ def cuda_flags(arch_list):

    # Linux x86_64 compiler conflicts from here:
    # https://gist.github.com/ax3l/9489132

    # GCC
    # According to
    # https://github.com/spack/spack/pull/25054#issuecomment-886531664
    # these conflicts are valid independently from the architecture

    # minimum supported versions
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0:')
    conflicts('%gcc@:5', when='+cuda ^cuda@11.4:')

    # maximum supported version
    # NOTE:
    # in order to not constrain future cuda version to old gcc versions,
    # it has been decided to use an upper bound for the latest version.
    # This implies that the last one in the list has to be updated at
    # each release of a new cuda minor version.
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.4.0')
    conflicts('%gcc@12:', when='+cuda ^cuda@:11.5.0')
    conflicts('%clang@12:', when='+cuda ^cuda@:11.4.0')
    conflicts('%clang@13:', when='+cuda ^cuda@:11.5.0')

    # https://gist.github.com/ax3l/9489132#gistcomment-3860114
    conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')
    conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:')
    conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:')
    conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:')
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:')
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:')
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0.2: target=x86_64:')
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')
    conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:')
    conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:')
    conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:')
    conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:')
    conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:')
    conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
    conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
    conflicts('%pgi@:17,20:',
              when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
    conflicts('%pgi@:17,21:',
              when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
    conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:')
    conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:')
    conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 target=x86_64:')
    conflicts('%clang@:3.7,4:',
              when='+cuda ^cuda@8.0:9.0 target=x86_64:')
    conflicts('%clang@:3.7,4.1:',
              when='+cuda ^cuda@9.1 target=x86_64:')
    conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:')
    conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:')
    conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:')
@@ -149,6 +132,9 @@ def cuda_flags(arch_list):
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:')
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:')
    # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0.2: target=ppc64le:')
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0.3 target=ppc64le:')
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')
    conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:')
    conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:')
    conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:')
@@ -159,7 +145,7 @@ def cuda_flags(arch_list):
    conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:')
    conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:')
    conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:')
    conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:')
    conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=ppc64le:')
    conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')

    # Intel is mostly relevant for x86_64 Linux, even though it also
@@ -184,7 +170,7 @@ def cuda_flags(arch_list):

    # Darwin.
    # TODO: add missing conflicts for %apple-clang cuda@:10
    conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ')
    conflicts('platform=darwin', when='+cuda ^cuda@11.0.2:')

    # Make sure cuda_arch can not be used without +cuda
    for value in cuda_arch_values:
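Note: the `@...` strings above are inclusive version ranges (`@:11.4.0` means "up to and including 11.4.0", `@11:` means "11 or newer"). A toy, stdlib-only sketch of that matching rule, for intuition only — this is not Spack's real version logic, which also understands prefix semantics and version suffixes:

# Toy illustration of inclusive Spack-style version ranges like '@:11.4.0'.
# NOT Spack's implementation; the `parse`/`in_range` helpers are made up here.
def parse(v):
    return tuple(int(x) for x in v.split('.'))

def in_range(version, lo=None, hi=None):
    """True if lo <= version <= hi, with an open end when lo/hi is None."""
    v = parse(version)
    if lo is not None and v < parse(lo):
        return False
    if hi is not None and v > parse(hi):
        return False
    return True

# '%gcc@11:' conflicting with '^cuda@:11.4.0' means: gcc >= 11 AND cuda <= 11.4.0
print(in_range('11.2.0', lo='11'))       # True  -> gcc 11.2 matches @11:
print(in_range('11.4.0', hi='11.4.0'))   # True  -> cuda 11.4.0 matches @:11.4.0
print(in_range('11.5.0', hi='11.4.0'))   # False -> cuda 11.5.0 escapes the conflict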
@@ -116,9 +116,9 @@ class IntelPackage(PackageBase):
    # that satisfies self.spec will be used.
    version_years = {
        # intel-daal is versioned 2016 and later, no divining is needed
        'intel-ipp@9.0:9': 2016,
        'intel-mkl@11.3.0:11.3': 2016,
        'intel-mpi@5.1:5': 2016,
        'intel-ipp@9.0:9.99': 2016,
        'intel-mkl@11.3.0:11.3.999': 2016,
        'intel-mpi@5.1:5.99': 2016,
    }

    # Below is the list of possible values for setting auto dispatch functions
@@ -110,6 +110,3 @@ def installcheck(self):

    # Check that self.prefix is there after installation
    run_after('install')(PackageBase.sanity_check_prefix)

    # On macOS, force rpaths for shared library IDs and remove duplicate rpaths
    run_after('install')(PackageBase.apply_macos_rpath_fixups)
@@ -127,10 +127,7 @@ def import_modules(self):
            list: list of strings of module names
        """
        modules = []
        root = os.path.join(
            self.prefix,
            self.spec['python'].package.config_vars['python_lib']['true']['false'],
        )
        root = self.spec['python'].package.get_python_lib(prefix=self.prefix)

        # Some Python libraries are packages: collections of modules
        # distributed in directories containing __init__.py files
@@ -255,11 +252,12 @@ def install_args(self, spec, prefix):
        # Get all relative paths since we set the root to `prefix`
        # We query the python with which these will be used for the lib and inc
        # directories. This ensures we use `lib`/`lib64` as expected by python.
        pure_site_packages_dir = spec['python'].package.config_vars[
            'python_lib']['false']['false']
        plat_site_packages_dir = spec['python'].package.config_vars[
            'python_lib']['true']['false']
        inc_dir = spec['python'].package.config_vars['python_inc']['true']
        pure_site_packages_dir = spec['python'].package.get_python_lib(
            plat_specific=False, prefix='')
        plat_site_packages_dir = spec['python'].package.get_python_lib(
            plat_specific=True, prefix='')
        inc_dir = spec['python'].package.get_python_inc(
            plat_specific=True, prefix='')

        args += ['--root=%s' % prefix,
                 '--install-purelib=%s' % pure_site_packages_dir,
@@ -393,15 +391,11 @@ def remove_files_from_view(self, view, merge_map):
                self.spec
            )
        )

        to_remove = []
        for src, dst in merge_map.items():
            if ignore_namespace and namespace_init(dst):
                continue

            if global_view or not path_contains_subdirectory(src, bin_dir):
                to_remove.append(dst)
                view.remove_file(src, dst)
            else:
                os.remove(dst)

        view.remove_files(to_remove)
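Note: the `config_vars['python_lib']`/`get_python_lib` values swapped above ultimately describe the interpreter's install scheme. A stdlib-only way to see the analogous directories for the running Python (illustrative only; Spack queries the spec's Python, not the current interpreter):

import sysconfig

# 'purelib' is the platform-independent site-packages directory; 'platlib'
# is the platform-specific one (these can differ, e.g. lib vs. lib64).
print(sysconfig.get_path('purelib'))
print(sysconfig.get_path('platlib'))
print(sysconfig.get_path('include'))   # C header directory for this Python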
@@ -18,9 +18,6 @@ class RubyPackage(PackageBase):
    #. :py:meth:`~.RubyPackage.build`
    #. :py:meth:`~.RubyPackage.install`
    """

    maintainers = ['Kerilk']

    #: Phases of a Ruby package
    phases = ['build', 'install']

@@ -53,12 +50,8 @@ def install(self, spec, prefix):

        gems = glob.glob('*.gem')
        if gems:
            # if --install-dir is not used, GEM_PATH is deleted from the
            # environment, and Gems required to build native extensions will
            # not be found. Those extensions are built during `gem install`.
            inspect.getmodule(self).gem(
                'install', '--norc', '--ignore-dependencies',
                '--install-dir', prefix, gems[0])
                'install', '--norc', '--ignore-dependencies', gems[0])

    # Check that self.prefix is there after installation
    run_after('install')(PackageBase.sanity_check_prefix)
@@ -64,10 +64,7 @@ def import_modules(self):
            list: list of strings of module names
        """
        modules = []
        root = os.path.join(
            self.prefix,
            self.spec['python'].package.config_vars['python_lib']['true']['false'],
        )
        root = self.spec['python'].package.get_python_lib(prefix=self.prefix)

        # Some Python libraries are packages: collections of modules
        # distributed in directories containing __init__.py files
@@ -99,9 +96,7 @@ def configure(self, spec, prefix):

        args = self.configure_args()

        python_include_dir = os.path.basename(
            inspect.getmodule(self).python_include_dir
        )
        python_include_dir = 'python' + str(spec['python'].version.up_to(2))

        args.extend([
            '--verbose',
@@ -23,8 +23,11 @@ def misc_cache_location():
    Currently the ``misc_cache`` stores indexes for virtual dependency
    providers and for which packages provide which tags.
    """
    path = spack.config.get('config:misc_cache', spack.paths.default_misc_cache_path)
    return spack.util.path.canonicalize_path(path)
    path = spack.config.get('config:misc_cache')
    if not path:
        path = os.path.join(spack.paths.user_config_path, 'cache')
    path = spack.util.path.canonicalize_path(path)
    return path


def _misc_cache():
@@ -44,7 +47,7 @@ def fetch_cache_location():
    """
    path = spack.config.get('config:source_cache')
    if not path:
        path = spack.paths.default_fetch_cache_path
        path = os.path.join(spack.paths.var_path, "cache")
    path = spack.util.path.canonicalize_path(path)
    return path
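Note: both versions of `misc_cache_location()` reduce to "look up a config value, fall back to a default, canonicalize". A minimal standalone sketch of the two equivalent styles, with a plain dict standing in for Spack's config:

import os

config = {}  # stand-in for spack's config; 'config:misc_cache' unset here
default = os.path.join('~', '.spack', 'cache')

# Style 1: pass the default to the getter (the one-line form)
path1 = config.get('config:misc_cache', default)

# Style 2: get, then fall back explicitly (the older form)
path2 = config.get('config:misc_cache')
if not path2:
    path2 = default

assert os.path.expanduser(path1) == os.path.expanduser(path2)
print(os.path.expanduser(path1))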
@@ -45,8 +45,6 @@
]

SPACK_PR_MIRRORS_ROOT_URL = 's3://spack-binaries-prs'
SPACK_SHARED_PR_MIRROR_URL = url_util.join(SPACK_PR_MIRRORS_ROOT_URL,
                                           'shared_pr_mirror')
TEMP_STORAGE_MIRROR_NAME = 'ci_temporary_mirror'

spack_gpg = spack.main.SpackCommand('gpg')
@@ -396,6 +394,9 @@ def append_dep(s, d):
        })

    for spec in spec_list:
        spec.concretize()

        # root_spec = get_spec_string(spec)
        root_spec = spec

        for s in spec.traverse(deptype=all):
@@ -611,14 +612,11 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
            'strip-compilers': False,
        })

    # Add per-PR mirror (and shared PR mirror) if enabled, as some specs might
    # be up to date in one of those and thus not need to be rebuilt.
    # Add this mirror if it's enabled, as some specs might be up to date
    # here and thus not need to be rebuilt.
    if pr_mirror_url:
        spack.mirror.add(
            'ci_pr_mirror', pr_mirror_url, cfg.default_modify_scope())
        spack.mirror.add('ci_shared_pr_mirror',
                         SPACK_SHARED_PR_MIRROR_URL,
                         cfg.default_modify_scope())

    pipeline_artifacts_dir = artifacts_root
    if not pipeline_artifacts_dir:
@@ -665,35 +663,16 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,

    # Speed up staging by first fetching binary indices from all mirrors
    # (including the per-PR mirror we may have just added above).
    try:
        bindist.binary_index.update()
    except bindist.FetchCacheError as e:
        tty.error(e)
    bindist.binary_index.update()

    staged_phases = {}
    try:
        for phase in phases:
            phase_name = phase['name']
            if phase_name == 'specs':
                # Anything in the "specs" of the environment are already
                # concretized by the block at the top of this method, so we
                # only need to find the concrete versions, and then avoid
                # re-concretizing them needlessly later on.
                concrete_phase_specs = [
                    concrete for abstract, concrete in env.concretized_specs()
                    if abstract in env.spec_lists[phase_name]
                ]
            else:
                # Any specs lists in other definitions (but not in the
                # "specs") of the environment are not yet concretized so we
                # have to concretize them explicitly here.
                concrete_phase_specs = env.spec_lists[phase_name]
                with spack.concretize.disable_compiler_existence_check():
                    for phase_spec in concrete_phase_specs:
                        phase_spec.concretize()
            staged_phases[phase_name] = stage_spec_jobs(
                concrete_phase_specs,
                check_index_only=check_index_only)
            with spack.concretize.disable_compiler_existence_check():
                staged_phases[phase_name] = stage_spec_jobs(
                    env.spec_lists[phase_name],
                    check_index_only=check_index_only)
    finally:
        # Clean up PR mirror if enabled
        if pr_mirror_url:
@@ -709,17 +688,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
    max_length_needs = 0
    max_needs_job = ''

    # If this is configured, spack will fail "spack ci generate" if it
    # generates any full hash which exists under the broken specs url.
    broken_spec_urls = None
    if broken_specs_url:
        if broken_specs_url.startswith('http'):
            # To make checking each spec against the list faster, we require
            # a url protocol that allows us to iterate the url in advance.
            tty.msg('Cannot use an http(s) url for broken specs, ignoring')
        else:
            broken_spec_urls = web_util.list_url(broken_specs_url)

    before_script, after_script = None, None
    for phase in phases:
        phase_name = phase['name']
@@ -903,13 +871,16 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
                    tty.debug(debug_msg)

            if prune_dag and not rebuild_spec:
                tty.debug('Pruning spec that does not need to be rebuilt.')
                continue

            if (broken_spec_urls is not None and
                    release_spec_full_hash in broken_spec_urls):
                known_broken_specs_encountered.append('{0} ({1})'.format(
                    release_spec, release_spec_full_hash))
            # Check if this spec is in our list of known failures, now that
            # we know this spec needs a rebuild
            if broken_specs_url:
                broken_spec_path = url_util.join(
                    broken_specs_url, release_spec_full_hash)
                if web_util.url_exists(broken_spec_path):
                    known_broken_specs_encountered.append('{0} ({1})'.format(
                        release_spec, release_spec_full_hash))

            if artifacts_root:
                job_dependencies.append({
@@ -946,7 +917,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
                    bc_root = os.path.join(
                        local_mirror_dir, 'build_cache')
                    artifact_paths.extend([os.path.join(bc_root, p) for p in [
                        bindist.tarball_name(release_spec, '.spec.json'),
                        bindist.tarball_name(release_spec, '.spec.yaml'),
                        bindist.tarball_name(release_spec, '.cdashid'),
                        bindist.tarball_directory_name(release_spec),
                    ]])
@@ -1027,14 +998,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
        'after_script',
    ]

    service_job_retries = {
        'max': 2,
        'when': [
            'runner_system_failure',
            'stuck_or_timeout_failure'
        ]
    }

    if job_id > 0:
        if temp_storage_url_prefix:
            # There were some rebuild jobs scheduled, so we will need to
@@ -1054,7 +1017,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
                    temp_storage_url_prefix)
            ]
            cleanup_job['when'] = 'always'
            cleanup_job['retry'] = service_job_retries

            output_object['cleanup'] = cleanup_job

@@ -1078,7 +1040,11 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
                index_target_mirror)
        ]
        final_job['when'] = 'always'
        final_job['retry'] = service_job_retries

        if artifacts_root:
            final_job['variables'] = {
                'SPACK_CONCRETE_ENV_DIR': concrete_env_dir
            }

        output_object['rebuild-index'] = final_job

@@ -1142,8 +1108,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
            'echo "All specs already up to date, nothing to rebuild."',
        ]

        noop_job['retry'] = service_job_retries

        sorted_output = {'no-specs-to-rebuild': noop_job}

    if known_broken_specs_encountered:
@@ -1412,13 +1376,13 @@ def read_cdashid_from_mirror(spec, mirror_url):
    return int(contents)


def push_mirror_contents(env, spec, specfile_path, mirror_url, sign_binaries):
def push_mirror_contents(env, spec, yaml_path, mirror_url, sign_binaries):
    try:
        unsigned = not sign_binaries
        tty.debug('Creating buildcache ({0})'.format(
            'unsigned' if unsigned else 'signed'))
        spack.cmd.buildcache._createtarball(
            env, spec_file=specfile_path, add_deps=False,
            env, spec_yaml=yaml_path, add_deps=False,
            output_location=mirror_url, force=True, allow_root=True,
            unsigned=unsigned)
    except Exception as inst:
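Note: `service_job_retries` above is ordinary GitLab CI `retry:` syntax attached to each generated service job. A rough sketch of how such a job object could be assembled and serialized — the job fields here are placeholders, not Spack's exact output, and JSON is used only because it is a valid YAML subset:

import json

# GitLab CI 'retry' block shared by generated service jobs (cleanup,
# rebuild-index, no-op); the values mirror the ones in the diff above.
service_job_retries = {
    'max': 2,
    'when': [
        'runner_system_failure',
        'stuck_or_timeout_failure'
    ]
}

# Hypothetical cleanup job; real script lines come from the pipeline config.
cleanup_job = {
    'stage': 'cleanup-temp-storage',
    'script': ['echo cleanup placeholder'],
    'when': 'always',
    'retry': service_job_retries,
}

print(json.dumps({'cleanup': cleanup_job}, indent=2))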
@@ -21,13 +21,11 @@
from llnl.util.tty.color import colorize

import spack.config
import spack.environment as ev
import spack.error
import spack.extensions
import spack.paths
import spack.spec
import spack.store
import spack.user_environment as uenv
import spack.util.spack_json as sjson
import spack.util.string

@@ -154,7 +152,6 @@ def parse_specs(args, **kwargs):
    concretize = kwargs.get('concretize', False)
    normalize = kwargs.get('normalize', False)
    tests = kwargs.get('tests', False)
    reuse = kwargs.get('reuse', False)

    try:
        sargs = args
@@ -163,7 +160,7 @@ def parse_specs(args, **kwargs):
        specs = spack.spec.parse(sargs)
        for spec in specs:
            if concretize:
                spec.concretize(tests=tests, reuse=reuse)  # implies normalize
                spec.concretize(tests=tests)  # implies normalize
            elif normalize:
                spec.normalize(tests=tests)

@@ -189,13 +186,29 @@ def matching_spec_from_env(spec):
    If no matching spec is found in the environment (or if no environment is
    active), this will return the given spec but concretized.
    """
    env = ev.active_environment()
    env = spack.environment.get_env({}, cmd_name)
    if env:
        return env.matching_spec(spec) or spec.concretized()
    else:
        return spec.concretized()


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
    replacing intervening elements with '...'. For example::

        elide_list([1,2,3,4,5,6], 4)

    gives::

        [1, 2, 3, '...', 6]
    """
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list


def disambiguate_spec(spec, env, local=False, installed=True, first=False):
    """Given a spec, figure out which installed package it refers to.

@@ -261,19 +274,17 @@ def display_specs_as_json(specs, deps=False):
    seen = set()
    records = []
    for spec in specs:
        dag_hash = spec.dag_hash()
        if dag_hash in seen:
        if spec.dag_hash() in seen:
            continue
        records.append(spec.node_dict_with_hashes())
        seen.add(dag_hash)
        seen.add(spec.dag_hash())
        records.append(spec.to_record_dict())

        if deps:
            for dep in spec.traverse():
                dep_dag_hash = dep.dag_hash()
                if dep_dag_hash in seen:
                if dep.dag_hash() in seen:
                    continue
                records.append(dep.node_dict_with_hashes())
                seen.add(dep_dag_hash)
                seen.add(dep.dag_hash())
                records.append(dep.to_record_dict())

    sjson.dump(records, sys.stdout)

@@ -443,28 +454,6 @@ def format_list(specs):
    output.flush()


def filter_loaded_specs(specs):
    """Filter a list of specs returning only those that are
    currently loaded."""
    hashes = os.environ.get(uenv.spack_loaded_hashes_var, '').split(':')
    return [x for x in specs if x.dag_hash() in hashes]


def print_how_many_pkgs(specs, pkg_type=""):
    """Given a list of specs, this will print a message about how many
    specs are in that list.

    Args:
        specs (list): depending on how many items are in this list, choose
            the plural or singular form of the word "package"
        pkg_type (str): the output string will mention this provided
            category, e.g. if pkg_type is "installed" then the message
            would be "3 installed packages"
    """
    tty.msg("%s" % spack.util.string.plural(
        len(specs), pkg_type + " package"))


def spack_is_git_repo():
    """Ensure that this instance of Spack is a git clone."""
    return is_git_repo(spack.paths.prefix)
@@ -512,71 +501,3 @@ def extant_file(f):
    if not os.path.isfile(f):
        raise argparse.ArgumentTypeError('%s does not exist' % f)
    return f


def require_active_env(cmd_name):
    """Used by commands to get the active environment

    If an environment is not found, print an error message that says the calling
    command *needs* an active environment.

    Arguments:
        cmd_name (str): name of calling command

    Returns:
        (spack.environment.Environment): the active environment
    """
    env = ev.active_environment()

    if env:
        return env
    else:
        tty.die(
            '`spack %s` requires an environment' % cmd_name,
            'activate an environment first:',
            '    spack env activate ENV',
            'or use:',
            '    spack -e ENV %s ...' % cmd_name)


def find_environment(args):
    """Find active environment from args or environment variable.

    Check for an environment in this order:
        1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments)
        2. via a path in the spack.environment.spack_env_var environment variable.

    If an environment is found, read it in. If not, return None.

    Arguments:
        args (argparse.Namespace): argparse namespace with command arguments

    Returns:
        (spack.environment.Environment): a found environment, or ``None``
    """

    # treat env as a name
    env = args.env
    if env:
        if ev.exists(env):
            return ev.read(env)

    else:
        # if env was specified, see if it is a directory; otherwise, look
        # at env_dir (env and env_dir are mutually exclusive)
        env = args.env_dir

    # if no argument, look for the environment variable
    if not env:
        env = os.environ.get(ev.spack_env_var)

    # nothing was set; there's no active environment
    if not env:
        return None

    # if we get here, env isn't the name of a spack environment; it has
    # to be a path to an environment, or there is something wrong.
    if ev.is_env_dir(env):
        return ev.Environment(env)

    raise ev.SpackEnvironmentError('no environment in %s' % env)
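Note: `elide_list` is self-contained; copied verbatim, it behaves like this:

def elide_list(line_list, max_num=10):
    # Verbatim copy of the helper shown above, for a quick demo.
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list

print(elide_list([1, 2, 3, 4, 5, 6], 4))   # [1, 2, 3, '...', 6]
print(elide_list([1, 2, 3], 4))            # [1, 2, 3] (short lists untouched)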
@@ -30,7 +30,8 @@ def activate(parser, args):
    if len(specs) != 1:
        tty.die("activate requires one spec. %d given." % len(specs))

    spec = spack.cmd.disambiguate_spec(specs[0], ev.active_environment())
    env = ev.get_env(args, 'activate')
    spec = spack.cmd.disambiguate_spec(specs[0], env)
    if not spec.package.is_extension:
        tty.die("%s is not an extension." % spec.name)

@@ -7,6 +7,7 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev

description = 'add a spec to an environment'
section = "environments"
@@ -21,7 +22,7 @@ def setup_parser(subparser):


def add(parser, args):
    env = spack.cmd.require_active_env(cmd_name='add')
    env = ev.get_env(args, 'add', required=True)

    with env.write_transaction():
        for spec in spack.cmd.parse_specs(args.specs):
@@ -95,7 +95,7 @@ def analyze(parser, args, **kwargs):
        sys.exit(0)

    # handle active environment, if any
    env = ev.active_environment()
    env = ev.get_env(args, 'analyze')

    # Get and disambiguate the spec (we should only have one)
    specs = spack.cmd.parse_specs(args.spec)
@@ -12,7 +12,7 @@
import llnl.util.tty.colify as colify
import llnl.util.tty.color as color

import spack.platforms
import spack.architecture as architecture

description = "print architecture information about this machine"
section = "system"
@@ -20,10 +20,6 @@


def setup_parser(subparser):
    subparser.add_argument(
        '-g', '--generic-target', action='store_true',
        help='show the best generic target'
    )
    subparser.add_argument(
        '--known-targets', action='store_true',
        help='show a list of all known targets and exit'
@@ -78,32 +74,25 @@ def display_target_group(header, target_group):


def arch(parser, args):
    if args.generic_target:
        print(archspec.cpu.host().generic)
        return

    if args.known_targets:
        display_targets(archspec.cpu.TARGETS)
        return

    os_args, target_args = 'default_os', 'default_target'
    if args.frontend:
        os_args, target_args = 'frontend', 'frontend'
        arch = architecture.Arch(architecture.platform(),
                                 'frontend', 'frontend')
    elif args.backend:
        os_args, target_args = 'backend', 'backend'

    host_platform = spack.platforms.host()
    host_os = host_platform.operating_system(os_args)
    host_target = host_platform.target(target_args)
    architecture = spack.spec.ArchSpec(
        (str(host_platform), str(host_os), str(host_target))
    )
        arch = architecture.Arch(architecture.platform(),
                                 'backend', 'backend')
    else:
        arch = architecture.Arch(architecture.platform(),
                                 'default_os', 'default_target')

    if args.platform:
        print(architecture.platform)
        print(arch.platform)
    elif args.operating_system:
        print(architecture.os)
        print(arch.os)
    elif args.target:
        print(architecture.target)
        print(arch.target)
    else:
        print(architecture)
        print(arch)
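Note: the `(platform, os, target)` triple handed to `spack.spec.ArchSpec` above can be loosely imitated with the stdlib `platform` module. This is only a rough stand-in; Spack's real detection goes through `spack.platforms` and archspec and is far more precise:

import platform

# Rough analogue of the (platform, os, target) triple; release() is a kernel
# version, not a distro name, so this is illustrative only.
arch_triple = (platform.system().lower(), platform.release(), platform.machine())
print('-'.join(arch_triple))   # e.g. linux-5.15.0-x86_64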
@@ -2,7 +2,6 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import llnl.util.tty.color as cl

import spack.audit
@@ -20,24 +19,12 @@ def setup_parser(subparser):
    # Audit configuration files
    sp.add_parser('configs', help='audit configuration files')

    # Https and other linting
    https_parser = sp.add_parser('packages-https', help='check https in packages')
    https_parser.add_argument(
        '--all',
        action='store_true',
        default=False,
        dest='check_all',
        help="audit all packages"
    )

    # Audit package recipes
    pkg_parser = sp.add_parser('packages', help='audit package recipes')

    for group in [pkg_parser, https_parser]:
        group.add_argument(
            'name', metavar='PKG', nargs='*',
            help='package to be analyzed (if none all packages will be processed)',
        )
    pkg_parser.add_argument(
        'name', metavar='PKG', nargs='*',
        help='package to be analyzed (if none all packages will be processed)',
    )

    # List all checks
    sp.add_parser('list', help='list available checks and exits')
@@ -54,17 +41,6 @@ def packages(parser, args):
    _process_reports(reports)


def packages_https(parser, args):

    # Since packages takes a long time, --all is required without name
    if not args.check_all and not args.name:
        tty.die("Please specify one or more packages to audit, or --all.")

    pkgs = args.name or spack.repo.path.all_package_names()
    reports = spack.audit.run_group(args.subcommand, pkgs=pkgs)
    _process_reports(reports)


def list(parser, args):
    for subcommand, check_tags in spack.audit.GROUPS.items():
        print(cl.colorize('@*b{' + subcommand + '}:'))
@@ -82,7 +58,6 @@ def audit(parser, args):
    subcommands = {
        'configs': configs,
        'packages': packages,
        'packages-https': packages_https,
        'list': list
    }
    subcommands[args.subcommand](parser, args)
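Note: the `for group in [pkg_parser, https_parser]:` loop above is a standard argparse pattern for registering the same argument on several subcommands. A runnable miniature:

import argparse

parser = argparse.ArgumentParser(prog='audit-demo')
sp = parser.add_subparsers(dest='subcommand')

pkg_parser = sp.add_parser('packages')
https_parser = sp.add_parser('packages-https')
https_parser.add_argument('--all', action='store_true', dest='check_all')

# Register the same positional on both subparsers in one pass.
for group in [pkg_parser, https_parser]:
    group.add_argument('name', metavar='PKG', nargs='*')

print(parser.parse_args(['packages-https', '--all', 'zlib']))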
@@ -2,13 +2,10 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function

import os.path
import shutil

import llnl.util.tty
import llnl.util.tty.color

import spack.cmd.common.arguments
import spack.config
@@ -54,27 +51,6 @@ def setup_parser(subparser):
        help='set the bootstrap directory to this value'
    )

    list = sp.add_parser(
        'list', help='list the methods available for bootstrapping'
    )
    _add_scope_option(list)

    trust = sp.add_parser(
        'trust', help='trust a bootstrapping method'
    )
    _add_scope_option(trust)
    trust.add_argument(
        'name', help='name of the method to be trusted'
    )

    untrust = sp.add_parser(
        'untrust', help='untrust a bootstrapping method'
    )
    _add_scope_option(untrust)
    untrust.add_argument(
        'name', help='name of the method to be untrusted'
    )


def _enable_or_disable(args):
    # Set to True if we called "enable", otherwise set to false
@@ -124,97 +100,11 @@ def _root(args):
    print(root)


def _list(args):
    sources = spack.config.get(
        'bootstrap:sources', default=None, scope=args.scope
    )

    if not sources:
        llnl.util.tty.msg(
            "No method available for bootstrapping Spack's dependencies"
        )
        return

    def _print_method(source, trusted):
        color = llnl.util.tty.color

        def fmt(header, content):
            header_fmt = "@*b{{{0}:}} {1}"
            color.cprint(header_fmt.format(header, content))

        trust_str = "@*y{UNKNOWN}"
        if trusted is True:
            trust_str = "@*g{TRUSTED}"
        elif trusted is False:
            trust_str = "@*r{UNTRUSTED}"

        fmt("Name", source['name'] + ' ' + trust_str)
        print()
        fmt("  Type", source['type'])
        print()

        info_lines = ['\n']
        for key, value in source.get('info', {}).items():
            info_lines.append(' ' * 4 + '@*{{{0}}}: {1}\n'.format(key, value))
        if len(info_lines) > 1:
            fmt("  Info", ''.join(info_lines))

        description_lines = ['\n']
        for line in source['description'].split('\n'):
            description_lines.append(' ' * 4 + line + '\n')

        fmt("  Description", ''.join(description_lines))

    trusted = spack.config.get('bootstrap:trusted', {})
    for s in sources:
        _print_method(s, trusted.get(s['name'], None))


def _write_trust_state(args, value):
    name = args.name
    sources = spack.config.get('bootstrap:sources')

    matches = [s for s in sources if s['name'] == name]
    if not matches:
        names = [s['name'] for s in sources]
        msg = ('there is no bootstrapping method named "{0}". Valid '
               'method names are: {1}'.format(name, ', '.join(names)))
        raise RuntimeError(msg)

    if len(matches) > 1:
        msg = ('there is more than one bootstrapping method named "{0}". '
               'Please delete all methods but one from bootstrap.yaml '
               'before proceeding').format(name)
        raise RuntimeError(msg)

    # Setting the scope explicitly is needed to not copy over to a new scope
    # the entire default configuration for bootstrap.yaml
    scope = args.scope or spack.config.default_modify_scope('bootstrap')
    spack.config.add(
        'bootstrap:trusted:{0}:{1}'.format(name, str(value)), scope=scope
    )


def _trust(args):
    _write_trust_state(args, value=True)
    msg = '"{0}" is now trusted for bootstrapping'
    llnl.util.tty.msg(msg.format(args.name))


def _untrust(args):
    _write_trust_state(args, value=False)
    msg = '"{0}" is now untrusted and will not be used for bootstrapping'
    llnl.util.tty.msg(msg.format(args.name))


def bootstrap(parser, args):
    callbacks = {
        'enable': _enable_or_disable,
        'disable': _enable_or_disable,
        'reset': _reset,
        'root': _root,
        'list': _list,
        'trust': _trust,
        'untrust': _untrust
        'root': _root
    }
    callbacks[args.subcommand](args)
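Note: the trust/untrust plumbing boils down to a `{name: bool}` mapping consulted per configured source. A standalone sketch of the three-way status rendering from `_print_method`, with the color markup dropped and the sample data made up for the demo:

def trust_label(trusted):
    # Mirrors the three-way check in _print_method, minus the color markup.
    if trusted is True:
        return 'TRUSTED'
    elif trusted is False:
        return 'UNTRUSTED'
    return 'UNKNOWN'

sources = [{'name': 'github-actions'}, {'name': 'spack-install'}]
trusted = {'github-actions': True}   # the shape written by _write_trust_state

for s in sources:
    print(s['name'], trust_label(trusted.get(s['name'], None)))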
@@ -2,33 +2,29 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse

import os
import shutil
import sys
import tempfile

import llnl.util.tty as tty

import spack.architecture
import spack.binary_distribution as bindist
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment as ev
import spack.fetch_strategy as fs
import spack.hash_types as ht
import spack.mirror
import spack.relocate
import spack.repo
import spack.spec
import spack.store
import spack.util.crypto
import spack.util.url as url_util
import spack.util.web as web_util
from spack.cmd import display_specs
from spack.error import SpecError
from spack.spec import Spec, save_dependency_specfiles
from spack.stage import Stage
from spack.spec import Spec, save_dependency_spec_yamls
from spack.util.string import plural

description = "create, download and install binary packages"
@@ -74,9 +70,8 @@ def setup_parser(subparser):
    create.add_argument('--rebuild-index', action='store_true',
                        default=False, help="Regenerate buildcache index " +
                                            "after building package(s)")
    create.add_argument('--spec-file', default=None,
                        help=('Create buildcache entry for spec from json or ' +
                              'yaml file'))
    create.add_argument('-y', '--spec-yaml', default=None,
                        help='Create buildcache entry for spec from yaml file')
    create.add_argument('--only', default='package,dependencies',
                        dest='things_to_install',
                        choices=['package', 'dependencies'],
@@ -102,11 +97,6 @@ def setup_parser(subparser):
    install.add_argument('-o', '--otherarch', action='store_true',
                         help="install specs from other architectures" +
                              " instead of default platform and OS")
    # This argument is needed by the bootstrapping logic to verify checksums
    install.add_argument('--sha256', help=argparse.SUPPRESS)
    install.add_argument(
        '--only-root', action='store_true', help=argparse.SUPPRESS
    )

    arguments.add_common_arguments(install, ['specs'])
    install.set_defaults(func=installtarball)
@@ -166,9 +156,8 @@ def setup_parser(subparser):
        help='Check single spec instead of release specs file')

    check.add_argument(
        '--spec-file', default=None,
        help=('Check single spec from json or yaml file instead of release ' +
              'specs file'))
        '-y', '--spec-yaml', default=None,
        help='Check single spec from yaml file instead of release specs file')

    check.add_argument(
        '--rebuild-on-error', default=False, action='store_true',
@@ -177,15 +166,14 @@ def setup_parser(subparser):

    check.set_defaults(func=check_binaries)

    # Download tarball and specfile
    # Download tarball and spec.yaml
    dltarball = subparsers.add_parser('download', help=get_tarball.__doc__)
    dltarball.add_argument(
        '-s', '--spec', default=None,
        help="Download built tarball for spec from mirror")
    dltarball.add_argument(
        '--spec-file', default=None,
        help=("Download built tarball for spec (from json or yaml file) " +
              "from mirror"))
        '-y', '--spec-yaml', default=None,
        help="Download built tarball for spec (from yaml file) from mirror")
    dltarball.add_argument(
        '-p', '--path', default=None,
        help="Path to directory where tarball should be downloaded")
@@ -201,27 +189,26 @@ def setup_parser(subparser):
        '-s', '--spec', default=None,
        help='Spec string for which buildcache name is desired')
    getbuildcachename.add_argument(
        '--spec-file', default=None,
        help=('Path to spec json or yaml file for which buildcache name is ' +
              'desired'))
        '-y', '--spec-yaml', default=None,
        help='Path to spec yaml file for which buildcache name is desired')
    getbuildcachename.set_defaults(func=get_buildcache_name)

    # Given the root spec, save the yaml of the dependent spec to a file
    savespecfile = subparsers.add_parser('save-specfile',
                                         help=save_specfiles.__doc__)
    savespecfile.add_argument(
    saveyaml = subparsers.add_parser('save-yaml',
                                     help=save_spec_yamls.__doc__)
    saveyaml.add_argument(
        '--root-spec', default=None,
        help='Root spec of dependent spec')
    savespecfile.add_argument(
        '--root-specfile', default=None,
        help='Path to json or yaml file containing root spec of dependent spec')
    savespecfile.add_argument(
    saveyaml.add_argument(
        '--root-spec-yaml', default=None,
        help='Path to yaml file containing root spec of dependent spec')
    saveyaml.add_argument(
        '-s', '--specs', default=None,
        help='List of dependent specs for which saved yaml is desired')
    savespecfile.add_argument(
        '--specfile-dir', default=None,
    saveyaml.add_argument(
        '-y', '--yaml-dir', default=None,
        help='Path to directory where spec yamls should be saved')
    savespecfile.set_defaults(func=save_specfiles)
    saveyaml.set_defaults(func=save_spec_yamls)

    # Copy buildcache from some directory to another mirror url
    copy = subparsers.add_parser('copy', help=buildcache_copy.__doc__)
@@ -229,44 +216,13 @@ def setup_parser(subparser):
        '--base-dir', default=None,
        help='Path to mirror directory (root of existing buildcache)')
    copy.add_argument(
        '--spec-file', default=None,
        help=('Path to spec json or yaml file representing buildcache entry to' +
              ' copy'))
        '--spec-yaml', default=None,
        help='Path to spec yaml file representing buildcache entry to copy')
    copy.add_argument(
        '--destination-url', default=None,
        help='Destination mirror url')
    copy.set_defaults(func=buildcache_copy)

    # Sync buildcache entries from one mirror to another
    sync = subparsers.add_parser('sync', help=buildcache_sync.__doc__)
    source = sync.add_mutually_exclusive_group(required=True)
    source.add_argument('--src-directory',
                        metavar='DIRECTORY',
                        type=str,
                        help="Source mirror as a local file path")
    source.add_argument('--src-mirror-name',
                        metavar='MIRROR_NAME',
                        type=str,
                        help="Name of the source mirror")
    source.add_argument('--src-mirror-url',
                        metavar='MIRROR_URL',
                        type=str,
                        help="URL of the source mirror")
    dest = sync.add_mutually_exclusive_group(required=True)
    dest.add_argument('--dest-directory',
                      metavar='DIRECTORY',
                      type=str,
                      help="Destination mirror as a local file path")
    dest.add_argument('--dest-mirror-name',
                      metavar='MIRROR_NAME',
                      type=str,
                      help="Name of the destination mirror")
    dest.add_argument('--dest-mirror-url',
                      metavar='MIRROR_URL',
                      type=str,
                      help="URL of the destination mirror")
    sync.set_defaults(func=buildcache_sync)

    # Update buildcache index without copying any additional packages
    update_index = subparsers.add_parser(
        'update-index', help=buildcache_update_index.__doc__)
@@ -337,13 +293,9 @@ def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False,
    specs_from_cli = []
    has_errors = False

    try:
        specs = bindist.update_cache_and_get_specs()
    except bindist.FetchCacheError as e:
        tty.error(e)

    specs = bindist.update_cache_and_get_specs()
    if not other_arch:
        arch = spack.spec.Spec.default_arch()
        arch = spack.architecture.default_arch().to_spec()
        specs = [s for s in specs if s.satisfies(arch)]

    for pkg in pkgs:
@@ -377,19 +329,16 @@ def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False,
    return specs_from_cli


def _createtarball(env, spec_file=None, packages=None, add_spec=True,
def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
                   add_deps=True, output_location=os.getcwd(),
                   signing_key=None, force=False, make_relative=False,
                   unsigned=False, allow_root=False, rebuild_index=False):
    if spec_file:
        with open(spec_file, 'r') as fd:
            specfile_contents = fd.read()
            tty.debug('createtarball read specfile contents:')
            tty.debug(specfile_contents)
            if spec_file.endswith('.json'):
                s = Spec.from_json(specfile_contents)
            else:
                s = Spec.from_yaml(specfile_contents)
    if spec_yaml:
        with open(spec_yaml, 'r') as fd:
            yaml_text = fd.read()
            tty.debug('createtarball read spec yaml:')
            tty.debug(yaml_text)
            s = Spec.from_yaml(yaml_text)
        package = '/{0}'.format(s.dag_hash())
        matches = find_matching_specs(package, env=env)

@@ -402,7 +351,7 @@ def _createtarball(env, spec_file=None, packages=None, add_spec=True,
    else:
        tty.die("build cache file creation requires at least one" +
                " installed package spec, an active environment," +
                " or else a path to a json or yaml file containing a spec" +
                " or else a path to a yaml file containing a spec" +
                " to install")
    specs = set()

@@ -471,7 +420,7 @@ def createtarball(args):
    """create a binary package from an existing install"""

    # restrict matching to current environment if one is active
    env = ev.active_environment()
    env = ev.get_env(args, 'buildcache create')

    output_location = None
    if args.directory:
@@ -511,7 +460,7 @@ def createtarball(args):
    add_spec = ('package' in args.things_to_install)
    add_deps = ('dependencies' in args.things_to_install)

    _createtarball(env, spec_file=args.spec_file, packages=args.specs,
    _createtarball(env, spec_yaml=args.spec_yaml, packages=args.specs,
                   add_spec=add_spec, add_deps=add_deps,
                   output_location=output_location, signing_key=args.key,
                   force=args.force, make_relative=args.rel,
@@ -537,29 +486,15 @@ def install_tarball(spec, args):
    if s.external or s.virtual:
        tty.warn("Skipping external or virtual package %s" % spec.format())
        return

    # This argument is used only for bootstrapping specs without signatures,
    # since we need to check the sha256 of each tarball
    if not args.only_root:
        for d in s.dependencies(deptype=('link', 'run')):
            tty.msg("Installing buildcache for dependency spec %s" % d)
            install_tarball(d, args)

    for d in s.dependencies(deptype=('link', 'run')):
        tty.msg("Installing buildcache for dependency spec %s" % d)
        install_tarball(d, args)
    package = spack.repo.get(spec)
    if s.concrete and package.installed and not args.force:
        tty.warn("Package for spec %s already installed." % spec.format())
    else:
        tarball = bindist.download_tarball(spec)
        if tarball:
            if args.sha256:
                checker = spack.util.crypto.Checker(args.sha256)
                msg = ('cannot verify checksum for "{0}"'
                       ' [expected={1}]')
                msg = msg.format(tarball, args.sha256)
                if not checker.check(tarball):
                    raise spack.binary_distribution.NoChecksumException(msg)
                tty.debug('Verified SHA256 checksum of the build cache')

            tty.msg('Installing buildcache for spec %s' % spec.format())
            bindist.extract_tarball(spec, tarball, args.allow_root,
                                    args.unsigned, args.force)
@@ -572,13 +507,9 @@ def install_tarball(spec, args):

def listspecs(args):
    """list binary packages available from mirrors"""
    try:
        specs = bindist.update_cache_and_get_specs()
    except bindist.FetchCacheError as e:
        tty.error(e)

    specs = bindist.update_cache_and_get_specs()
    if not args.allarch:
        arch = spack.spec.Spec.default_arch()
        arch = spack.architecture.default_arch().to_spec()
        specs = [s for s in specs if s.satisfies(arch)]

    if args.specs:
@@ -621,10 +552,10 @@ def check_binaries(args):
    its result, specifically, if the exit code is non-zero, then at least
    one of the indicated specs needs to be rebuilt.
    """
    if args.spec or args.spec_file:
    if args.spec or args.spec_yaml:
        specs = [get_concrete_spec(args)]
    else:
        env = spack.cmd.require_active_env(cmd_name='buildcache')
        env = ev.get_env(args, 'buildcache', required=True)
        env.concretize()
        specs = env.all_specs()

@@ -658,16 +589,15 @@ def download_buildcache_files(concrete_spec, local_dest, require_cdashid,

    files_to_fetch = [
        {
            'url': [tarball_path_name],
            'url': tarball_path_name,
            'path': local_tarball_path,
            'required': True,
        }, {
            'url': [bindist.tarball_name(concrete_spec, '.spec.json'),
                    bindist.tarball_name(concrete_spec, '.spec.yaml')],
            'url': bindist.tarball_name(concrete_spec, '.spec.yaml'),
            'path': local_dest,
            'required': True,
        }, {
            'url': [bindist.tarball_name(concrete_spec, '.cdashid')],
            'url': bindist.tarball_name(concrete_spec, '.cdashid'),
            'path': local_dest,
            'required': require_cdashid,
        },
@@ -681,9 +611,9 @@ def get_tarball(args):
    command uses the process exit code to indicate its result, specifically,
    a non-zero exit code indicates that the command failed to download at
    least one of the required buildcache components. Normally, just the
    tarball and .spec.json files are required, but if the --require-cdashid
    tarball and .spec.yaml files are required, but if the --require-cdashid
    argument was provided, then a .cdashid file is also required."""
    if not args.spec and not args.spec_file:
    if not args.spec and not args.spec_yaml:
        tty.msg('No specs provided, exiting.')
        sys.exit(0)

@@ -700,7 +630,7 @@ def get_tarball(args):

def get_concrete_spec(args):
    spec_str = args.spec
    spec_yaml_path = args.spec_file
    spec_yaml_path = args.spec_yaml

    if not spec_str and not spec_yaml_path:
        tty.msg('Must provide either spec string or path to ' +
@@ -732,14 +662,14 @@ def get_buildcache_name(args):
    sys.exit(0)


def save_specfiles(args):
def save_spec_yamls(args):
    """Get full spec for dependencies, relative to root spec, and write them
    to files in the specified output directory. Uses exit code to signal
    success or failure. An exit code of zero means the command was likely
    successful. If any errors or exceptions are encountered, or if expected
    command-line arguments are not provided, then the exit code will be
    non-zero."""
    if not args.root_spec and not args.root_specfile:
    if not args.root_spec and not args.root_spec_yaml:
        tty.msg('No root spec provided, exiting.')
        sys.exit(1)

@@ -747,20 +677,20 @@ def save_specfiles(args):
        tty.msg('No dependent specs provided, exiting.')
        sys.exit(1)

    if not args.specfile_dir:
    if not args.yaml_dir:
        tty.msg('No yaml directory provided, exiting.')
        sys.exit(1)

    if args.root_specfile:
        with open(args.root_specfile) as fd:
            root_spec_as_json = fd.read()
    if args.root_spec_yaml:
        with open(args.root_spec_yaml) as fd:
            root_spec_as_yaml = fd.read()
    else:
        root_spec = Spec(args.root_spec)
        root_spec.concretize()
        root_spec_as_json = root_spec.to_json(hash=ht.build_hash)
        spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
    save_dependency_specfiles(
        root_spec_as_json, args.specfile_dir, args.specs.split(), spec_format)
        root_spec_as_yaml = root_spec.to_yaml(hash=ht.build_hash)

    save_dependency_spec_yamls(
        root_spec_as_yaml, args.yaml_dir, args.specs.split())

    sys.exit(0)

@@ -769,10 +699,10 @@ def buildcache_copy(args):
    """Copy a buildcache entry and all its files from one mirror, given as
    '--base-dir', to some other mirror, specified as '--destination-url'.
    The specific buildcache entry to be copied from one location to the
    other is identified using the '--spec-file' argument."""
    other is identified using the '--spec-yaml' argument."""
    # TODO: This sub-command should go away once #11117 is merged

    if not args.spec_file:
    if not args.spec_yaml:
        tty.msg('No spec yaml provided, exiting.')
        sys.exit(1)

@@ -792,12 +722,12 @@ def buildcache_copy(args):
        sys.exit(1)

    try:
        with open(args.spec_file, 'r') as fd:
        with open(args.spec_yaml, 'r') as fd:
            spec = Spec.from_yaml(fd.read())
    except Exception as e:
        tty.debug(e)
        tty.error('Unable to concretize spec from yaml {0}'.format(
            args.spec_file))
            args.spec_yaml))
        sys.exit(1)

    dest_root_path = dest_url
@@ -812,15 +742,10 @@ def buildcache_copy(args):
    tarball_dest_path = os.path.join(dest_root_path, tarball_rel_path)

    specfile_rel_path = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.spec.json'))
        build_cache_dir, bindist.tarball_name(spec, '.spec.yaml'))
    specfile_src_path = os.path.join(args.base_dir, specfile_rel_path)
    specfile_dest_path = os.path.join(dest_root_path, specfile_rel_path)

    specfile_rel_path_yaml = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.spec.yaml'))
    specfile_src_path_yaml = os.path.join(args.base_dir, specfile_rel_path)
    specfile_dest_path_yaml = os.path.join(dest_root_path, specfile_rel_path)

    cdashidfile_rel_path = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.cdashid'))
    cdashid_src_path = os.path.join(args.base_dir, cdashidfile_rel_path)
@@ -836,134 +761,12 @@ def buildcache_copy(args):
    tty.msg('Copying {0}'.format(specfile_rel_path))
    shutil.copyfile(specfile_src_path, specfile_dest_path)

    tty.msg('Copying {0}'.format(specfile_rel_path_yaml))
    shutil.copyfile(specfile_src_path_yaml, specfile_dest_path_yaml)

    # Copy the cdashid file (if exists) to the destination mirror
    if os.path.exists(cdashid_src_path):
        tty.msg('Copying {0}'.format(cdashidfile_rel_path))
        shutil.copyfile(cdashid_src_path, cdashid_dest_path)


def buildcache_sync(args):
    """ Syncs binaries (and associated metadata) from one mirror to another.
    Requires an active environment in order to know which specs to sync.

    Args:
        src (str): Source mirror URL
        dest (str): Destination mirror URL
    """
    # Figure out the source mirror
    source_location = None
    if args.src_directory:
        source_location = args.src_directory
        scheme = url_util.parse(source_location, scheme='<missing>').scheme
        if scheme != '<missing>':
            raise ValueError(
                '"--src-directory" expected a local path; got a URL, instead')
        # Ensure that the mirror lookup does not mistake this for named mirror
        source_location = 'file://' + source_location
    elif args.src_mirror_name:
        source_location = args.src_mirror_name
        result = spack.mirror.MirrorCollection().lookup(source_location)
        if result.name == "<unnamed>":
            raise ValueError(
                'no configured mirror named "{name}"'.format(
                    name=source_location))
    elif args.src_mirror_url:
        source_location = args.src_mirror_url
        scheme = url_util.parse(source_location, scheme='<missing>').scheme
        if scheme == '<missing>':
            raise ValueError(
                '"{url}" is not a valid URL'.format(url=source_location))

    src_mirror = spack.mirror.MirrorCollection().lookup(source_location)
    src_mirror_url = url_util.format(src_mirror.fetch_url)

    # Figure out the destination mirror
    dest_location = None
    if args.dest_directory:
        dest_location = args.dest_directory
        scheme = url_util.parse(dest_location, scheme='<missing>').scheme
        if scheme != '<missing>':
            raise ValueError(
                '"--dest-directory" expected a local path; got a URL, instead')
        # Ensure that the mirror lookup does not mistake this for named mirror
        dest_location = 'file://' + dest_location
    elif args.dest_mirror_name:
        dest_location = args.dest_mirror_name
        result = spack.mirror.MirrorCollection().lookup(dest_location)
        if result.name == "<unnamed>":
            raise ValueError(
                'no configured mirror named "{name}"'.format(
                    name=dest_location))
    elif args.dest_mirror_url:
        dest_location = args.dest_mirror_url
        scheme = url_util.parse(dest_location, scheme='<missing>').scheme
        if scheme == '<missing>':
            raise ValueError(
                '"{url}" is not a valid URL'.format(url=dest_location))

    dest_mirror = spack.mirror.MirrorCollection().lookup(dest_location)
    dest_mirror_url = url_util.format(dest_mirror.fetch_url)

    # Get the active environment
    env = spack.cmd.require_active_env(cmd_name='buildcache sync')

    tty.msg('Syncing environment buildcache files from {0} to {1}'.format(
        src_mirror_url, dest_mirror_url))

    build_cache_dir = bindist.build_cache_relative_path()
    buildcache_rel_paths = []

    tty.debug('Syncing the following specs:')
    for s in env.all_specs():
        tty.debug('  {0}{1}: {2}'.format(
            '* ' if s in env.roots() else '  ', s.name, s.dag_hash()))

        buildcache_rel_paths.extend([
            os.path.join(
                build_cache_dir, bindist.tarball_path_name(s, '.spack')),
            os.path.join(
                build_cache_dir, bindist.tarball_name(s, '.spec.yaml')),
            os.path.join(
                build_cache_dir, bindist.tarball_name(s, '.spec.json')),
            os.path.join(
                build_cache_dir, bindist.tarball_name(s, '.cdashid'))
        ])

    tmpdir = tempfile.mkdtemp()

    try:
        for rel_path in buildcache_rel_paths:
            src_url = url_util.join(src_mirror_url, rel_path)
            local_path = os.path.join(tmpdir, rel_path)
            dest_url = url_util.join(dest_mirror_url, rel_path)

            tty.debug('Copying {0} to {1} via {2}'.format(
                src_url, dest_url, local_path))

            stage = Stage(src_url,
                          name="temporary_file",
                          path=os.path.dirname(local_path),
                          keep=True)

            try:
                stage.create()
                stage.fetch()
                web_util.push_to_url(
                    local_path,
                    dest_url,
                    keep_original=True)
            except fs.FetchError as e:
                tty.debug('spack buildcache unable to sync {0}'.format(rel_path))
                tty.debug(e)
            finally:
                stage.destroy()
    finally:
        shutil.rmtree(tmpdir)


def update_index(mirror_url, update_keys=False):
    mirror = spack.mirror.MirrorCollection().lookup(mirror_url)
    outdir = url_util.format(mirror.push_url)
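Note: the `--spec-file` vs `--spec-yaml` split above comes down to picking a parser by file extension. A stdlib-only sketch of that dispatch (JSON branch only; the YAML branch would need PyYAML or Spack's own `Spec.from_yaml`), with a throwaway demo file:

import json
import os

def load_specfile(path):
    """Pick a parser by extension, like _createtarball does for spec files."""
    with open(path) as fd:
        contents = fd.read()
    if path.endswith('.json'):
        return json.loads(contents)
    # A real implementation would call a YAML parser here (e.g. PyYAML).
    raise NotImplementedError('YAML parsing needs an external library')

# Tiny demo file; a real spec file carries the full node dictionary.
with open('demo.spec.json', 'w') as fd:
    json.dump({'spec': {'name': 'zlib'}}, fd)
print(load_specfile('demo.spec.json'))
os.remove('demo.spec.json')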
@@ -14,7 +14,6 @@
|
||||
import spack.repo
|
||||
import spack.stage
|
||||
import spack.util.crypto
|
||||
from spack.package import preferred_version
|
||||
from spack.util.naming import valid_fully_qualified_module_name
|
||||
from spack.version import Version, ver
|
||||
|
||||
@@ -27,16 +26,9 @@ def setup_parser(subparser):
|
||||
subparser.add_argument(
|
||||
'--keep-stage', action='store_true',
|
||||
help="don't clean up staging area when command completes")
|
||||
sp = subparser.add_mutually_exclusive_group()
|
||||
sp.add_argument(
|
||||
subparser.add_argument(
|
||||
'-b', '--batch', action='store_true',
|
||||
help="don't ask which versions to checksum")
|
||||
sp.add_argument(
|
||||
'-l', '--latest', action='store_true',
|
||||
help="checksum the latest available version only")
|
||||
sp.add_argument(
|
||||
'-p', '--preferred', action='store_true',
|
||||
help="checksum the preferred version only")
|
||||
arguments.add_common_arguments(subparser, ['package'])
|
||||
subparser.add_argument(
|
||||
'versions', nargs=argparse.REMAINDER,
|
||||
@@ -56,38 +48,25 @@ def checksum(parser, args):
    # Get the package we're going to generate checksums for
    pkg = spack.repo.get(args.package)

    url_dict = {}
    if args.versions:
        # If the user asked for specific versions, use those
        url_dict = {}
        for version in args.versions:
            version = ver(version)
            if not isinstance(version, Version):
                tty.die("Cannot generate checksums for version lists or "
                        "version ranges. Use unambiguous versions.")
            url_dict[version] = pkg.url_for_version(version)
    elif args.preferred:
        version = preferred_version(pkg)
        url_dict = dict([(version, pkg.url_for_version(version))])
    else:
        # Otherwise, see what versions we can find online
        url_dict = pkg.fetch_remote_versions()
        if not url_dict:
            tty.die("Could not find any versions for {0}".format(pkg.name))

        # And ensure the specified version URLs take precedence, if available
        try:
            explicit_dict = {}
            for v in pkg.versions:
                if not v.isdevelop():
                    explicit_dict[v] = pkg.url_for_version(v)
            url_dict.update(explicit_dict)
        except spack.package.NoURLError:
            pass

    version_lines = spack.stage.get_checksums_for_versions(
        url_dict, pkg.name, keep_stage=args.keep_stage,
        batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),
        latest=args.latest, fetch_options=pkg.fetch_options)
        fetch_options=pkg.fetch_options)

    print()
    print(version_lines)
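The precedence logic in the hunk above relies on plain dict semantics: URLs scraped from the web are loaded first, then the package's explicitly declared version URLs overwrite them on key collisions. A tiny model with made-up URLs:

```python
# Versions scraped from the web are overridden by URLs the package
# declares explicitly; dict.update makes explicit entries win.
scraped = {'1.0': 'https://example.com/guessed-1.0.tar.gz',
           '1.1': 'https://example.com/guessed-1.1.tar.gz'}
explicit = {'1.0': 'https://example.com/official-1.0.tar.gz'}

url_dict = dict(scraped)
url_dict.update(explicit)   # explicit entries win on key collisions
assert url_dict['1.0'].endswith('official-1.0.tar.gz')
```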
@@ -78,8 +78,8 @@ def setup_parser(subparser):
        default=False, help="""Spack always check specs against configured
binary mirrors when generating the pipeline, regardless of whether or not
DAG pruning is enabled. This flag controls whether it might attempt to
fetch remote spec files directly (ensuring no spec is rebuilt if it
is present on the mirror), or whether it should reduce pipeline generation time
fetch remote spec.yaml files directly (ensuring no spec is rebuilt if it is
present on the mirror), or whether it should reduce pipeline generation time
by assuming all remote buildcache indices are up to date and only use those
to determine whether a given spec is up to date on mirrors. In the latter
case, specs might be needlessly rebuilt if remote buildcache indices are out

@@ -118,7 +118,7 @@ def ci_generate(args):
    for creating a build group for the generated workload and registering
    all generated jobs under that build group.  If this environment
    variable is not set, no build group will be created on CDash."""
    env = spack.cmd.require_active_env(cmd_name='ci generate')
    env = ev.get_env(args, 'ci generate', required=True)

    output_file = args.output_file
    copy_yaml_to = args.copy_to

@@ -152,7 +152,7 @@ def ci_generate(args):
def ci_reindex(args):
    """Rebuild the buildcache index associated with the mirror in the
    active, gitlab-enabled environment. """
    env = spack.cmd.require_active_env(cmd_name='ci rebuild-index')
    env = ev.get_env(args, 'ci rebuild-index', required=True)
    yaml_root = ev.config_dict(env.yaml)

    if 'mirrors' not in yaml_root or len(yaml_root['mirrors'].values()) < 1:

@@ -169,7 +169,7 @@ def ci_rebuild(args):
    """Check a single spec against the remote mirror, and rebuild it from
    source if the mirror does not contain the full hash match of the spec
    as computed locally. """
    env = spack.cmd.require_active_env(cmd_name='ci rebuild')
    env = ev.get_env(args, 'ci rebuild', required=True)

    # Make sure the environment is "gitlab-enabled", or else there's nothing
    # to do.
@@ -7,7 +7,6 @@
import os
import shutil

import llnl.util.filesystem
import llnl.util.tty as tty

import spack.bootstrap
@@ -15,9 +14,9 @@
import spack.cmd.common.arguments as arguments
import spack.cmd.test
import spack.config
import spack.main
import spack.repo
import spack.stage
import spack.util.path
from spack.paths import lib_path, var_path

description = "remove temporary build files and/or downloaded archives"
@@ -28,7 +27,7 @@
class AllClean(argparse.Action):
    """Activates flags -s -d -f -m and -p simultaneously"""
    def __call__(self, parser, namespace, values, option_string=None):
        parser.parse_args(['-sdfmp'], namespace=namespace)
        parser.parse_args(['-sdfmpb'], namespace=namespace)


def setup_parser(subparser):
@@ -49,11 +48,9 @@ def setup_parser(subparser):
        help="remove .pyc, .pyo files and __pycache__ folders")
    subparser.add_argument(
        '-b', '--bootstrap', action='store_true',
        help="remove software and configuration needed to bootstrap Spack")
        help="remove software needed to bootstrap Spack")
    subparser.add_argument(
        '-a', '--all', action=AllClean,
        help="equivalent to -sdfmp (does not include --bootstrap)",
        nargs=0
        '-a', '--all', action=AllClean, help="equivalent to -sdfmpb", nargs=0
    )
    arguments.add_common_arguments(subparser, ['specs'])

@@ -76,11 +73,7 @@ def clean(parser, args):
    if args.stage:
        tty.msg('Removing all temporary build stages')
        spack.stage.purge()
        # Temp directory where buildcaches are extracted
        extract_tmp = os.path.join(spack.store.layout.root, '.tmp')
        if os.path.exists(extract_tmp):
            tty.debug('Removing {0}'.format(extract_tmp))
            shutil.rmtree(extract_tmp)

    if args.downloads:
        tty.msg('Removing cached downloads')
        spack.caches.fetch_cache.destroy()
@@ -109,9 +102,8 @@ def clean(parser, args):
                shutil.rmtree(dname)

    if args.bootstrap:
        bootstrap_prefix = spack.util.path.canonicalize_path(
            spack.config.get('bootstrap:root')
        )
        msg = 'Removing bootstrapped software and configuration in "{0}"'
        tty.msg(msg.format(bootstrap_prefix))
        llnl.util.filesystem.remove_directory_contents(bootstrap_prefix)
        msg = 'Removing software in "{0}"'
        tty.msg(msg.format(spack.bootstrap.store_path()))
        with spack.store.use_store(spack.bootstrap.store_path()):
            uninstall = spack.main.SpackCommand('uninstall')
            uninstall('-a', '-y')
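The AllClean action above is a small argparse trick: re-parsing a bundled short-flag string into the same namespace so one flag fans out to many. A runnable sketch of just that mechanism (flag names are illustrative):

```python
import argparse

# A custom argparse.Action that re-parses a bundle of short flags so
# '-a' fans out to '-sdfmpb' in the same namespace.
class AllClean(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        parser.parse_args(['-sdfmpb'], namespace=namespace)

parser = argparse.ArgumentParser()
for flag in 'sdfmpb':
    parser.add_argument('-' + flag, action='store_true')
parser.add_argument('-a', action=AllClean, nargs=0)

args = parser.parse_args(['-a'])
assert args.s and args.b   # every bundled flag is now set
```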
@@ -69,7 +69,7 @@ def _specs(self, **kwargs):

        # If an environment is provided, we'll restrict the search to
        # only its installed packages.
        env = ev.active_environment()
        env = ev._active_environment
        if env:
            kwargs['hashes'] = set(env.all_hashes())

@@ -320,11 +320,3 @@ def add_cdash_args(subparser, add_help):
        default=None,
        help=cdash_help['buildstamp']
    )


@arg
def reuse():
    return Args(
        '--reuse', action='store_true', default=False,
        help='reuse installed dependencies'
    )
@@ -18,6 +18,7 @@
import spack.compilers
import spack.config
import spack.spec
from spack.spec import ArchSpec, CompilerSpec

description = "manage compilers"
section = "system"
@@ -77,13 +78,24 @@ def compiler_find(args):
    # None signals spack.compiler.find_compilers to use its default logic
    paths = args.add_paths or None

    # Below scope=None because we want new compilers that don't appear
    # in any other configuration.
    new_compilers = spack.compilers.find_new_compilers(paths, scope=None)
    # Don't initialize compilers config via compilers.get_compiler_config.
    # Just let compiler_find do the
    # entire process and return an empty config from all_compilers
    # Default for any other process is init_config=True
    compilers = [c for c in spack.compilers.find_compilers(paths)]
    new_compilers = []
    for c in compilers:
        arch_spec = ArchSpec((None, c.operating_system, c.target))
        same_specs = spack.compilers.compilers_for_spec(
            c.spec, arch_spec, init_config=False)

        if not same_specs:
            new_compilers.append(c)

    if new_compilers:
        spack.compilers.add_compilers_to_config(
            new_compilers, scope=args.scope, init_config=False
        )
        spack.compilers.add_compilers_to_config(new_compilers,
                                                scope=args.scope,
                                                init_config=False)
        n = len(new_compilers)
        s = 's' if n > 1 else ''
@@ -98,7 +110,7 @@ def compiler_find(args):


def compiler_remove(args):
    cspec = spack.spec.CompilerSpec(args.compiler_spec)
    cspec = CompilerSpec(args.compiler_spec)
    compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
    if not compilers:
        tty.die("No compilers match spec %s" % cspec)
@@ -116,7 +128,7 @@ def compiler_remove(args):

def compiler_info(args):
    """Print info about all compilers matching a spec."""
    cspec = spack.spec.CompilerSpec(args.compiler_spec)
    cspec = CompilerSpec(args.compiler_spec)
    compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)

    if not compilers:
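The duplicate filter in compiler_find above keeps only compilers whose spec/OS/target combination is not already configured. Reduced to plain tuples and sets, the shape of the check is roughly:

```python
# 'found' and 'known' are hypothetical stand-ins for Spack's compiler
# objects and the existing compilers.yaml entries.
found = [('gcc@9.3.0', 'ubuntu20.04', 'x86_64'),
         ('clang@12.0.0', 'ubuntu20.04', 'x86_64')]
known = {('gcc@9.3.0', 'ubuntu20.04', 'x86_64')}

new_compilers = [c for c in found if c not in known]
assert new_compilers == [('clang@12.0.0', 'ubuntu20.04', 'x86_64')]
```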
@@ -3,8 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import spack.cmd
import spack.cmd.common.arguments
import spack.environment as ev

description = 'concretize an environment and write a lockfile'
@@ -13,7 +11,6 @@


def setup_parser(subparser):
    spack.cmd.common.arguments.add_common_arguments(subparser, ['reuse'])
    subparser.add_argument(
        '-f', '--force', action='store_true',
        help="Re-concretize even if already concretized.")
@@ -26,7 +23,7 @@ def setup_parser(subparser):


def concretize(parser, args):
    env = spack.cmd.require_active_env(cmd_name='concretize')
    env = ev.get_env(args, 'concretize', required=True)

    if args.test == 'all':
        tests = True
@@ -36,8 +33,6 @@ def concretize(parser, args):
        tests = False

    with env.write_transaction():
        concretized_specs = env.concretize(
            force=args.force, tests=tests, reuse=args.reuse
        )
        concretized_specs = env.concretize(force=args.force, tests=tests)
        ev.display_specs(concretized_specs)
        env.write()
@@ -118,7 +118,7 @@ def _get_scope_and_section(args):

    # w/no args and an active environment, point to env manifest
    if not section:
        env = ev.active_environment()
        env = ev.get_env(args, 'config edit')
        if env:
            scope = env.env_file_config_scope_name()

@@ -143,10 +143,7 @@ def config_get(args):
    """
    scope, section = _get_scope_and_section(args)

    if section is not None:
        spack.config.config.print_section(section)

    elif scope and scope.startswith('env:'):
    if scope and scope.startswith('env:'):
        config_file = spack.config.config.get_config_filename(scope, section)
        if os.path.exists(config_file):
            with open(config_file) as f:
@@ -154,6 +151,9 @@ def config_get(args):
        else:
            tty.die('environment has no %s file' % ev.manifest_name)

    elif section is not None:
        spack.config.config.print_section(section)

    else:
        tty.die('`spack config get` requires a section argument '
                'or an active environment.')
@@ -170,19 +170,12 @@ def config_edit(args):
    With no arguments and an active environment, edit the spack.yaml for
    the active environment.
    """
    spack_env = os.environ.get(ev.spack_env_var)
    if spack_env and not args.scope:
        # Don't use the scope object for envs, as `config edit` can be called
        # for a malformed environment. Use SPACK_ENV to find spack.yaml.
        config_file = ev.manifest_file(spack_env)
    else:
        # If we aren't editing a spack.yaml file, get config path from scope.
        scope, section = _get_scope_and_section(args)
        if not scope and not section:
            tty.die('`spack config edit` requires a section argument '
                    'or an active environment.')
        config_file = spack.config.config.get_config_filename(scope, section)
    scope, section = _get_scope_and_section(args)
    if not scope and not section:
        tty.die('`spack config edit` requires a section argument '
                'or an active environment.')

    config_file = spack.config.config.get_config_filename(scope, section)
    if args.print_file:
        print(config_file)
    else:
@@ -433,8 +426,7 @@ def config_prefer_upstream(args):
                or var_name not in spec.package.variants):
            continue

        variant_desc, _ = spec.package.variants[var_name]
        if variant.value != variant_desc.default:
        if variant.value != spec.package.variants[var_name].default:
            variants.append(str(variant))
    variants.sort()
    variants = ' '.join(variants)
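The config edit fallback above prefers the SPACK_ENV manifest, if set, over scope-based resolution, precisely so a malformed environment can still be opened in an editor. A minimal sketch of that decision, with hypothetical paths:

```python
import os

# If SPACK_ENV names an environment directory (possibly malformed),
# edit its spack.yaml directly; otherwise fall back to the scope's
# config file. Paths here are hypothetical.
def manifest_to_edit(environ, scope_file):
    spack_env = environ.get('SPACK_ENV')
    if spack_env:
        return os.path.join(spack_env, 'spack.yaml')
    return scope_file

assert manifest_to_edit({'SPACK_ENV': '/tmp/myenv'}, None) == '/tmp/myenv/spack.yaml'
assert manifest_to_edit({}, '/etc/spack/config.yaml') == '/etc/spack/config.yaml'
```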
@@ -5,10 +5,7 @@
import os
import os.path

import llnl.util.tty

import spack.container
import spack.container.images
import spack.monitor

description = ("creates recipes to build images for different"
@@ -19,26 +16,9 @@

def setup_parser(subparser):
    monitor_group = spack.monitor.get_monitor_group(subparser)  # noqa
    subparser.add_argument(
        '--list-os', action='store_true', default=False,
        help='list all the OS that can be used in the bootstrap phase and exit'
    )
    subparser.add_argument(
        '--last-stage',
        choices=('bootstrap', 'build', 'final'),
        default='final',
        help='last stage in the container recipe'
    )


def containerize(parser, args):
    if args.list_os:
        possible_os = spack.container.images.all_bootstrap_os()
        msg = 'The following operating systems can be used to bootstrap Spack:'
        msg += '\n{0}'.format(' '.join(possible_os))
        llnl.util.tty.msg(msg)
        return

    config_dir = args.env_dir or os.getcwd()
    config_file = os.path.abspath(os.path.join(config_dir, 'spack.yaml'))
    if not os.path.exists(config_file):
@@ -49,12 +29,10 @@ def containerize(parser, args):

    # If we have a monitor request, add monitor metadata to config
    if args.use_monitor:
        config['spack']['monitor'] = {
            "disable_auth": args.monitor_disable_auth,
            "host": args.monitor_host,
            "keep_going": args.monitor_keep_going,
            "prefix": args.monitor_prefix,
            "tags": args.monitor_tags
        }
    recipe = spack.container.recipe(config, last_phase=args.last_stage)
        config['spack']['monitor'] = {"disable_auth": args.monitor_disable_auth,
                                      "host": args.monitor_host,
                                      "keep_going": args.monitor_keep_going,
                                      "prefix": args.monitor_prefix,
                                      "tags": args.monitor_tags}
    recipe = spack.container.recipe(config)
    print(recipe)
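The monitor hunk above only folds CLI options into the environment's configuration dictionary before the recipe is rendered. In miniature, with made-up values:

```python
# Made-up CLI values; the real ones come from argparse.
config = {'spack': {'specs': ['zlib']}}
monitor_args = {'disable_auth': False, 'host': 'https://monitor.example.com',
                'keep_going': True, 'prefix': 'ms1', 'tags': None}

# Fold the options into the spack.yaml data before rendering a recipe.
config['spack']['monitor'] = dict(monitor_args)
assert config['spack']['monitor']['host'].startswith('https://')
```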
@@ -36,7 +36,7 @@ def deactivate(parser, args):
    if len(specs) != 1:
        tty.die("deactivate requires one spec.  %d given." % len(specs))

    env = ev.active_environment()
    env = ev.get_env(args, 'deactivate')
    spec = spack.cmd.disambiguate_spec(specs[0], env)
    pkg = spec.package


@@ -14,9 +14,9 @@
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir

import spack.architecture as architecture
import spack.config
import spack.paths
import spack.platforms
from spack.main import get_version
from spack.util.executable import which

@@ -74,7 +74,6 @@ def create_db_tarball(args):
    wd = os.path.dirname(str(spack.store.root))
    with working_dir(wd):
        files = [spack.store.db._index_path]
        files += glob('%s/*/*/*/.spack/spec.json' % base)
        files += glob('%s/*/*/*/.spack/spec.yaml' % base)
        files = [os.path.relpath(f) for f in files]

@@ -87,15 +86,10 @@ def create_db_tarball(args):


def report(args):
    host_platform = spack.platforms.host()
    host_os = host_platform.operating_system('frontend')
    host_target = host_platform.target('frontend')
    architecture = spack.spec.ArchSpec(
        (str(host_platform), str(host_os), str(host_target))
    )
    print('* **Spack:**', get_version())
    print('* **Python:**', platform.python_version())
    print('* **Platform:**', architecture)
    print('* **Platform:**', architecture.Arch(
        architecture.platform(), 'frontend', 'frontend'))
    print('* **Concretizer:**', spack.config.get('config:concretizer'))
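The report() change above assembles a (platform, os, target) triple for the frontend before printing it. A rough stand-in using only the stdlib platform module (the 'frontend' OS label is a placeholder for what spack.platforms would return):

```python
import platform

# spack.platforms would supply real frontend OS/target names; the
# 'frontend' label here is only a placeholder.
host_triple = (platform.system().lower(), 'frontend', platform.machine())
print('* **Platform:**', '-'.join(host_triple))
```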
@@ -41,7 +41,7 @@ def dependencies(parser, args):
        tty.die("spack dependencies takes only one spec.")

    if args.installed:
        env = ev.active_environment()
        env = ev.get_env(args, 'dependencies')
        spec = spack.cmd.disambiguate_spec(specs[0], env)

        format_string = '{name}{@version}{%compiler}{/hash:7}'

@@ -82,7 +82,7 @@ def dependents(parser, args):
        tty.die("spack dependents takes only one spec.")

    if args.installed:
        env = ev.active_environment()
        env = ev.get_env(args, 'dependents')
        spec = spack.cmd.disambiguate_spec(specs[0], env)

        format_string = '{name}{@version}{%compiler}{/hash:7}'

@@ -71,7 +71,7 @@ def setup_parser(sp):

def deprecate(parser, args):
    """Deprecate one spec in favor of another"""
    env = ev.active_environment()
    env = ev.get_env(args, 'deprecate')
    specs = spack.cmd.parse_specs(args.specs)

    if len(specs) != 2:

@@ -9,6 +9,7 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
from spack.error import SpackError

description = "add a spec to an environment's dev-build information"
@@ -36,7 +37,7 @@ def setup_parser(subparser):


def develop(parser, args):
    env = spack.cmd.require_active_env(cmd_name='develop')
    env = ev.get_env(args, 'develop', required=True)

    if not args.spec:
        if args.clone is False:
@@ -68,8 +68,8 @@ def compare_specs(a, b, to_string=False, color=None):
    # Prepare a solver setup to parse differences
    setup = asp.SpackSolverSetup()

    a_facts = set(t for t in setup.spec_clauses(a, body=True, expand_hashes=True))
    b_facts = set(t for t in setup.spec_clauses(b, body=True, expand_hashes=True))
    a_facts = set(t for t in setup.spec_clauses(a, body=True))
    b_facts = set(t for t in setup.spec_clauses(b, body=True))

    # We want to present them to the user as simple key: values
    intersect = sorted(a_facts.intersection(b_facts))
@@ -175,7 +175,7 @@ def group_by_type(diffset):


def diff(parser, args):
    env = ev.active_environment()
    env = ev.get_env(args, 'diff')

    if len(args.specs) != 2:
        tty.die("You must provide two specs to diff.")
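compare_specs() above reduces to set algebra over fact tuples emitted for each spec; which clauses are emitted (with or without expand_hashes) is what the hunk changes. A minimal model with invented facts:

```python
# Invented fact tuples standing in for the solver setup's spec clauses.
a_facts = {('version', 'zlib', '1.2.11'), ('variant', 'zlib', 'optimize')}
b_facts = {('version', 'zlib', '1.2.8'), ('variant', 'zlib', 'optimize')}

intersect = sorted(a_facts & b_facts)   # what the two specs share
a_not_b = sorted(a_facts - b_facts)     # facts unique to the first spec
b_not_a = sorted(b_facts - a_facts)     # facts unique to the second spec
assert ('variant', 'zlib', 'optimize') in intersect
```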
@@ -6,7 +6,7 @@
import os
import shutil
import sys
import tempfile
from collections import namedtuple

import llnl.util.filesystem as fs
import llnl.util.tty as tty
@@ -20,10 +20,8 @@
import spack.cmd.uninstall
import spack.config
import spack.environment as ev
import spack.environment.shell
import spack.schema.env
import spack.util.string as string
from spack.util.environment import EnvironmentModifications

description = "manage virtual environments"
section = "environments"
@@ -71,90 +69,49 @@ def env_activate_setup_parser(subparser):
        const=False, default=True,
        help="do not update PATH etc. with associated view")

    subparser.add_argument(
        '-d', '--dir', action='store_true', default=False,
        help="force spack to treat env as a directory, not a name")
    subparser.add_argument(
        '-p', '--prompt', action='store_true', default=False,
        help="decorate the command line prompt when activating")

    env_options = subparser.add_mutually_exclusive_group()
    env_options.add_argument(
        '--temp', action='store_true', default=False,
        help='create and activate an environment in a temporary directory')
    env_options.add_argument(
        '-d', '--dir', default=None,
        help="activate the environment in this directory")
    env_options.add_argument(
        metavar='env', dest='activate_env', nargs='?', default=None,
    subparser.add_argument(
        metavar='env', dest='activate_env',
        help='name of environment to activate')


def create_temp_env_directory():
    """
    Returns the path of a temporary directory in which to
    create an environment
    """
    return tempfile.mkdtemp(prefix="spack-")


def env_activate(args):
    if not args.activate_env and not args.dir and not args.temp:
        tty.die('spack env activate requires an environment name, directory, or --temp')

    env = args.activate_env
    if not args.shell:
        spack.cmd.common.shell_init_instructions(
            "spack env activate",
            " eval `spack env activate {sh_arg} [...]`",
            " eval `spack env activate {sh_arg} %s`" % env,
        )
        return 1

    # Error out when -e, -E, -D flags are given, cause they are ambiguous.
    if args.env or args.no_env or args.env_dir:
        tty.die('Calling spack env activate with --env, --env-dir and --no-env '
                'is ambiguous')
    if ev.exists(env) and not args.dir:
        spack_env = ev.root(env)
        short_name = env
        env_prompt = '[%s]' % env

    env_name_or_dir = args.activate_env or args.dir

    # Temporary environment
    if args.temp:
        env = create_temp_env_directory()
        env_path = os.path.abspath(env)
        short_name = os.path.basename(env_path)
        ev.Environment(env).write(regenerate=False)

    # Named environment
    elif ev.exists(env_name_or_dir) and not args.dir:
        env_path = ev.root(env_name_or_dir)
        short_name = env_name_or_dir

    # Environment directory
    elif ev.is_env_dir(env_name_or_dir):
        env_path = os.path.abspath(env_name_or_dir)
        short_name = os.path.basename(env_path)
    elif ev.is_env_dir(env):
        spack_env = os.path.abspath(env)
        short_name = os.path.basename(os.path.abspath(env))
        env_prompt = '[%s]' % short_name

    else:
        tty.die("No such environment: '%s'" % env_name_or_dir)
        tty.die("No such environment: '%s'" % env)

    env_prompt = '[%s]' % short_name
    if spack_env == os.environ.get('SPACK_ENV'):
        tty.debug("Environment %s is already active" % args.activate_env)
        return

    # We only support one active environment at a time, so deactivate the current one.
    if ev.active_environment() is None:
        cmds = ''
        env_mods = EnvironmentModifications()
    else:
        cmds = spack.environment.shell.deactivate_header(shell=args.shell)
        env_mods = spack.environment.shell.deactivate()

    # Activate new environment
    active_env = ev.Environment(env_path)
    cmds += spack.environment.shell.activate_header(
        env=active_env,
        shell=args.shell,
    active_env = ev.get_env(namedtuple('args', ['env'])(env),
                            'activate')
    cmds = ev.activate(
        active_env, add_view=args.with_view, shell=args.shell,
        prompt=env_prompt if args.prompt else None
    )
    env_mods.extend(spack.environment.shell.activate(
        env=active_env,
        add_view=args.with_view
    ))
    cmds += env_mods.shell_modifications(args.shell)
    sys.stdout.write(cmds)
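The --temp path above creates a throwaway directory, names the environment after its basename, and derives the prompt decoration from it. A short sketch of that naming logic:

```python
import os
import tempfile

# Create a throwaway env directory and derive its short name and prompt.
def temp_env():
    env_path = os.path.abspath(tempfile.mkdtemp(prefix='spack-'))
    short_name = os.path.basename(env_path)
    return env_path, '[%s]' % short_name

path, prompt = temp_env()
assert prompt.startswith('[spack-')
```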
@@ -183,17 +140,10 @@ def env_deactivate(args):
        )
        return 1

    # Error out when -e, -E, -D flags are given, cause they are ambiguous.
    if args.env or args.no_env or args.env_dir:
        tty.die('Calling spack env deactivate with --env, --env-dir and --no-env '
                'is ambiguous')

    if ev.active_environment() is None:
    if 'SPACK_ENV' not in os.environ:
        tty.die('No environment is currently active.')

    cmds = spack.environment.shell.deactivate_header(args.shell)
    env_mods = spack.environment.shell.deactivate()
    cmds += env_mods.shell_modifications(args.shell)
    cmds = ev.deactivate(shell=args.shell)
    sys.stdout.write(cmds)


@@ -365,7 +315,7 @@ def env_view_setup_parser(subparser):


def env_view(args):
    env = ev.active_environment()
    env = ev.get_env(args, 'env view')

    if env:
        if args.action == ViewAction.regenerate:
@@ -392,7 +342,7 @@ def env_status_setup_parser(subparser):


def env_status(args):
    env = ev.active_environment()
    env = ev.get_env(args, 'env status')
    if env:
        if env.path == os.getcwd():
            tty.msg('Using %s in current directory: %s'
@@ -423,7 +373,7 @@ def env_loads_setup_parser(subparser):


def env_loads(args):
    env = spack.cmd.require_active_env(cmd_name='env loads')
    env = ev.get_env(args, 'env loads', required=True)

    # Set the module types that have been selected
    module_type = args.module_type

@@ -67,7 +67,7 @@ def extensions(parser, args):
    if not spec[0].package.extendable:
        tty.die("%s is not an extendable package." % spec[0].name)

    env = ev.active_environment()
    env = ev.get_env(args, 'extensions')
    spec = cmd.disambiguate_spec(spec[0], env)

    if not spec.package.extendable:
@@ -5,17 +5,23 @@
from __future__ import print_function

import argparse
import os
import re
import sys
from collections import defaultdict, namedtuple

import six

import llnl.util.filesystem
import llnl.util.tty as tty
import llnl.util.tty.colify as colify

import spack
import spack.cmd
import spack.cmd.common.arguments
import spack.detection
import spack.error
import spack.util.environment
import spack.util.spack_yaml as syaml

description = "manage external packages in Spack configuration"
section = "config"
@@ -47,6 +53,104 @@ def setup_parser(subparser):
    )


def is_executable(path):
    return os.path.isfile(path) and os.access(path, os.X_OK)


def _get_system_executables():
    """Get the paths of all executables available from the current PATH.

    For convenience, this is constructed as a dictionary where the keys are
    the executable paths and the values are the names of the executables
    (i.e. the basename of the executable path).

    There may be multiple paths with the same basename. In this case it is
    assumed there are two different instances of the executable.
    """
    path_hints = spack.util.environment.get_path('PATH')
    search_paths = llnl.util.filesystem.search_paths_for_executables(
        *path_hints)

    path_to_exe = {}
    # Reverse order of search directories so that an exe in the first PATH
    # entry overrides later entries
    for search_path in reversed(search_paths):
        for exe in os.listdir(search_path):
            exe_path = os.path.join(search_path, exe)
            if is_executable(exe_path):
                path_to_exe[exe_path] = exe
    return path_to_exe


ExternalPackageEntry = namedtuple(
    'ExternalPackageEntry',
    ['spec', 'base_dir'])


def _generate_pkg_config(external_pkg_entries):
    """Generate config according to the packages.yaml schema for a single
    package.

    This does not generate the entire packages.yaml. For example, given some
    external entries for the CMake package, this could return::

        {
            'externals': [{
                'spec': 'cmake@3.17.1',
                'prefix': '/opt/cmake-3.17.1/'
            }, {
                'spec': 'cmake@3.16.5',
                'prefix': '/opt/cmake-3.16.5/'
            }]
        }
    """

    pkg_dict = syaml.syaml_dict()
    pkg_dict['externals'] = []
    for e in external_pkg_entries:
        if not _spec_is_valid(e.spec):
            continue

        external_items = [('spec', str(e.spec)), ('prefix', e.base_dir)]
        if e.spec.external_modules:
            external_items.append(('modules', e.spec.external_modules))

        if e.spec.extra_attributes:
            external_items.append(
                ('extra_attributes',
                 syaml.syaml_dict(e.spec.extra_attributes.items()))
            )

        # external_items.extend(e.spec.extra_attributes.items())
        pkg_dict['externals'].append(
            syaml.syaml_dict(external_items)
        )

    return pkg_dict


def _spec_is_valid(spec):
    try:
        str(spec)
    except spack.error.SpackError:
        # It is assumed here that we can at least extract the package name from
        # the spec so we can look up the implementation of
        # determine_spec_details
        tty.warn('Constructed spec for {0} does not have a string'
                 ' representation'.format(spec.name))
        return False

    try:
        spack.spec.Spec(str(spec))
    except spack.error.SpackError:
        tty.warn('Constructed spec has a string representation but the string'
                 ' representation does not evaluate to a valid spec: {0}'
                 .format(str(spec)))
        return False

    return True


def external_find(args):
    # Construct the list of possible packages to be detected
    packages_to_check = []
@@ -72,9 +176,9 @@ def external_find(args):
    if not args.tags and not packages_to_check:
        packages_to_check = spack.repo.path.all_packages()

    detected_packages = spack.detection.by_executable(packages_to_check)
    new_entries = spack.detection.update_configuration(
        detected_packages, scope=args.scope, buildable=not args.not_buildable
    pkg_to_entries = _get_external_packages(packages_to_check)
    new_entries = _update_pkg_config(
        args.scope, pkg_to_entries, args.not_buildable
    )
    if new_entries:
        path = spack.config.config.get_config_filename(args.scope, 'packages')
@@ -86,6 +190,163 @@ def external_find(args):
        tty.msg('No new external packages detected')


def _group_by_prefix(paths):
    groups = defaultdict(set)
    for p in paths:
        groups[os.path.dirname(p)].add(p)
    return groups.items()


def _convert_to_iterable(single_val_or_multiple):
    x = single_val_or_multiple
    if x is None:
        return []
    elif isinstance(x, six.string_types):
        return [x]
    elif isinstance(x, spack.spec.Spec):
        # Specs are iterable, but a single spec should be converted to a list
        return [x]

    try:
        iter(x)
        return x
    except TypeError:
        return [x]


def _determine_base_dir(prefix):
    # Given a prefix where an executable is found, assuming that prefix ends
    # with /bin/, strip off the 'bin' directory to get a Spack-compatible
    # prefix
    assert os.path.isdir(prefix)
    if os.path.basename(prefix) == 'bin':
        return os.path.dirname(prefix)


def _get_predefined_externals():
    # Pull from all scopes when looking for preexisting external package
    # entries
    pkg_config = spack.config.get('packages')
    already_defined_specs = set()
    for pkg_name, per_pkg_cfg in pkg_config.items():
        for item in per_pkg_cfg.get('externals', []):
            already_defined_specs.add(spack.spec.Spec(item['spec']))
    return already_defined_specs


def _update_pkg_config(scope, pkg_to_entries, not_buildable):
    predefined_external_specs = _get_predefined_externals()

    pkg_to_cfg, all_new_specs = {}, []
    for pkg_name, ext_pkg_entries in pkg_to_entries.items():
        new_entries = list(
            e for e in ext_pkg_entries
            if (e.spec not in predefined_external_specs))

        pkg_config = _generate_pkg_config(new_entries)
        all_new_specs.extend([
            spack.spec.Spec(x['spec']) for x in pkg_config.get('externals', [])
        ])
        if not_buildable:
            pkg_config['buildable'] = False
        pkg_to_cfg[pkg_name] = pkg_config

    pkgs_cfg = spack.config.get('packages', scope=scope)

    pkgs_cfg = spack.config.merge_yaml(pkgs_cfg, pkg_to_cfg)
    spack.config.set('packages', pkgs_cfg, scope=scope)

    return all_new_specs


def _get_external_packages(packages_to_check, system_path_to_exe=None):
    if not system_path_to_exe:
        system_path_to_exe = _get_system_executables()

    exe_pattern_to_pkgs = defaultdict(list)
    for pkg in packages_to_check:
        if hasattr(pkg, 'executables'):
            for exe in pkg.executables:
                exe_pattern_to_pkgs[exe].append(pkg)

    pkg_to_found_exes = defaultdict(set)
    for exe_pattern, pkgs in exe_pattern_to_pkgs.items():
        compiled_re = re.compile(exe_pattern)
        for path, exe in system_path_to_exe.items():
            if compiled_re.search(exe):
                for pkg in pkgs:
                    pkg_to_found_exes[pkg].add(path)

    pkg_to_entries = defaultdict(list)
    resolved_specs = {}  # spec -> exe found for the spec

    for pkg, exes in pkg_to_found_exes.items():
        if not hasattr(pkg, 'determine_spec_details'):
            tty.warn("{0} must define 'determine_spec_details' in order"
                     " for Spack to detect externally-provided instances"
                     " of the package.".format(pkg.name))
            continue

        # TODO: iterate through this in a predetermined order (e.g. by package
        # name) to get repeatable results when there are conflicts. Note that
        # if we take the prefixes returned by _group_by_prefix, then consider
        # them in the order that they appear in PATH, this should be sufficient
        # to get repeatable results.
        for prefix, exes_in_prefix in _group_by_prefix(exes):
            # TODO: multiple instances of a package can live in the same
            # prefix, and a package implementation can return multiple specs
            # for one prefix, but without additional details (e.g. about the
            # naming scheme which differentiates them), the spec won't be
            # usable.
            specs = _convert_to_iterable(
                pkg.determine_spec_details(prefix, exes_in_prefix))

            if not specs:
                tty.debug(
                    'The following executables in {0} were decidedly not '
                    'part of the package {1}: {2}'
                    .format(prefix, pkg.name, ', '.join(
                        _convert_to_iterable(exes_in_prefix)))
                )

            for spec in specs:
                pkg_prefix = _determine_base_dir(prefix)

                if not pkg_prefix:
                    tty.debug("{0} does not end with a 'bin/' directory: it"
                              " cannot be added as a Spack package"
                              .format(prefix))
                    continue

                if spec in resolved_specs:
                    prior_prefix = ', '.join(
                        _convert_to_iterable(resolved_specs[spec]))

                    tty.debug(
                        "Executables in {0} and {1} are both associated"
                        " with the same spec {2}"
                        .format(prefix, prior_prefix, str(spec)))
                    continue
                else:
                    resolved_specs[spec] = prefix

                try:
                    spec.validate_detection()
                except Exception as e:
                    msg = ('"{0}" has been detected on the system but will '
                           'not be added to packages.yaml [reason={1}]')
                    tty.warn(msg.format(spec, str(e)))
                    continue

                if spec.external_path:
                    pkg_prefix = spec.external_path

                pkg_to_entries[pkg.name].append(
                    ExternalPackageEntry(spec=spec, base_dir=pkg_prefix))

    return pkg_to_entries


def external_list(args):
    # Trigger a read of all packages, might take a long time.
    list(spack.repo.path.all_packages())
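_group_by_prefix() above buckets detected executables by their containing directory so that determine_spec_details runs once per prefix. The same helper in isolation, exercised with hypothetical paths:

```python
import os
from collections import defaultdict

# Bucket executables by the directory that contains them, so one
# detection call runs per prefix.
def group_by_prefix(paths):
    groups = defaultdict(set)
    for p in paths:
        groups[os.path.dirname(p)].add(p)
    return dict(groups)

buckets = group_by_prefix(['/opt/cmake/bin/cmake', '/opt/cmake/bin/ctest',
                           '/usr/bin/cmake'])
assert len(buckets['/opt/cmake/bin']) == 2
```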
@@ -47,7 +47,7 @@ def fetch(parser, args):
    # fetch all uninstalled specs from it otherwise fetch all.
    # If we are also not in an environment, complain to the
    # user that we don't know what to do.
    env = ev.active_environment()
    env = ev.get_env(args, "fetch")
    if env:
        if args.missing:
            specs = env.uninstalled_specs()
@@ -76,6 +76,10 @@ def fetch(parser, args):
            if args.missing and package.installed:
                continue

            # Do not attempt to fetch externals (they're local)
            if package.spec.external:
                continue

            package.do_fetch()

        package = spack.repo.get(spec)
@@ -6,6 +6,7 @@
from __future__ import print_function

import copy
import os
import sys

import llnl.util.lang
@@ -17,7 +18,9 @@
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.repo
import spack.user_environment as uenv
from spack.database import InstallStatuses
from spack.util.string import plural

description = "list and search installed packages"
section = "basic"
@@ -202,24 +205,24 @@ def display_env(env, args, decorator):


def find(parser, args):
    q_args = query_arguments(args)
    # Query the current store or the internal bootstrap store if required
    if args.bootstrap:
        bootstrap_store_path = spack.bootstrap.store_path()
        with spack.bootstrap.ensure_bootstrap_configuration():
            msg = 'Showing internal bootstrap store at "{0}"'
            tty.msg(msg.format(bootstrap_store_path))
            _find(parser, args)
        return
    _find(parser, args)
        msg = 'Showing internal bootstrap store at "{0}"'
        tty.msg(msg.format(bootstrap_store_path))
        with spack.store.use_store(bootstrap_store_path):
            results = args.specs(**q_args)
    else:
        results = args.specs(**q_args)


def _find(parser, args):
    q_args = query_arguments(args)
    results = args.specs(**q_args)

    env = ev.active_environment()
    decorator = lambda s, f: f
    added = set()
    removed = set()

    env = ev.get_env(args, 'find')
    if env:
        decorator, _, roots, _ = setup_env(env)
        decorator, added, roots, removed = setup_env(env)

    # use groups by default except with format.
    if args.groups is None:
@@ -230,7 +233,7 @@ def _find(parser, args):
        msg = "No package matches the query: {0}"
        msg = msg.format(' '.join(args.constraint))
        tty.msg(msg)
        raise SystemExit(1)
        return 1

    # If tags have been specified on the command line, filter by tags
    if args.tags:
@@ -238,7 +241,8 @@ def _find(parser, args):
        results = [x for x in results if x.name in packages_with_tags]

    if args.loaded:
        results = spack.cmd.filter_loaded_specs(results)
        hashes = os.environ.get(uenv.spack_loaded_hashes_var, '').split(':')
        results = [x for x in results if x.dag_hash() in hashes]

    # Display the result
    if args.json:
@@ -247,10 +251,7 @@ def _find(parser, args):
    if not args.format:
        if env:
            display_env(env, args, decorator)

        if sys.stdout.isatty() and args.groups:
            pkg_type = "loaded" if args.loaded else "installed"
            spack.cmd.print_how_many_pkgs(results, pkg_type)

        tty.msg("%s" % plural(len(results), 'installed package'))
        cmd.display_specs(
            results, args, decorator=decorator, all_headers=True)
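The --loaded filter the hunk above rewrites boils down to membership in the colon-separated hash list Spack keeps in the environment. A self-contained model (the SPACK_LOADED_HASHES name mirrors uenv.spack_loaded_hashes_var and is an assumption of this sketch):

```python
import os

# Keep only results whose DAG hash appears in the colon-separated list.
os.environ['SPACK_LOADED_HASHES'] = 'abc123:def456'
results = [('zlib', 'abc123'), ('bzip2', '999999')]

hashes = os.environ.get('SPACK_LOADED_HASHES', '').split(':')
loaded = [(name, h) for name, h in results if h in hashes]
assert loaded == [('zlib', 'abc123')]
```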
@@ -7,7 +7,7 @@

import spack.cmd.common.arguments
import spack.cmd.uninstall
import spack.environment as ev
import spack.environment
import spack.store

description = "remove specs that are now no longer needed"
@@ -24,7 +24,7 @@ def gc(parser, args):

    # Restrict garbage collection to the active environment
    # speculating over roots that are yet to be installed
    env = ev.active_environment()
    env = spack.environment.get_env(args=None, cmd_name='gc')
    if env:
        msg = 'Restricting the garbage collection to the "{0}" environment'
        tty.msg(msg.format(env.name))

@@ -10,7 +10,6 @@
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment as ev
import spack.store
from spack.graph import graph_ascii, graph_dot

@@ -36,7 +35,7 @@ def setup_parser(subparser):

    subparser.add_argument(
        '-i', '--installed', action='store_true',
        help="graph installed specs, or specs in the active env (implies --dot)")
        help="graph all installed specs in dot format (implies --dot)")

    arguments.add_common_arguments(subparser, ['deptype', 'specs'])

@@ -46,12 +45,7 @@ def graph(parser, args):
        if args.specs:
            tty.die("Can't specify specs with --installed")
        args.dot = True

        env = ev.active_environment()
        if env:
            specs = env.all_specs()
        else:
            specs = spack.store.db.query()
        specs = spack.store.db.query()

    else:
        specs = spack.cmd.parse_specs(args.specs, concretize=not args.static)
@@ -17,7 +17,6 @@
import spack.fetch_strategy as fs
import spack.repo
import spack.spec
from spack.package import preferred_version

description = 'get detailed information on a particular package'
section = 'basic'
@@ -57,7 +56,7 @@ def variant(s):
class VariantFormatter(object):
    def __init__(self, variants):
        self.variants = variants
        self.headers = ('Name [Default]', 'When', 'Allowed values', 'Description')
        self.headers = ('Name [Default]', 'Allowed values', 'Description')

        # Formats
        fmt_name = '{0} [{1}]'
@@ -68,11 +67,9 @@ def __init__(self, variants):
        self.column_widths = [len(x) for x in self.headers]

        # Expand columns based on max line lengths
        for k, e in variants.items():
            v, w = e
        for k, v in variants.items():
            candidate_max_widths = (
                len(fmt_name.format(k, self.default(v))),  # Name [Default]
                len(str(w)),
                len(v.allowed_values),  # Allowed values
                len(v.description)  # Description
            )
@@ -80,29 +77,26 @@ def __init__(self, variants):
            self.column_widths = (
                max(self.column_widths[0], candidate_max_widths[0]),
                max(self.column_widths[1], candidate_max_widths[1]),
                max(self.column_widths[2], candidate_max_widths[2]),
                max(self.column_widths[3], candidate_max_widths[3])
                max(self.column_widths[2], candidate_max_widths[2])
            )

        # Don't let name or possible values be less than max widths
        _, cols = tty.terminal_size()
        max_name = min(self.column_widths[0], 30)
        max_when = min(self.column_widths[1], 30)
        max_vals = min(self.column_widths[2], 20)
        max_vals = min(self.column_widths[1], 20)

        # allow the description column to extend as wide as the terminal.
        max_description = min(
            self.column_widths[3],
            self.column_widths[2],
            # min width 70 cols, 14 cols of margins and column spacing
            max(cols, 70) - max_name - max_vals - 14,
        )
        self.column_widths = (max_name, max_when, max_vals, max_description)
        self.column_widths = (max_name, max_vals, max_description)

        # Compute the format
        self.fmt = "%%-%ss%%-%ss%%-%ss%%s" % (
        self.fmt = "%%-%ss%%-%ss%%s" % (
            self.column_widths[0] + 4,
            self.column_widths[1] + 4,
            self.column_widths[2] + 4
            self.column_widths[1] + 4
        )

    def default(self, v):
@@ -120,27 +114,21 @@ def lines(self):
            underline = tuple([w * "=" for w in self.column_widths])
            yield ' ' + self.fmt % underline
            yield ''
            for k, e in sorted(self.variants.items()):
                v, w = e
            for k, v in sorted(self.variants.items()):
                name = textwrap.wrap(
                    '{0} [{1}]'.format(k, self.default(v)),
                    width=self.column_widths[0]
                )
                if len(w) == 1:
                    w = w[0]
                    if w == spack.spec.Spec():
                        w = '--'
                when = textwrap.wrap(str(w), width=self.column_widths[1])
                allowed = v.allowed_values.replace('True, False', 'on, off')
                allowed = textwrap.wrap(allowed, width=self.column_widths[2])
                allowed = textwrap.wrap(allowed, width=self.column_widths[1])
                description = []
                for d_line in v.description.split('\n'):
                    description += textwrap.wrap(
                        d_line,
                        width=self.column_widths[3]
                        width=self.column_widths[2]
                    )
                for t in zip_longest(
                    name, when, allowed, description, fillvalue=''
                    name, allowed, description, fillvalue=''
                ):
                    yield " " + self.fmt % t

@@ -203,38 +191,29 @@ def print_text_info(pkg):
        color.cprint('')
        color.cprint(section_title('Safe versions: '))
        color.cprint(version(' None'))
        color.cprint('')
        color.cprint(section_title('Deprecated versions: '))
        color.cprint(version(' None'))
    else:
        pad = padder(pkg.versions, 4)

        preferred = preferred_version(pkg)
        # Here we sort first on the fact that a version is marked
        # as preferred in the package, then on the fact that the
        # version is not develop, then lexicographically
        key_fn = lambda v: (pkg.versions[v].get('preferred', False),
                            not v.isdevelop(),
                            v)
        preferred = sorted(pkg.versions, key=key_fn).pop()
        url = ''
        if pkg.has_code:
            url = fs.for_package_version(pkg, preferred)

        line = version(' {0}'.format(pad(preferred))) + color.cescape(url)
        color.cprint(line)
        color.cprint('')
        color.cprint(section_title('Safe versions: '))

        safe = []
        deprecated = []
        for v in reversed(sorted(pkg.versions)):
            if pkg.has_code:
                url = fs.for_package_version(pkg, v)
            if pkg.versions[v].get('deprecated', False):
                deprecated.append((v, url))
            else:
                safe.append((v, url))

        for title, vers in [('Safe', safe), ('Deprecated', deprecated)]:
            color.cprint('')
            color.cprint(section_title('{0} versions: '.format(title)))
            if not vers:
                color.cprint(version(' None'))
                continue

            for v, url in vers:
                if not pkg.versions[v].get('deprecated', False):
                if pkg.has_code:
                    url = fs.for_package_version(pkg, v)
                line = version(' {0}'.format(pad(v))) + color.cescape(url)
                color.cprint(line)

@@ -243,7 +222,7 @@ def print_text_info(pkg):

    formatter = VariantFormatter(pkg.variants)
    for line in formatter.lines:
        color.cprint(color.cescape(line))
        color.cprint(line)

    if hasattr(pkg, 'phases') and pkg.phases:
        color.cprint('')
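The lines() generator above pads ragged, textwrap-wrapped columns with empty strings so each physical row prints every column. The core of that trick:

```python
from itertools import zip_longest

# Wrapped cells for one variant row; the columns have different heights.
name = ['shared [on]']
allowed = ['on, off']
description = ['Build shared', 'libraries']

# fillvalue='' pads the short columns so every physical line formats.
for row in zip_longest(name, allowed, description, fillvalue=''):
    print('%-16s%-12s%s' % row)
```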
@@ -78,7 +78,7 @@ def setup_parser(subparser):
    subparser.add_argument(
        '-u', '--until', type=str, dest='until', default=None,
        help="phase to stop after when installing (default None)")
    arguments.add_common_arguments(subparser, ['jobs', 'reuse'])
    arguments.add_common_arguments(subparser, ['jobs'])
    subparser.add_argument(
        '--overwrite', action='store_true',
        help="reinstall an existing spec, even if it has dependents")
@@ -204,7 +204,7 @@ def install_specs(cli_args, kwargs, specs):
    """

    # handle active environment, if any
    env = ev.active_environment()
    env = ev.get_env(cli_args, 'install')

    try:
        if env:
@@ -219,7 +219,7 @@ def install_specs(cli_args, kwargs, specs):

                # If there is any ambiguity in the above call to matching_spec
                # (i.e. if more than one spec in the environment matches), then
                # SpackEnvironmentError is raised, with a message listing the
                # SpackEnvironmentError is rasied, with a message listing the
                # the matches.  Getting to this point means there were either
                # no matches or exactly one match.

@@ -243,7 +243,7 @@ def install_specs(cli_args, kwargs, specs):

                if m_spec in env.roots() or cli_args.no_add:
                    # either the single match is a root spec (and --no-add is
                    # the default for roots) or --no-add was stated explicitly
                    # the default for roots) or --no-add was stated explictly
                    tty.debug('just install {0}'.format(m_spec.name))
                    specs_to_install.append(m_spec)
                else:
@@ -324,21 +324,17 @@ def get_tests(specs):
        else:
            return False

    # Parse cli arguments and construct a dictionary
    # that will be passed to the package installer
    update_kwargs_from_args(args, kwargs)

    if not args.spec and not args.specfiles:
        # if there are no args but an active environment
        # then install the packages from it.
        env = ev.active_environment()
        env = ev.get_env(args, 'install')
        if env:
            tests = get_tests(env.user_specs)
            kwargs['tests'] = tests

            if not args.only_concrete:
                with env.write_transaction():
                    concretized_specs = env.concretize(tests=tests, reuse=args.reuse)
                    concretized_specs = env.concretize(tests=tests)
                    ev.display_specs(concretized_specs)

                    # save view regeneration for later, so that we only do it
@@ -356,7 +352,7 @@ def get_tests(specs):

            tty.msg("Installing environment {0}".format(env.name))
            with reporter('build'):
                env.install_all(**kwargs)
                env.install_all(args, **kwargs)

            tty.debug("Regenerating environment views for {0}"
                      .format(env.name))
@@ -385,6 +381,10 @@ def get_tests(specs):
    if args.deprecated:
        spack.config.set('config:deprecated', True, scope='command_line')

    # Parse cli arguments and construct a dictionary
    # that will be passed to the package installer
    update_kwargs_from_args(args, kwargs)

    # 1. Abstract specs from cli
    abstract_specs = spack.cmd.parse_specs(args.spec)
    tests = get_tests(abstract_specs)
@@ -392,8 +392,7 @@ def get_tests(specs):

    try:
        specs = spack.cmd.parse_specs(
            args.spec, concretize=True, tests=tests, reuse=args.reuse
        )
            args.spec, concretize=True, tests=tests)
    except SpackError as e:
        tty.debug(e)
        reporter.concretization_report(e.message)
@@ -402,10 +401,7 @@ def get_tests(specs):
    # 2. Concrete specs from yaml files
    for file in args.specfiles:
        with open(file, 'r') as f:
            if file.endswith('yaml') or file.endswith('yml'):
                s = spack.spec.Spec.from_yaml(f)
            else:
                s = spack.spec.Spec.from_json(f)
            s = spack.spec.Spec.from_yaml(f)

        concretized = s.concretized()
        if concretized.dag_hash() != s.dag_hash():
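The specfile hunk above toggles between a YAML and a JSON reader based on file extension. A minimal model of the dispatch, with plain json standing in for Spec.from_json and the YAML branch only tagged, not parsed:

```python
import io
import json

# Pick a reader by extension; Spec.from_yaml/from_json are the real
# entry points, the return tags here are illustrative.
def read_spec(filename, f):
    if filename.endswith('yaml') or filename.endswith('yml'):
        return 'from_yaml', f.read()
    return 'from_json', json.load(f)

reader, _ = read_spec('zlib.json', io.StringIO('{}'))
assert reader == 'from_json'
```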
@@ -16,6 +16,7 @@
import llnl.util.tty as tty
from llnl.util.tty.colify import colify

import spack.cmd.common.arguments as arguments
import spack.dependency
import spack.repo
from spack.version import VersionList
@@ -56,6 +57,8 @@ def setup_parser(subparser):
        '-v', '--virtuals', action='store_true', default=False,
        help='include virtual packages in list')

    arguments.add_common_arguments(subparser, ['tags'])


def filter_by_name(pkgs, args):
    """
@@ -218,13 +221,9 @@ def head(n, span_id, title, anchor=None):

        out.write('<dt>Homepage:</dt>\n')
        out.write('<dd><ul class="first last simple">\n')

        if pkg.homepage:
            out.write(('<li>'
                       '<a class="reference external" href="%s">%s</a>'
                       '</li>\n') % (pkg.homepage, escape(pkg.homepage, True)))
        else:
            out.write('No homepage\n')
        out.write(('<li>'
                   '<a class="reference external" href="%s">%s</a>'
                   '</li>\n') % (pkg.homepage, escape(pkg.homepage, True)))
        out.write('</ul></dd>\n')

        out.write('<dt>Spack package:</dt>\n')
@@ -274,6 +273,13 @@ def list(parser, args):
    # Filter the set appropriately
    sorted_packages = filter_by_name(pkgs, args)

    # Filter by tags
    if args.tags:
        packages_with_tags = set(
            spack.repo.path.packages_with_tags(*args.tags))
        sorted_packages = set(sorted_packages) & packages_with_tags
        sorted_packages = sorted(sorted_packages)

    if args.update:
        # change output stream if user asked for update
        if os.path.exists(args.update):
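The tag filter above is a set intersection between the name-filtered packages and the packages carrying the requested tags, re-sorted afterwards. In miniature:

```python
# Hypothetical package names; the real sets come from the repo index.
sorted_packages = ['cmake', 'hdf5', 'zlib']
packages_with_tags = {'zlib', 'bzip2'}

filtered = sorted(set(sorted_packages) & packages_with_tags)
assert filtered == ['zlib']
```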
@@ -7,7 +7,6 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.cmd.find
import spack.environment as ev
import spack.store
import spack.user_environment as uenv
@@ -21,7 +20,8 @@
def setup_parser(subparser):
    """Parser is only constructed so that this prints a nice help
       message with -h. """
    arguments.add_common_arguments(subparser, ['constraint'])
    arguments.add_common_arguments(
        subparser, ['recurse_dependencies', 'installed_specs'])

    shells = subparser.add_mutually_exclusive_group()
    shells.add_argument(
@@ -53,29 +53,14 @@ def setup_parser(subparser):
        the dependencies"""
    )

    subparser.add_argument(
        '--list',
        action='store_true',
        default=False,
        help="show loaded packages: same as `spack find --loaded`"
    )


def load(parser, args):
    env = ev.active_environment()

    if args.list:
        results = spack.cmd.filter_loaded_specs(args.specs())
        if sys.stdout.isatty():
            spack.cmd.print_how_many_pkgs(results, "loaded")
        spack.cmd.display_specs(results)
        return

    env = ev.get_env(args, 'load')
    specs = [spack.cmd.disambiguate_spec(spec, env, first=args.load_first)
             for spec in spack.cmd.parse_specs(args.constraint)]
             for spec in spack.cmd.parse_specs(args.specs)]

    if not args.shell:
        specs_str = ' '.join(args.constraint) or "SPECS"
        specs_str = ' '.join(args.specs) or "SPECS"
        spack.cmd.common.shell_init_instructions(
            "spack load",
            " eval `spack load {sh_arg} %s`" % specs_str,
@@ -11,6 +11,7 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment
import spack.environment as ev
import spack.paths
import spack.repo
@@ -56,8 +57,8 @@ def setup_parser(subparser):
        help="build directory for a spec "
             "(requires it to be staged first)")
    directories.add_argument(
        '-e', '--env', action='store', dest='location_env', nargs='?', metavar="name",
        default=False, help="location of the named or current environment")
        '-e', '--env', action='store', dest='location_env',
        help="location of an environment managed by spack")

    arguments.add_common_arguments(subparser, ['spec'])

@@ -71,17 +72,10 @@ def location(parser, args):
        print(spack.paths.prefix)
        return

    # no -e corresponds to False, -e without arg to None, -e name to the string name.
    if args.location_env is not False:
        if args.location_env is None:
            # Get current environment path
            spack.cmd.require_active_env('location -e')
            path = ev.active_environment().path
        else:
            # Get named environment path
            if not ev.exists(args.location_env):
                tty.die("no such environment: '%s'" % args.location_env)
            path = ev.root(args.location_env)
    if args.location_env:
        path = spack.environment.root(args.location_env)
        if not os.path.isdir(path):
            tty.die("no such environment: '%s'" % args.location_env)
        print(path)
        return

@@ -103,7 +97,7 @@ def location(parser, args):

    # install_dir command matches against installed specs.
    if args.install_dir:
        env = ev.active_environment()
        env = ev.get_env(args, 'location')
        spec = spack.cmd.disambiguate_spec(specs[0], env)
        print(spec.prefix)
        return
Some files were not shown because too many files have changed in this diff.