Compare commits


1 commit

Author: Gregory Becker
SHA1: 771c4e4017
Message: add --reuse-only flag and update install_status for binaries
Date: 2022-05-04 11:31:57 -07:00
763 changed files with 5559 additions and 22346 deletions

View File

@@ -24,7 +24,6 @@ jobs:
fedora-clingo-sources:
runs-on: ubuntu-latest
container: "fedora:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -32,20 +31,14 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
@@ -58,7 +51,6 @@ jobs:
ubuntu-clingo-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -69,20 +61,22 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
@@ -95,7 +89,6 @@ jobs:
ubuntu-clingo-binaries-and-patchelf:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -105,20 +98,22 @@ jobs:
apt-get install -y \
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
@@ -126,10 +121,10 @@ jobs:
spack -d solve zlib
tree ~/.spack/bootstrap/store/
opensuse-clingo-sources:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -139,12 +134,9 @@ jobs:
bzip2 curl file gcc-c++ gcc gcc-fortran tar git gpg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup repo
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
@@ -158,13 +150,11 @@ jobs:
macos-clingo-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
@@ -179,14 +169,12 @@ jobs:
strategy:
matrix:
python-version: ['3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Bootstrap clingo
@@ -201,14 +189,12 @@ jobs:
strategy:
matrix:
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Setup repo
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
@@ -223,7 +209,6 @@ jobs:
ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -233,20 +218,22 @@ jobs:
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
@@ -258,7 +245,6 @@ jobs:
ubuntu-gnupg-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -269,20 +255,22 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Setup non-root user
- name: Work around CVE-2022-24765
run: |
# See [1] below
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
# a breaking behavior. See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Setup repo and non-root user
run: |
git --version
git fetch --unshallow
. .github/workflows/setup_git.sh
useradd -m spack-test
chown -R spack-test .
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
@@ -294,15 +282,13 @@ jobs:
macos-gnupg-binaries:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -312,15 +298,13 @@ jobs:
macos-gnupg-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
brew install gawk tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -328,11 +312,3 @@ jobs:
spack bootstrap untrust github-actions-v0.2
spack -d gpg list
tree ~/.spack/bootstrap/store/
# [1] Distros that have patched git to resolve CVE-2022-24765 (e.g. Ubuntu patching v2.25.1)
# introduce breaking behavior, so we have to set `safe.directory` in gitconfig ourselves.
# See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog

View File

@@ -43,10 +43,9 @@ jobs:
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -76,33 +75,33 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535
with:
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # @v1
uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # @v1
uses: docker/setup-buildx-action@94ab11c41e45d028884a99163086648e898eed25 # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # @v2
uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}

View File

@@ -22,10 +22,9 @@ on:
jobs:
install_gcc:
name: gcc with clang
if: github.repository == 'spack/spack'
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
@@ -37,11 +36,10 @@ jobs:
install_jupyter_clang:
name: jupyter
if: github.repository == 'spack/spack'
runs-on: macos-latest
timeout-minutes: 700
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
@@ -52,10 +50,9 @@ jobs:
install_scipy_clang:
name: scipy, mpl, pd
if: github.repository == 'spack/spack'
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9

View File

@@ -4,7 +4,6 @@ Set-Location spack
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
git config --global core.longpaths true
if ($(git branch --show-current) -ne "develop")
{

View File

@@ -15,7 +15,7 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
@@ -31,7 +31,7 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
@@ -39,7 +39,7 @@ jobs:
python-version: '3.10'
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools==62.3.4 types-six
pip install --upgrade pip six setuptools types-six
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
@@ -57,7 +57,7 @@ jobs:
packages: ${{ steps.filter.outputs.packages }}
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
@@ -106,7 +106,7 @@ jobs:
- python-version: 3.9
concretizer: original
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
@@ -162,7 +162,7 @@ jobs:
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,${{ matrix.concretizer }}
@@ -171,7 +171,7 @@ jobs:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
@@ -200,7 +200,7 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: shelltests,linux
@@ -218,7 +218,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -237,7 +237,7 @@ jobs:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
@@ -274,7 +274,7 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,clingo
@@ -286,7 +286,7 @@ jobs:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
@@ -320,7 +320,7 @@ jobs:
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
fi
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
files: ./coverage.xml
@@ -331,7 +331,7 @@ jobs:
needs: [ validate, style, changes ]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
@@ -350,7 +350,7 @@ jobs:
run: |
. share/spack/setup-env.sh
$(which spack) audit packages
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,audits

View File

@@ -17,7 +17,7 @@ jobs:
validate:
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
@@ -33,7 +33,7 @@ jobs:
style:
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
@@ -41,7 +41,7 @@ jobs:
python-version: 3.9
- name: Install Python packages
run: |
python -m pip install --upgrade pip six setuptools==62.3.4 flake8 isort>=4.3.5 mypy>=0.800 black pywin32 types-python-dateutil
python -m pip install --upgrade pip six setuptools flake8 isort>=4.3.5 mypy>=0.800 black pywin32 types-python-dateutil
- name: Create local develop
run: |
.\spack\.github\workflows\setup_git.ps1
@@ -55,7 +55,7 @@ jobs:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
@@ -75,7 +75,7 @@ jobs:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
@@ -95,7 +95,7 @@ jobs:
needs: [ validate, style ]
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
@@ -120,7 +120,7 @@ jobs:
git config --global core.symlinks false
shell:
powershell
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
@@ -139,11 +139,11 @@ jobs:
echo "installer_root=$((pwd).Path)" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
env:
ProgressPreference: SilentlyContinue
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
- uses: actions/upload-artifact@v3
with:
name: Windows Spack Installer Bundle
path: ${{ env.installer_root }}\pkg\Spack.exe
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
- uses: actions/upload-artifact@v3
with:
name: Windows Spack Installer
path: ${{ env.installer_root}}\pkg\Spack.msi

View File

@@ -1,223 +1,3 @@
# v0.18.1 (2022-07-19)
### Spack Bugfixes
* Fix several bugs related to bootstrapping (#30834,#31042,#31180)
* Fix a regression that was causing spec hashes to differ between
Python 2 and Python 3 (#31092)
* Fixed compiler flags for oneAPI and DPC++ (#30856)
* Fixed several issues related to concretization (#31142,#31153,#31170,#31226)
* Improved support for Cray manifest file and `spack external find` (#31144,#31201,#31173,#31186)
* Assign a version to openSUSE Tumbleweed according to the GLIBC version
in the system (#19895)
* Improved Dockerfile generation for `spack containerize` (#29741,#31321)
* Fixed a few bugs related to concurrent execution of commands (#31509,#31493,#31477)
### Package updates
* WarpX: add v22.06, fixed libs property (#30866,#31102)
* openPMD: add v0.14.5, update recipe for @develop (#29484,#31023)
# v0.18.0 (2022-05-28)
`v0.18.0` is a major feature release.
## Major features in this release
1. **Concretizer now reuses by default**
`spack install --reuse` was introduced in `v0.17.0`, and `--reuse`
is now the default concretization mode. Spack will try hard to
resolve dependencies using installed packages or binaries (#30396).
To avoid reuse and to use the latest package configurations, (the
old default), you can use `spack install --fresh`, or add
configuration like this to your environment or `concretizer.yaml`:
```yaml
concretizer:
reuse: false
```
2. **Finer-grained hashes**
Spack hashes now include `link`, `run`, *and* `build` dependencies,
as well as a canonical hash of package recipes. Previously, hashes
only included `link` and `run` dependencies (though `build`
dependencies were stored by environments). We coarsened the hash to
reduce churn in user installations, but the new default concretizer
behavior mitigates this concern and gets us reuse *and* provenance.
You will be able to see the build dependencies of new installations
with `spack find`. Old installations will not change and their
hashes will not be affected. (#28156, #28504, #30717, #30861)
3. **Improved error messages**
Error handling with the new concretizer is now done with
optimization criteria rather than with unsatisfiable cores, and
Spack reports many more details about conflicting constraints.
(#30669)
4. **Unify environments when possible**
Environments have thus far supported `concretization: together` or
`concretization: separately`. These have been replaced by a new
preference in `concretizer.yaml`:
```yaml
concretizer:
unify: [true|false|when_possible]
```
`concretizer:unify:when_possible` will *try* to resolve a fully
unified environment, but if it cannot, it will create multiple
configurations of some packages where it has to. For large
environments that previously had to be concretized separately, this
can result in a huge speedup (40-50x). (#28941)
5. **Automatically find externals on Cray machines**
Spack can now automatically discover installed packages in the Cray
Programming Environment by running `spack external find` (or `spack
external read-cray-manifest` to *only* query the PE). Packages from
the PE (e.g., `cray-mpich`) are added to the database with full
dependency information, and compilers from the PE are added to
`compilers.yaml`. Available with the June 2022 release of the Cray
Programming Environment. (#24894, #30428)
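A minimal sketch of the two commands named above (flags and output omitted; assumes a machine where the Cray PE manifest is present):
```console
$ spack external read-cray-manifest   # query only the Cray PE manifest
$ spack external find                 # general external detection
```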
6. **New binary format and hardened signing**
Spack now has an updated binary format, with improvements for
security. The new format has a detached signature file, and Spack
verifies the signature before untarring or decompressing the binary
package. The previous format embedded the signature in a `tar`
file, which required the client to run `tar` *before* verifying
(#30750). Spack can still install from build caches using the old
format, but we encourage users to switch to the new format going
forward.
Production GitLab pipelines have been hardened to securely sign
binaries. There is now a separate signing stage so that signing
keys are never exposed to build system code, and signing keys are
ephemeral and only live as long as the signing pipeline stage.
(#30753)
7. **Bootstrap mirror generation**
The `spack bootstrap mirror` command can automatically create a
mirror for bootstrapping the concretizer and other needed
dependencies in an air-gapped environment. (#28556)
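A usage sketch, mirroring the example in the bootstrapping documentation removed later in this diff (the target folder is illustrative):
```console
$ spack bootstrap mirror --binary-packages /opt/bootstrap
```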
8. **Nascent Windows support**
Spack now has initial support for Windows. Spack core has been
refactored to run in the Windows environment, and a small number of
packages can now build for Windows. More details are
[in the documentation](https://spack.rtfd.io/en/latest/getting_started.html#spack-on-windows)
(#27021, #28385, many more)
9. **Makefile generation**
`spack env depfile` can be used to generate a `Makefile` from an
environment, which can be used to build packages in the environment
in parallel on a single node. e.g.:
```console
spack -e myenv env depfile > Makefile
make
```
Spack propagates `gmake` jobserver information to builds so that
their jobs can share cores. (#30039, #30254, #30302, #30526)
10. **New variant features**
In addition to being conditional themselves, variants can now have
[conditional *values*](https://spack.readthedocs.io/en/latest/packaging_guide.html#conditional-possible-values)
that are only possible for certain configurations of a package. (#29530)
Variants can be
[declared "sticky"](https://spack.readthedocs.io/en/latest/packaging_guide.html#sticky-variants),
which prevents them from being enabled or disabled by the
concretizer. Sticky variants must be set explicitly by users
on the command line or in `packages.yaml`. (#28630)
* Allow conditional possible values in variants
* Add a "sticky" property to variants
## Other new features of note
* Environment views can optionally link only `run` dependencies
with `link:run` (#29336)
* `spack external find --all` finds library-only packages in
addition to build dependencies (#28005)
* Customizable `config:license_dir` option (#30135)
* `spack external find --path PATH` takes a custom search path (#30479)
* `spack spec` has a new `--format` argument like `spack find` (#27908)
* `spack concretize --quiet` skips printing concretized specs (#30272)
* `spack info` now has cleaner output and displays test info (#22097)
* Package-level submodule option for git commit versions (#30085, #30037)
* Using `/hash` syntax to refer to concrete specs in an environment
now works even if `/hash` is not installed. (#30276)
## Major internal refactors
* full hash (see above)
* new develop versioning scheme `0.19.0-dev0`
* Allow for multiple dependencies/dependents from the same package (#28673)
* Splice differing virtual packages (#27919)
## Performance Improvements
* Concretization of large environments with `unify: when_possible` is
much faster than concretizing separately (#28941, see above)
* Single-pass view generation algorithm is 2.6x faster (#29443)
## Archspec improvements
* `oneapi` and `dpcpp` flag support (#30783)
* better support for `M1` and `a64fx` (#30683)
## Removals and Deprecations
* Spack no longer supports Python `2.6` (#27256)
* Removed deprecated `--run-tests` option of `spack install`;
use `spack test` (#30461)
* Removed deprecated `spack flake8`; use `spack style` (#27290)
* Deprecate `spack:concretization` config option; use
`concretizer:unify` (#30038)
* Deprecate top-level module configuration; use module sets (#28659)
* `spack activate` and `spack deactivate` are deprecated in favor of
environments; will be removed in `0.19.0` (#29430; see also `link:run`
in #29336 above)
## Notable Bugfixes
* Fix bug that broke locks with many parallel builds (#27846)
* Many bugfixes and consistency improvements for the new concretizer
and `--reuse` (#30357, #30092, #29835, #29933, #28605, #29694, #28848)
## Packages
* `CMakePackage` uses `CMAKE_INSTALL_RPATH_USE_LINK_PATH` (#29703)
* Refactored `lua` support: `lua-lang` virtual supports both
`lua` and `luajit` via new `LuaPackage` build system (#28854)
* PythonPackage: now installs packages with `pip` (#27798)
* Python: improve site_packages_dir handling (#28346)
* Extends: support spec, not just package name (#27754)
* `find_libraries`: search for both .so and .dylib on macOS (#28924)
* Use stable URLs and `?full_index=1` for all github patches (#29239)
## Spack community stats
* 6,416 total packages, 458 new since `v0.17.0`
* 219 new Python packages
* 60 new R packages
* 377 people contributed to this release
* 337 committers to packages
* 85 committers to core
# v0.17.2 (2022-04-13)
### Spack bugfixes
@@ -231,7 +11,7 @@
* Fixed a few bugs affecting the spack ci command (#29518, #29419)
* Fix handling of Intel compiler environment (#29439)
* Fix a few edge cases when reindexing the DB (#28764)
* Remove "Known issues" from documentation (#29664)
* Remove "Known issues" from documentation (#29664)
* Other miscellaneous bugfixes (0b72e070583fc5bcd016f5adc8a84c99f2b7805f, #28403, #29261)
# v0.17.1 (2021-12-23)

View File

@@ -6,15 +6,34 @@ bootstrap:
# by Spack is installed in a "store" subfolder of this root directory
root: $user_cache_path/bootstrap
# Methods that can be used to bootstrap software. Each method may or
# may not be able to bootstrap all the software that Spack needs,
# may not be able to bootstrap all of the software that Spack needs,
# depending on its type.
sources:
- name: 'github-actions-v0.2'
metadata: $spack/share/spack/bootstrap/github-actions-v0.2
type: buildcache
description: |
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.2
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
- name: 'github-actions-v0.1'
metadata: $spack/share/spack/bootstrap/github-actions-v0.1
- name: 'spack-install'
metadata: $spack/share/spack/bootstrap/spack-install
type: buildcache
description: |
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.1
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
# This method is just Spack bootstrapping the software it needs from sources.
# It has been added here so that users can selectively disable bootstrapping
# from sources by "untrusting" it.
- name: spack-install
type: install
description: |
Specs built from sources by Spack. May take a long time.
trusted:
# By default we trust bootstrapping from sources and from binaries
# produced on Github via the workflow

View File

@@ -14,23 +14,4 @@ concretizer:
# concretizing specs. If `true`, we'll try to use as many installs/binaries
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization.
reuse: true
# Options that tune which targets are considered for concretization. The
# concretization process is very sensitive to the number of targets, and the time
# needed to reach a solution increases noticeably with the number of targets
# considered.
targets:
# Determine whether we want to target specific or generic microarchitectures.
# An example of the first kind might be for instance "skylake" or "bulldozer",
# while generic microarchitectures are for instance "aarch64" or "x86_64_v4".
granularity: microarchitectures
# If "false" allow targets that are incompatible with the current host (for
# instance concretize with target "icelake" while running on "haswell").
# If "true" only allow targets that are compatible with the host.
host_compatible: true
# When "true" concretize root specs of environments together, so that each unique
# package in an environment corresponds to one concrete spec. This ensures
# environments can always be activated. When "false" perform concretization separately
# on each root spec, allowing different versions and variants of the same package in
# an environment.
unify: false
reuse: false

View File

@@ -33,9 +33,6 @@ config:
template_dirs:
- $spack/share/spack/templates
# Directory where licenses should be located
license_dir: $spack/etc/spack/licenses
# Temporary locations Spack can try to use for builds.
#
# Recommended options are given below.

View File

@@ -35,8 +35,7 @@ packages:
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libllvm: [llvm, llvm-amdgpu]
lua-lang: [lua, lua-luajit-openresty, lua-luajit]
luajit: [lua-luajit-openresty, lua-luajit]
lua-lang: [lua, lua-luajit]
mariadb-client: [mariadb-c-client, mariadb]
mkl: [intel-mkl]
mpe: [mpe2]

View File

@@ -192,32 +192,32 @@ you can use them to customize an installation in :ref:`sec-specs`.
Reusing installed dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, when you run ``spack install``, Spack tries hard to reuse existing installations
as dependencies, either from a local store or from remote buildcaches if configured.
This minimizes unwanted rebuilds of common dependencies, in particular if
you update Spack frequently.
.. warning::
In case you want the latest versions and configurations to be installed instead,
you can add the ``--fresh`` option:
The ``--reuse`` option described here will become the default installation
method in the next Spack version, and you will be able to get the current
behavior by using ``spack install --fresh``.
By default, when you run ``spack install``, Spack tries to build a new
version of the package you asked for, along with updated versions of
its dependencies. This gets you the latest versions and configurations,
but it can result in unwanted rebuilds if you update Spack frequently.
If you want Spack to try hard to reuse existing installations as dependencies,
you can add the ``--reuse`` option:
.. code-block:: console
$ spack install --fresh mpich
$ spack install --reuse mpich
Reusing installations in this mode is "accidental", and happens only if
there's a match between existing installations and what Spack would have installed
anyhow.
You can use the ``spack spec -I mpich`` command to see what
This will not do anything if ``mpich`` is already installed. If ``mpich``
is not installed, but dependencies like ``hwloc`` and ``libfabric`` are,
``mpich`` will be built with the installed versions, if possible.
You can use the :ref:`spack spec -I <cmd-spack-spec>` command to see what
will be reused and what will be built before you install.
You can configure Spack to use the ``--fresh`` behavior by default in
``concretizer.yaml``:
.. code-block:: yaml
concretizer:
reuse: false
You can configure Spack to use the ``--reuse`` behavior by default in
``concretizer.yaml``.
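For reference, a minimal ``concretizer.yaml`` sketch that makes ``--reuse`` the default (assuming the ``reuse`` key shown in the defaults file elsewhere in this diff):

.. code-block:: yaml

   concretizer:
     reuse: true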
.. _cmd-spack-uninstall:

View File

@@ -50,13 +50,6 @@ build cache files for the "ninja" spec:
Note that the targeted spec must already be installed. Once you have a build cache,
you can add it as a mirror, discussed next.
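A hedged sketch of what registering a build cache directory as a mirror could look like (mirror name and path are illustrative):

.. code-block:: console

   $ spack mirror add my-buildcache file:///path/to/buildcache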
.. warning::
Spack improved the format used for binary caches in v0.18. The entire v0.18 series
will be able to verify and install binary caches both in the new and in the old format.
Support for using the old format is expected to end in v0.19, so we advise users to
recreate relevant buildcaches using Spack v0.18 or higher.
---------------------------------------
Finding or installing build cache files
---------------------------------------

View File

@@ -1,160 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _bootstrapping:
=============
Bootstrapping
=============
In the :ref:`Getting started <getting_started>` Section we already mentioned that
Spack can bootstrap some of its dependencies, including ``clingo``. In fact, there
is an entire command dedicated to the management of every aspect of bootstrapping:
.. command-output:: spack bootstrap --help
The first thing to know to understand bootstrapping in Spack is that each of
Spack's dependencies is bootstrapped lazily; i.e. the first time it is needed and
can't be found. You can readily check if any prerequisite for using Spack
is missing by running:
.. code-block:: console
% spack bootstrap status
Spack v0.17.1 - python@3.8
[FAIL] Core Functionalities
[B] MISSING "clingo": required to concretize specs
[FAIL] Binary packages
[B] MISSING "gpg2": required to sign/verify buildcaches
Spack will take care of bootstrapping any missing dependency marked as [B]. Dependencies marked as [-] are instead required to be found on the system.
In the case of the output shown above Spack detected that both ``clingo`` and ``gnupg``
are missing and it's giving detailed information on why they are needed and whether
they can be bootstrapped. Running a command that concretizes a spec, like:
.. code-block:: console
% spack solve zlib
==> Bootstrapping clingo from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.1/build_cache/darwin-catalina-x86_64/apple-clang-12.0.0/clingo-bootstrap-spack/darwin-catalina-x86_64-apple-clang-12.0.0-clingo-bootstrap-spack-p5on7i4hejl775ezndzfdkhvwra3hatn.spack
==> Installing "clingo-bootstrap@spack%apple-clang@12.0.0~docs~ipo+python build_type=Release arch=darwin-catalina-x86_64" from a buildcache
[ ... ]
triggers the bootstrapping of clingo from pre-built binaries as expected.
-----------------------
The Bootstrapping store
-----------------------
The software installed for bootstrapping purposes is deployed in a separate store.
Its location can be checked with the following command:
.. code-block:: console
% spack bootstrap root
It can also be changed with the same command by just specifying the newly desired path:
.. code-block:: console
% spack bootstrap root /opt/spack/bootstrap
You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
clingo-bootstrap@spack libassuan@2.5.5 libgpg-error@1.42 libksba@1.5.1 pinentry@1.1.1 zlib@1.2.11
gnupg@2.3.1 libgcrypt@1.9.3 libiconv@1.16 npth@1.6 python@3.8
In case it is needed, you can remove all the software in the current bootstrapping store with:
.. code-block:: console
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
--------------------------------------------
Enabling and disabling bootstrapping methods
--------------------------------------------
Bootstrapping is always performed by trying the methods listed by:
.. command-output:: spack bootstrap list
in the order they appear, from top to bottom. By default Spack is
configured to try first bootstrapping from pre-built binaries and to
fall back to bootstrapping from sources if that fails.
If need be, you can disable bootstrapping altogether by running:
.. code-block:: console
% spack bootstrap disable
in which case it's your responsibility to ensure Spack runs in an
environment where all its prerequisites are installed. You can
also configure Spack to skip certain bootstrapping methods by *untrusting*
them. For instance:
.. code-block:: console
% spack bootstrap untrust github-actions
==> "github-actions" is now untrusted and will not be used for bootstrapping
tells Spack to skip trying to bootstrap from binaries. To add the "github-actions" method back you can:
.. code-block:: console
% spack bootstrap trust github-actions
There is also an option to reset the bootstrapping configuration to Spack's defaults:
.. code-block:: console
% spack bootstrap reset
==> Bootstrapping configuration is being reset to Spack's defaults. Current configuration will be lost.
Do you want to continue? [Y/n]
%
----------------------------------------
Creating a mirror for air-gapped systems
----------------------------------------
Spack's default configuration for bootstrapping relies on the user having
access to the internet, either to fetch pre-compiled binaries or source tarballs.
Sometimes though Spack is deployed on air-gapped systems where such access is denied.
To help with similar situations Spack has a command that recreates, in a local folder
of choice, a mirror containing the source tarballs and/or binary packages needed for
bootstrapping.
.. code-block:: console
% spack bootstrap mirror --binary-packages /opt/bootstrap
==> Adding "clingo-bootstrap@spack+python %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "gnupg@2.3: %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "patchelf@0.13.1:0.13.99 %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding binary packages from "https://github.com/alalazo/spack-bootstrap-mirrors/releases/download/v0.1-rc.2/bootstrap-buildcache.tar.gz" to the mirror at /opt/bootstrap/local-mirror
To register the mirror on the platform where it's supposed to be used, run the following command(s):
% spack bootstrap add --trust local-sources /opt/bootstrap/metadata/sources
% spack bootstrap add --trust local-binaries /opt/bootstrap/metadata/binaries
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.

View File

@@ -219,65 +219,33 @@ Concretizer options
but you can also use ``concretizer.yaml`` to customize aspects of the
algorithm it uses to select the dependencies you install:
.. literalinclude:: _spack_root/etc/spack/defaults/concretizer.yaml
:language: yaml
.. code-block:: yaml
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Reuse already installed packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
concretizer:
# Whether to consider installed packages or packages from buildcaches when
# concretizing specs. If `true`, we'll try to use as many installs/binaries
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization.
reuse: false
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
^^^^^^^^^^^^^^^^
``reuse``
^^^^^^^^^^^^^^^^
This controls whether Spack will prefer to use installed packages (``true``), or
whether it will do a "fresh" installation and prefer the latest settings from
``package.py`` files and ``packages.yaml`` (``false``).
You can use:
``package.py`` files and ``packages.yaml`` (``false``).
.. code-block:: console
You can use ``spack install --reuse`` to enable reuse for a single installation,
and you can use ``spack install --fresh`` to do a fresh install if ``reuse`` is
enabled by default.
% spack install --reuse <spec>
.. note::
to enable reuse for a single installation, and you can use:
``reuse: false`` is the current default, but ``reuse: true`` will be the default
in the next Spack release. You will still be able to use ``spack install --fresh``
to get the old behavior.
.. code-block:: console
spack install --fresh <spec>
to do a fresh install if ``reuse`` is enabled by default.
``reuse: true`` is the default.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Selection of the target microarchitectures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The options under the ``targets`` attribute control which targets are considered during a solve.
Currently the options in this section are only configurable from the ``concretizer.yaml`` file
and there are no corresponding command line arguments to enable them for a single solve.
The ``granularity`` option can take two possible values: ``microarchitectures`` and ``generic``.
If set to:
.. code-block:: yaml
concretizer:
targets:
granularity: microarchitectures
Spack will consider all the microarchitectures known to ``archspec`` to label nodes for
compatibility. If instead the option is set to:
.. code-block:: yaml
concretizer:
targets:
granularity: generic
Spack will consider only generic microarchitectures. For instance, when running on a
Haswell node, Spack will consider ``haswell`` as the best target in the former case and
``x86_64_v3`` as the best target in the latter case.
The ``host_compatible`` option is a Boolean option that determines whether or not the
microarchitectures considered during the solve are constrained to be compatible with the
host Spack is currently running on. For instance, if this option is set to ``true``, a
user cannot concretize for ``target=icelake`` while running on a Haswell node.
.. _package-preferences:

View File

@@ -39,7 +39,6 @@ on these ideas for each distinct build system that Spack supports:
build_systems/autotoolspackage
build_systems/cmakepackage
build_systems/cachedcmakepackage
build_systems/mesonpackage
build_systems/qmakepackage
build_systems/sippackage
@@ -48,7 +47,6 @@ on these ideas for each distinct build system that Spack supports:
:maxdepth: 1
:caption: Language-specific
build_systems/luapackage
build_systems/octavepackage
build_systems/perlpackage
build_systems/pythonpackage

View File

@@ -1,123 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _cachedcmakepackage:
------------------
CachedCMakePackage
------------------
The CachedCMakePackage base class is used for CMake-based workflows
that create a CMake cache file prior to running ``cmake``. This is
useful for packages with arguments longer than the system limit, and
for reproducibility.
The documentation for this class assumes that the user is familiar with
the ``CMakePackage`` class from which it inherits. See the documentation
for :ref:`CMakePackage <cmakepackage>`.
^^^^^^
Phases
^^^^^^
The ``CachedCMakePackage`` base class comes with the following phases:
#. ``initconfig`` - generate the CMake cache file
#. ``cmake`` - generate the Makefile
#. ``build`` - build the package
#. ``install`` - install the package
By default, these phases run:
.. code-block:: console
$ mkdir spack-build
$ cd spack-build
$ cat << EOF > name-arch-compiler@version.cmake
# Write information on compilers and dependencies
# includes information on mpi and cuda if applicable
EOF
$ cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/installation/prefix -C name-arch-compiler@version.cmake
$ make
$ make test # optional
$ make install
The ``CachedCMakePackage`` class inherits from the ``CMakePackage``
class, and accepts all of the same options and adds all of the same
flags to the ``cmake`` command. Similar to the ``CMakePackage`` class,
you may need to add a few arguments yourself, and the
``CachedCMakePackage`` provides the same interface to add those
flags.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding entries to the CMake cache
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In addition to adding flags to the ``cmake`` command, you may need to
add entries to the CMake cache in the ``initconfig`` phase. This can
be done by overriding one of four methods:
#. ``CachedCMakePackage.initconfig_compiler_entries``
#. ``CachedCMakePackage.initconfig_mpi_entries``
#. ``CachedCMakePackage.initconfig_hardware_entries``
#. ``CachedCMakePackage.initconfig_package_entries``
Each of these methods returns a list of CMake cache strings. The
distinction between these methods is merely to provide a
well-structured and legible cmake cache file -- otherwise, entries
from each of these methods are handled identically.
Spack also provides convenience methods for generating CMake cache
entries. These methods are available at module scope in every Spack
package. Because CMake parses boolean options, strings, and paths
differently, there are three such methods:
#. ``cmake_cache_option``
#. ``cmake_cache_string``
#. ``cmake_cache_path``
These methods each accept three parameters -- the name of the CMake
variable associated with the entry, the value of the entry, and an
optional comment -- and return strings in the appropriate format to be
returned from any of the ``initconfig*`` methods. Additionally, these
methods may return comments beginning with the ``#`` character.
A typical usage of these methods may look something like this:
.. code-block:: python
def initconfig_mpi_entries(self):
# Get existing MPI configurations
entries = super(Foo, self).initconfig_mpi_entries()
# The existing MPI configurations key on whether ``mpi`` is in the spec
# This spec has an MPI variant, and we need to enable MPI when it is on.
# This hypothetical package controls MPI with the ``FOO_MPI`` option to
# cmake.
if '+mpi' in self.spec:
entries.append(cmake_cache_option('FOO_MPI', True, "enable mpi"))
else:
entries.append(cmake_cache_option('FOO_MPI', False, "disable mpi"))
def initconfig_package_entries(self):
# Package specific options
entries = []
entries.append('#Entries for build options')
bar_on = '+bar' in self.spec
entries.append(cmake_cache_option('FOO_BAR', bar_on, 'toggle bar'))
entries.append('#Entries for dependencies')
if self.spec['blas'].name == 'baz': # baz is our blas provider
entries.append(cmake_cache_string('FOO_BLAS', 'baz', 'Use baz'))
entries.append(cmake_cache_path('BAZ_PREFIX', self.spec['baz'].prefix))
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on CMake cache files, see:
https://cmake.org/cmake/help/latest/manual/cmake.1.html

View File

@@ -1,105 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _luapackage:
------------
LuaPackage
------------
LuaPackage is a helper for the common case of Lua packages that provide
a rockspec file. This is not meant to take a rock archive, but to build
a source archive or repository that provides a rockspec, which should cover
most lua packages. In the case that a Lua package builds with Make rather than
luarocks, prefer MakefilePackage.
^^^^^^
Phases
^^^^^^
The ``LuaPackage`` base class comes with the following phases:
#. ``unpack`` - if using a rock, unpacks the rock and moves into the source directory
#. ``preprocess`` - adjust sources or rockspec to fix build
#. ``install`` - install the project
By default, these phases run:
.. code-block:: console
# If the archive is a source rock
$ luarocks unpack <archive>.src.rock
$ # preprocess is a noop by default
$ luarocks make <name>.rockspec
Any of these phases can be overridden in your package as necessary.
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
Packages that use the Lua/LuaRocks build system can be identified by the
presence of a ``*.rockspec`` file in their sourcetree, or can be fetched as
a source rock archive (``.src.rock``). This file declares things like build
instructions and dependencies; the ``.src.rock`` also contains all code.
It is common for the rockspec file to list the lua version required in
a dependency. The LuaPackage class adds appropriate dependencies on a Lua
implementation, but it is a good idea to specify the version required with
a ``depends_on`` statement. The block normally will be a table definition like
this:
.. code-block:: lua
dependencies = {
"lua >= 5.1",
}
The LuaPackage class supports source repositories and archives containing
a rockspec and directly downloading source rock files. It *does not* support
downloading dependencies listed inside a rockspec, and thus does not support
directly downloading a rockspec as an archive.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
All base dependencies are added by the build system, but LuaRocks is run to
avoid downloading extra Lua dependencies during build. If the package needs
Lua libraries outside the standard set, they should be added as dependencies.
To specify a Lua version constraint but allow all lua implementations, prefer
to use ``depends_on("lua-lang@5.1:5.1.99")`` to express any 5.1 compatible
version. If the package requires LuaJit rather than Lua,
a ``depends_on("luajit")`` should be used to ensure a LuaJit distribution is
used instead of the Lua interpreter. Alternately, if only interpreted Lua will
work ``depends_on("lua")`` will express that.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to luarocks make
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you need to pass any arguments to the ``luarocks make`` call, you can
override the ``luarocks_args`` method like so:
.. code-block:: python
def luarocks_args(self):
return ['flag1', 'flag2']
One common use of this is to override warnings or flags for newer compilers, as in:
.. code-block:: python
def luarocks_args(self):
return ["CFLAGS='-Wno-error=implicit-function-declaration'"]
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on the LuaRocks build system, see:
https://luarocks.org/

View File

@@ -95,7 +95,7 @@ class of your package. For example, you can add it to your
# Set up the hip macros needed by the build
args.extend([
'-DENABLE_HIP=ON',
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix)])
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix])
rocm_archs = spec.variants['amdgpu_target'].value
if 'none' not in rocm_archs:
args.append('-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'

View File

@@ -23,10 +23,7 @@
import sys
from glob import glob
from docutils.statemachine import StringList
from sphinx.domains.python import PythonDomain
from sphinx.ext.apidoc import main as sphinx_apidoc
from sphinx.parsers import RSTParser
# -- Spack customizations -----------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
@@ -85,6 +82,9 @@
#
# Disable duplicate cross-reference warnings.
#
from sphinx.domains.python import PythonDomain
class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if 'refspecific' in node:
@@ -92,20 +92,8 @@ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode)
#
# Disable tabs to space expansion in code blocks
# since Makefiles require tabs.
#
class NoTabExpansionRSTParser(RSTParser):
def parse(self, inputstring, document):
if isinstance(inputstring, str):
lines = inputstring.splitlines()
inputstring = StringList(lines, document.current_source)
super().parse(inputstring, document)
def setup(sphinx):
sphinx.add_domain(PatchedPythonDomain, override=True)
sphinx.add_source_parser(NoTabExpansionRSTParser, override=True)
# -- General configuration -----------------------------------------------------

View File

@@ -59,8 +59,7 @@ other techniques to minimize the size of the final image:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer: together" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -246,8 +245,7 @@ software is respectively built and installed:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -368,8 +366,7 @@ produces, for instance, the following ``Dockerfile``:
&& echo " externals:" \
&& echo " - spec: cuda%gcc" \
&& echo " prefix: /usr/local/cuda" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml

View File

@@ -273,9 +273,19 @@ or
Concretizing
^^^^^^^^^^^^
Once some user specs have been added to an environment, they can be concretized.
There are, at the moment, three different modes of operation to concretize an environment,
which are explained in detail in :ref:`environments_concretization_config`.
Once some user specs have been added to an environment, they can be
concretized. *By default specs are concretized separately*, one after
the other. This mode of operation makes it possible to deploy a full
software stack where multiple configurations of the same package
need to be installed alongside each other. Central installations done
at HPC centers by system administrators or user support groups
are a common case that fits this behavior.
Environments *can also be configured to concretize all
the root specs in a self-consistent way* to ensure that
each package in the environment comes with a single configuration. This
mode of operation is usually what is required by software developers who
want to deploy their development environment.
Regardless of which mode of operation has been chosen, the following
command will ensure all the root specs are concretized according to the
constraints that are prescribed in the configuration:
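A minimal console sketch, assuming an active environment named ``myenv``:

.. code-block:: console

   [myenv]$ spack concretize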
@@ -339,24 +349,6 @@ If the Environment has been concretized, Spack will install the
concretized specs. Otherwise, ``spack install`` will first concretize
the Environment and then install the concretized specs.
.. note::
Every ``spack install`` process builds one package at a time with multiple build
jobs, controlled by the ``-j`` flag and the ``config:build_jobs`` option
(see :ref:`build-jobs`). To speed up environment builds further, independent
packages can be installed in parallel by launching more Spack instances. For
example, the following will build at most four packages in parallel using
three background jobs:
.. code-block:: console
[myenv]$ spack install & spack install & spack install & spack install
Another option is to generate a ``Makefile`` and run ``make -j<N>`` to control
the number of parallel install processes. See :ref:`env-generate-depfile`
for details.
As it installs, ``spack install`` creates symbolic links in the
``logs/`` directory in the Environment, allowing for easy inspection
of build logs related to that environment. The ``spack install``
@@ -483,76 +475,32 @@ Appending to this list in the yaml is identical to using the ``spack
add`` command from the command line. However, there is more power
available from the yaml file.
.. _environments_concretization_config:
^^^^^^^^^^^^^^^^^^^
Spec concretization
^^^^^^^^^^^^^^^^^^^
An environment can be concretized in three different modes, and the behavior active in any environment
is determined by the ``concretizer:unify`` property. By default, specs are concretized *separately*, one after the other:
Specs can be concretized separately or together, as already
explained in :ref:`environments_concretization`. The behavior active
under any environment is determined by the ``concretization`` property:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: false
- ncview
- netcdf
- nco
- py-sphinx
concretization: together
This mode of operation makes it possible to deploy a full software stack where multiple configurations of the same package
need to be installed alongside each other using the best possible selection of transitive dependencies. The downside
is that redundancy of installations is disregarded completely, and thus environments might be more bloated than
strictly needed. In the example above, for instance, if a version of ``zlib`` newer than ``1.2.8`` is known to Spack,
then it will be used for both ``hdf5`` installations.
If redundancy of the environment is a concern, Spack provides a way to install it *together where possible*,
i.e. trying to maximize reuse of dependencies across different specs:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
In this case too, Spack allows multiple configurations of the same package, but prioritizes the reuse of
specs over other factors. Going back to our example, this means that both ``hdf5`` installations will use
``zlib@1.2.8`` as a dependency even if newer versions of that library are available.
Central installations done at HPC centers by system administrators or user support groups are a common case
that fits either of these two modes.
Environments can also be configured to concretize all the root specs *together*, in a self-consistent way, to
ensure that each package in the environment comes with a single configuration:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This mode of operation is usually what is required by software developers who want to deploy their development
environment and have a single view of it in the filesystem.
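As a sketch of how this setting can be changed without editing ``spack.yaml`` by hand
(the environment name ``myenv`` is an assumption made for this example):

.. code-block:: console

   $ spack -e myenv config add concretizer:unify:true
   $ spack -e myenv concretize -f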
.. note::
The ``concretizer:unify`` config option was introduced in Spack 0.18 to
replace the ``concretization`` property. For reference,
``concretization: together`` is replaced by ``concretizer:unify:true``,
and ``concretization: separately`` is replaced by ``concretizer:unify:false``.
which can currently take either one of the two allowed values ``together`` or ``separately``
(the default).
.. admonition:: Re-concretization of user specs
When concretizing specs *together* or *together where possible* the entire set of specs will be
When concretizing specs together the entire set of specs will be
re-concretized after any addition of new user specs, to ensure that
the environment remains consistent / minimal. When instead the specs are concretized
the environment remains consistent. When instead the specs are concretized
separately only the new specs will be re-concretized after any addition.
^^^^^^^^^^^^^
@@ -799,7 +747,7 @@ directories.
select: [^mpi]
exclude: ['%pgi@18.5']
projections:
all: '{name}/{version}-{compiler.name}'
all: {name}/{version}-{compiler.name}
link: all
link_type: symlink
@@ -962,93 +910,3 @@ environment.
The ``spack env deactivate`` command will remove the default view of
the environment from the user's path.
.. _env-generate-depfile:
------------------------------------------
Generating Depfiles from Environments
------------------------------------------
Spack can generate ``Makefile``\s to make it easier to build multiple
packages in an environment in parallel. Generated ``Makefile``\s expose
targets that can be included in existing ``Makefile``\s, to allow
other targets to depend on the environment installation.
A typical workflow is as follows:
.. code:: console
spack env create -d .
spack -e . add perl
spack -e . concretize
spack -e . env depfile > Makefile
make -j64
This generates a ``Makefile`` from a concretized environment in the
current working directory, and ``make -j64`` installs the environment,
exploiting parallelism across packages as much as possible. Spack
respects the Make jobserver and forwards it to the build environment
of packages, meaning that a single ``-j`` flag is enough to control the
load, even when packages are built in parallel.
By default the following phony convenience targets are available:
- ``make all``: installs the environment (default target);
- ``make fetch-all``: only fetch sources of all packages;
- ``make clean``: cleans files used by make, but does not uninstall packages.
.. tip::
GNU Make version 4.3 and above have great support for output synchronization
through the ``-O`` and ``--output-sync`` flags, which ensure that output is
printed orderly per package install. To get synchronized output with colors,
use ``make -j<N> SPACK_COLOR=always --output-sync=recurse``.
The following advanced example shows how generated targets can be used in a
``Makefile``:
.. code:: Makefile
SPACK ?= spack
.PHONY: all clean fetch env
all: env
spack.lock: spack.yaml
$(SPACK) -e . concretize -f
env.mk: spack.lock
$(SPACK) -e . env depfile -o $@ --make-target-prefix spack
fetch: spack/fetch
$(info Environment fetched!)
env: spack/env
$(info Environment installed!)
clean:
rm -rf spack.lock env.mk spack/
ifeq (,$(filter clean,$(MAKECMDGOALS)))
include env.mk
endif
When ``make`` is invoked, it first "remakes" the missing include ``env.mk``
from its rule, which triggers concretization. When done, the generated targets
``spack/fetch`` and ``spack/env`` are available. In the above
example, the ``env`` target uses the latter as a prerequisite, meaning
that it can make use of the installed packages in its commands.
As it is typically undesirable to remake ``env.mk`` as part of ``make clean``,
the include is conditional.
.. note::
When including generated ``Makefile``\s, it is important to use
the ``--make-target-prefix`` flag and use the non-phony targets
``<target-prefix>/env`` and ``<target-prefix>/fetch`` as
prerequisites, instead of the phony targets ``<target-prefix>/all``
and ``<target-prefix>/fetch-all`` respectively.

View File

@@ -63,7 +63,6 @@ or refer to the full manual below.
configuration
config_yaml
bootstrapping
build_settings
environments
containers

View File

@@ -115,8 +115,7 @@ And here's the spack environment built by the pipeline represented as a
spack:
view: false
concretizer:
unify: false
concretization: separately
definitions:
- pkgs:

View File

@@ -61,7 +61,7 @@ You can see the packages we added earlier in the ``specs:`` section. If you
ever want to add more packages, you can either use ``spack add`` or manually
edit this file.
We also need to change the ``concretizer:unify`` option. By default, Spack
We also need to change the ``concretization:`` option. By default, Spack
concretizes each spec *separately*, allowing multiple versions of the same
package to coexist. Since we want a single consistent environment, we want to
concretize all of the specs *together*.
@@ -78,8 +78,7 @@ Here is what your ``spack.yaml`` looks like with this new setting:
# add package specs to the `specs` list
specs: [bash@5, python, py-numpy, py-scipy, py-matplotlib]
view: true
concretizer:
unify: true
concretization: together
^^^^^^^^^^^^^^^^
Symlink location

View File

@@ -25,5 +25,4 @@ spack:
- subversion
# Plotting
- graphviz
concretizer:
unify: true
concretization: together

6
lib/spack/env/cc vendored
View File

@@ -1,4 +1,4 @@
#!/bin/sh -f
#!/bin/sh
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
@@ -768,9 +768,7 @@ if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
echo "[$mode] ${full_command_list}" >> "$output_log"
fi
# Execute the full command, preserving spaces with IFS set

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.4 (commit b8eea9df2b4204ff27d204452cd46f5199a0b423)
* Version: 0.1.2 (commit 85757b6666422fca86aa882a769bf78b0f992f54)
argparse
--------

View File

@@ -61,7 +61,7 @@ def proc_cpuinfo():
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
with open("/proc/cpuinfo") as file:
for line in file:
key, separator, value = line.partition(":")
@@ -80,46 +80,26 @@ def proc_cpuinfo():
def _check_output(args, env):
output = subprocess.Popen( # pylint: disable=consider-using-with
args, stdout=subprocess.PIPE, env=env
).communicate()[0]
output = subprocess.Popen(args, stdout=subprocess.PIPE, env=env).communicate()[0]
return six.text_type(output.decode("utf-8"))
def _machine():
""" "Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin, trust what Python tells us
if operating_system != "Darwin":
return platform.machine()
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
#
# See: https://bugs.python.org/issue42704
output = _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return "aarch64"
return "x86_64"
@info_dict(operating_system="Darwin")
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == "x86_64":
if platform.machine() == "x86_64":
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
@@ -145,18 +125,6 @@ def sysctl(*args):
return info
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
return child_environment
def adjust_raw_flags(info):
"""Adjust the flags detected on the system to homogenize
slightly different representations.
@@ -216,7 +184,12 @@ def compatible_microarchitectures(info):
Args:
info (dict): dictionary containing information on the host cpu
"""
architecture_family = _machine()
architecture_family = platform.machine()
# On Apple M1 platform.machine() returns "arm64" instead of "aarch64"
# so we should normalize the name here
if architecture_family == "arm64":
architecture_family = "aarch64"
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
@@ -271,7 +244,12 @@ def compatibility_check(architecture_family):
architecture_family = (architecture_family,)
def decorator(func):
COMPATIBILITY_CHECKS.update({family: func for family in architecture_family})
# pylint: disable=fixme
# TODO: on removal of Python 2.6 support this can be re-written as
# TODO: an update + a dict comprehension
for arch_family in architecture_family:
COMPATIBILITY_CHECKS[arch_family] = func
return func
return decorator
@@ -310,7 +288,7 @@ def compatibility_check_for_x86_64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)
@@ -325,9 +303,8 @@ def compatibility_check_for_aarch64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
# On macOS it seems impossible to get all the CPU features with syctl info
and (target.features.issubset(features) or platform.system() == "Darwin")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)

View File

@@ -11,7 +11,7 @@
try:
from collections.abc import MutableMapping # novm
except ImportError:
from collections import MutableMapping # pylint: disable=deprecated-class
from collections import MutableMapping
class LazyDictionary(MutableMapping):
@@ -56,7 +56,7 @@ def _load_json_file(json_file):
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r") as file: # pylint: disable=unspecified-encoding
with open(filename, "r") as file:
return json.load(file)
return _factory

View File

@@ -85,21 +85,7 @@
"intel": [
{
"versions": ":",
"name": "x86-64",
"flags": "-march={name} -mtune=generic"
}
],
"oneapi": [
{
"versions": ":",
"name": "x86-64",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": ":",
"name": "x86-64",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
@@ -143,20 +129,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -214,20 +186,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -290,20 +248,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -344,19 +288,8 @@
"intel": [
{
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
}
@@ -400,18 +333,6 @@
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -463,20 +384,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -525,20 +432,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -597,18 +490,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -669,18 +550,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -746,18 +615,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -815,18 +672,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -887,18 +732,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -965,20 +798,6 @@
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1049,20 +868,6 @@
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1132,18 +937,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1211,18 +1004,6 @@
"versions": "19.0.1:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1317,20 +1098,6 @@
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1375,20 +1142,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
]
}
},
@@ -1439,20 +1192,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1507,20 +1246,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1576,20 +1301,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
]
}
},
@@ -1649,22 +1360,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1727,22 +1422,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1806,22 +1485,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1880,30 +1543,6 @@
"name": "znver3",
"flags": "-march={name} -mtune={name}"
}
],
"intel": [
{
"versions": "16.0:",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -2149,6 +1788,7 @@
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
@@ -2181,26 +1821,18 @@
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "8:10.2",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve -msve-vector-bits=512"
},
{
"versions": "10.3:",
"flags": "-mcpu=a64fx -msve-vector-bits=512"
"versions": "8:",
"flags": "-march=armv8.2-a+crc+aes+sha2+fp16+sve -msve-vector-bits=512"
}
],
"clang": [
{
"versions": "3.9:4.9",
"flags": "-march=armv8.2-a+crc+sha2+fp16"
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "5:10",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve"
},
{
"versions": "11:",
"flags": "-mcpu=a64fx"
"versions": "5:",
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
}
],
"arm": [
@@ -2322,40 +1954,7 @@
"m1": {
"from": ["aarch64"],
"vendor": "Apple",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"flagm2",
"frint"
],
"features": [],
"compilers": {
"gcc": [
{
@@ -2365,22 +1964,14 @@
],
"clang" : [
{
"versions": "9.0:12.0",
"versions": "9.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
],
"apple-clang": [
{
"versions": "11.0:12.5",
"versions": "11.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
]
}

View File

@@ -64,7 +64,6 @@
'is_exe',
'join_path',
'last_modification_time_recursive',
'library_extensions',
'mkdirp',
'partition_path',
'prefixes',
@@ -110,15 +109,12 @@ def path_contains_subdirectory(path, root):
return norm_path.startswith(norm_root)
#: This generates the library filenames that may appear on any OS.
library_extensions = ['a', 'la', 'so', 'tbd', 'dylib']
def possible_library_filenames(library_names):
"""Given a collection of library names like 'libfoo', generate the set of
library filenames that may be found on the system (e.g. libfoo.so).
library filenames that may be found on the system (e.g. libfoo.so). This
generates the library filenames that may appear on any OS.
"""
lib_extensions = library_extensions
lib_extensions = ['a', 'la', 'so', 'tbd', 'dylib']
return set(
'.'.join((lib, extension)) for lib, extension in
itertools.product(library_names, lib_extensions))
@@ -367,7 +363,7 @@ def group_ids(uid=None):
@system_path_filter(arg_slice=slice(1))
def chgrp(path, group, follow_symlinks=True):
def chgrp(path, group):
"""Implement the bash chgrp function on a single path"""
if is_windows:
raise OSError("Function 'chgrp' is not supported on Windows")
@@ -376,10 +372,7 @@ def chgrp(path, group, follow_symlinks=True):
gid = grp.getgrnam(group).gr_gid
else:
gid = group
if follow_symlinks:
os.chown(path, -1, gid)
else:
os.lchown(path, -1, gid)
os.chown(path, -1, gid)
@system_path_filter(arg_slice=slice(1))
@@ -771,36 +764,39 @@ def __init__(self, inner_exception, outer_exception):
@contextmanager
@system_path_filter
def replace_directory_transaction(directory_name):
"""Temporarily renames a directory in the same parent dir. If the operations
executed within the context manager don't raise an exception, the renamed directory
is deleted. If there is an exception, the move is undone.
def replace_directory_transaction(directory_name, tmp_root=None):
"""Moves a directory to a temporary space. If the operations executed
within the context manager don't raise an exception, the directory is
deleted. If there is an exception, the move is undone.
Args:
directory_name (path): absolute path of the directory name
tmp_root (path): absolute path of the parent directory where to create
the temporary
Returns:
temporary directory where ``directory_name`` has been moved
"""
# Check the input is indeed a directory with absolute path.
# Raise before anything is done to avoid moving the wrong directory
directory_name = os.path.abspath(directory_name)
assert os.path.isdir(directory_name), 'Not a directory: ' + directory_name
assert os.path.isdir(directory_name), \
'Invalid directory: ' + directory_name
assert os.path.isabs(directory_name), \
'"directory_name" must contain an absolute path: ' + directory_name
# Note: directory_name is normalized here, meaning the trailing slash is dropped,
# so dirname is the directory's parent not the directory itself.
tmpdir = tempfile.mkdtemp(
dir=os.path.dirname(directory_name),
prefix='.backup')
directory_basename = os.path.basename(directory_name)
# We have to jump through hoops to support Windows, since
# os.rename(directory_name, tmpdir) errors there.
backup_dir = os.path.join(tmpdir, 'backup')
os.rename(directory_name, backup_dir)
tty.debug('Directory moved [src={0}, dest={1}]'.format(directory_name, backup_dir))
if tmp_root is not None:
assert os.path.isabs(tmp_root)
tmp_dir = tempfile.mkdtemp(dir=tmp_root)
tty.debug('Temporary directory created [{0}]'.format(tmp_dir))
shutil.move(src=directory_name, dst=tmp_dir)
tty.debug('Directory moved [src={0}, dest={1}]'.format(directory_name, tmp_dir))
try:
yield backup_dir
yield tmp_dir
except (Exception, KeyboardInterrupt, SystemExit) as inner_exception:
# Try to recover the original directory, if this fails, raise a
# composite exception.
@@ -808,7 +804,10 @@ def replace_directory_transaction(directory_name):
# Delete what was there, before copying back the original content
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
os.rename(backup_dir, directory_name)
shutil.move(
src=os.path.join(tmp_dir, directory_basename),
dst=os.path.dirname(directory_name)
)
except Exception as outer_exception:
raise CouldNotRestoreDirectoryBackup(inner_exception, outer_exception)
@@ -816,8 +815,8 @@ def replace_directory_transaction(directory_name):
raise
else:
# Otherwise delete the temporary directory
shutil.rmtree(tmpdir, ignore_errors=True)
tty.debug('Temporary directory deleted [{0}]'.format(tmpdir))
shutil.rmtree(tmp_dir, ignore_errors=True)
tty.debug('Temporary directory deleted [{0}]'.format(tmp_dir))
@system_path_filter
@@ -1098,32 +1097,7 @@ def visit_directory_tree(root, visitor, rel_path='', depth=0):
for f in dir_entries:
if sys.version_info >= (3, 5, 0):
rel_child = os.path.join(rel_path, f.name)
islink = f.is_symlink()
# On Windows, symlinks to directories are distinct from
# symlinks to files, and it is possible to create a
# broken symlink to a directory (e.g. using os.symlink
# without `target_is_directory=True`), invoking `isdir`
# on a symlink on Windows that is broken in this manner
# will result in an error. In this case we can work around
# the issue by reading the target and resolving the
# directory ourselves
try:
isdir = f.is_dir()
except OSError as e:
if is_windows and hasattr(e, 'winerror')\
and e.winerror == 5 and islink:
# if path is a symlink, determine destination and
# evaluate file vs directory
link_target = resolve_link_target_relative_to_the_link(f)
# link_target might be relative but
# resolve_link_target_relative_to_the_link
# will ensure that if so, that it is relative
# to the CWD and therefore
# makes sense
isdir = os.path.isdir(link_target)
else:
raise e
islink, isdir = f.is_symlink(), f.is_dir()
else:
rel_child = os.path.join(rel_path, f)
lexists, islink, isdir = lexists_islink_isdir(os.path.join(dir, f))
@@ -1131,7 +1105,7 @@ def visit_directory_tree(root, visitor, rel_path='', depth=0):
continue
if not isdir:
# handle files
# Handle files
visitor.visit_file(root, rel_child, depth)
elif not islink and visitor.before_visit_dir(root, rel_child, depth):
# Handle ordinary directories
@@ -1206,35 +1180,6 @@ def remove_if_dead_link(path):
os.unlink(path)
def readonly_file_handler(ignore_errors=False):
# TODO: generate stages etc. with write permissions wherever
# so this callback is no-longer required
"""
Generate callback for shutil.rmtree to handle permissions errors on
Windows. Some files may unexpectedly lack write permissions even
though they were generated by Spack on behalf of the user (e.g. the
stage), so this callback will detect such cases and modify the
permissions if that is the issue. For other errors, the fallback
is either to raise (if ignore_errors is False) or ignore (if
ignore_errors is True). This is only intended for Windows systems
and will raise a separate error if it is ever invoked (by accident)
on a non-Windows system.
"""
def error_remove_readonly(func, path, exc):
if not is_windows:
raise RuntimeError("This method should only be invoked on Windows")
excvalue = exc[1]
if is_windows and func in (os.rmdir, os.remove, os.unlink) and\
excvalue.errno == errno.EACCES:
# change the file to be readable,writable,executable: 0777
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# retry
func(path)
elif not ignore_errors:
raise
return error_remove_readonly
@system_path_filter
def remove_linked_tree(path):
"""Removes a directory and its contents.
@@ -1242,18 +1187,23 @@ def remove_linked_tree(path):
If the directory is a symlink, follows the link and removes the real
directory before removing the link.
This method will force-delete files on Windows
Parameters:
path (str): Directory to be removed
"""
kwargs = {'ignore_errors': True}
# On windows, cleaning a Git stage can be an issue
# as git leaves readonly files that Python handles
# poorly on Windows. Remove readonly status and try again
def onerror(func, path, exe_info):
os.chmod(path, stat.S_IWUSR)
try:
func(path)
except Exception as e:
tty.warn(e)
pass
# Windows readonly files cannot be removed by Python
# directly.
kwargs = {'ignore_errors': True}
if is_windows:
kwargs['ignore_errors'] = False
kwargs['onerror'] = readonly_file_handler(ignore_errors=True)
kwargs = {'onerror': onerror}
if os.path.exists(path):
if os.path.islink(path):

View File

@@ -11,9 +11,7 @@
import os
import re
import sys
import traceback
from datetime import datetime, timedelta
from typing import List, Tuple
import six
from six import string_types
@@ -1011,64 +1009,3 @@ def __repr__(self):
def __str__(self):
return str(self.data)
class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self):
self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]]
def __bool__(self):
"""Whether any exceptions were handled."""
return bool(self.exceptions)
def forward(self, context):
# type: (str) -> GroupedExceptionForwarder
"""Return a contextmanager which extracts tracebacks and prefixes a message."""
return GroupedExceptionForwarder(context, self)
def _receive_forwarded(self, context, exc, tb):
# type: (str, Exception, List[str]) -> None
self.exceptions.append((context, exc, tb))
def grouped_message(self, with_tracebacks=True):
# type: (bool) -> str
"""Print out an error message coalescing all the forwarded errors."""
each_exception_message = [
'{0} raised {1}: {2}{3}'.format(
context,
exc.__class__.__name__,
exc,
'\n{0}'.format(''.join(tb)) if with_tracebacks else '',
)
for context, exc, tb in self.exceptions
]
return 'due to the following failures:\n{0}'.format(
'\n'.join(each_exception_message)
)
class GroupedExceptionForwarder(object):
"""A contextmanager to capture exceptions and forward them to a
GroupedExceptionHandler."""
def __init__(self, context, handler):
# type: (str, GroupedExceptionHandler) -> None
self._context = context
self._handler = handler
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, tb):
if exc_value is not None:
self._handler._receive_forwarded(
self._context,
exc_value,
traceback.format_tb(tb),
)
# Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
return True

View File

@@ -809,23 +809,19 @@ def __enter__(self):
def background_reader(reader, echo_writer, _kill):
# for each line printed to logfile, read it
# if echo: write line to user
try:
while True:
is_killed = _kill.wait(.1)
# Flush buffered build output to file
# stdout/err fds refer to log file
self.stderr.flush()
self.stdout.flush()
while True:
is_killed = _kill.wait(.1)
self.stderr.flush()
self.stdout.flush()
line = reader.readline()
while line:
if self.echo:
self.echo_writer.write('{0}'.format(line.decode()))
self.echo_writer.flush()
line = reader.readline()
if self.echo and line:
echo_writer.write('{0}'.format(line.decode()))
echo_writer.flush()
if is_killed:
break
finally:
reader.close()
if is_killed:
break
self._active = True
with replace_environment(self.env):
@@ -841,6 +837,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
self._ioflag = False
else:
self.writer.close()
self.reader.close()
self.echo_writer.flush()
self.stdout.flush()
self.stderr.flush()
@@ -856,7 +853,10 @@ def force_echo(self):
if not self._active:
raise RuntimeError(
"Can't call force_echo() outside log_output region!")
yield
try:
yield self
finally:
pass
def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,

View File

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: (major, minor, micro, dev release) tuple
spack_version_info = (0, 18, 1)
spack_version_info = (0, 18, 0, 'dev0')
#: PEP440 canonical <major>.<minor>.<micro>.<devN> string
spack_version = '.'.join(str(s) for s in spack_version_info)

View File

@@ -276,30 +276,11 @@ def _search_duplicate_specs_in_externals(error_cls):
)
@package_directives
def _check_build_test_callbacks(pkgs, error_cls):
"""Ensure stand-alone test method is not included in build-time callbacks"""
errors = []
for pkg_name in pkgs:
pkg = spack.repo.get(pkg_name)
test_callbacks = pkg.build_time_test_callbacks
if test_callbacks and 'test' in test_callbacks:
msg = ('{0} package contains "test" method in '
'build_time_test_callbacks')
instr = ('Remove "test" from: [{0}]'
.format(', '.join(test_callbacks)))
errors.append(error_cls(msg.format(pkg.name), [instr]))
return errors
@package_directives
def _check_patch_urls(pkgs, error_cls):
"""Ensure that patches fetched from GitHub have stable sha256 hashes."""
github_patch_url_re = (
r"^https?://(?:patch-diff\.)?github(?:usercontent)?\.com/"
".+/.+/(?:commit|pull)/[a-fA-F0-9]*.(?:patch|diff)"
r"^https?://github\.com/.+/.+/(?:commit|pull)/[a-fA-F0-9]*.(?:patch|diff)"
)
errors = []

File diff suppressed because it is too large Load Diff

View File

@@ -5,7 +5,6 @@
from __future__ import print_function
import contextlib
import copy
import fnmatch
import functools
import json
@@ -22,7 +21,6 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.lang import GroupedExceptionHandler
import spack.binary_distribution
import spack.config
@@ -38,11 +36,6 @@
import spack.util.environment
import spack.util.executable
import spack.util.path
import spack.util.spack_yaml
import spack.util.url
#: Name of the file containing metadata about the bootstrapping source
METADATA_YAML_FILENAME = 'metadata.yaml'
#: Map a bootstrapper type to the corresponding class
_bootstrap_methods = {}
@@ -210,43 +203,12 @@ def _executables_in_store(executables, query_spec, query_info=None):
return False
class _BootstrapperBase(object):
"""Base class to derive types that can bootstrap software for Spack"""
config_scope_name = ''
@_bootstrapper(type='buildcache')
class _BuildcacheBootstrapper(object):
"""Install the software needed during bootstrapping from a buildcache."""
def __init__(self, conf):
self.name = conf['name']
self.url = conf['info']['url']
@property
def mirror_url(self):
# Absolute paths
if os.path.isabs(self.url):
return spack.util.url.format(self.url)
# Check for :// and assume it's an url if we find it
if '://' in self.url:
return self.url
# Otherwise, it's a relative path
return spack.util.url.format(os.path.join(self.metadata_dir, self.url))
@property
def mirror_scope(self):
return spack.config.InternalConfigScope(
self.config_scope_name, {'mirrors:': {self.name: self.mirror_url}}
)
@_bootstrapper(type='buildcache')
class _BuildcacheBootstrapper(_BootstrapperBase):
"""Install the software needed during bootstrapping from a buildcache."""
config_scope_name = 'bootstrap_buildcache'
def __init__(self, conf):
super(_BuildcacheBootstrapper, self).__init__(conf)
self.metadata_dir = spack.util.path.canonicalize_path(conf['metadata'])
self.last_search = None
@staticmethod
@@ -269,8 +231,9 @@ def _spec_and_platform(abstract_spec_str):
def _read_metadata(self, package_name):
"""Return metadata about the given package."""
json_filename = '{0}.json'.format(package_name)
json_dir = self.metadata_dir
json_path = os.path.join(json_dir, json_filename)
json_path = os.path.join(
spack.paths.share_path, 'bootstrap', self.name, json_filename
)
with open(json_path) as f:
data = json.load(f)
return data
@@ -344,6 +307,12 @@ def _install_and_test(
return True
return False
@property
def mirror_scope(self):
return spack.config.InternalConfigScope(
'bootstrap_buildcache', {'mirrors:': {self.name: self.url}}
)
def try_import(self, module, abstract_spec_str):
test_fn, info = functools.partial(_try_import_from_store, module), {}
if test_fn(query_spec=abstract_spec_str, query_info=info):
@@ -373,13 +342,9 @@ def try_search_path(self, executables, abstract_spec_str):
@_bootstrapper(type='install')
class _SourceBootstrapper(_BootstrapperBase):
class _SourceBootstrapper(object):
"""Install the software needed during bootstrapping from sources."""
config_scope_name = 'bootstrap_source'
def __init__(self, conf):
super(_SourceBootstrapper, self).__init__(conf)
self.metadata_dir = spack.util.path.canonicalize_path(conf['metadata'])
self.conf = conf
self.last_search = None
@@ -412,8 +377,7 @@ def try_import(self, module, abstract_spec_str):
tty.debug(msg.format(module, abstract_spec_str))
# Install the spec that should make the module importable
with spack.config.override(self.mirror_scope):
concrete_spec.package.do_install(fail_fast=True)
concrete_spec.package.do_install(fail_fast=True)
if _try_import_from_store(module, query_spec=concrete_spec, query_info=info):
self.last_search = info
@@ -426,8 +390,6 @@ def try_search_path(self, executables, abstract_spec_str):
self.last_search = info
return True
tty.info("Bootstrapping {0} from sources".format(abstract_spec_str))
# If we compile code from sources detecting a few build tools
# might reduce compilation time by a fair amount
_add_externals_if_missing()
@@ -440,8 +402,7 @@ def try_search_path(self, executables, abstract_spec_str):
msg = "[BOOTSTRAP] Try installing '{0}' from sources"
tty.debug(msg.format(abstract_spec_str))
with spack.config.override(self.mirror_scope):
concrete_spec.package.do_install()
concrete_spec.package.do_install()
if _executables_in_store(executables, concrete_spec, query_info=info):
self.last_search = info
return True
@@ -456,11 +417,11 @@ def _make_bootstrapper(conf):
return _bootstrap_methods[btype](conf)
def source_is_enabled_or_raise(conf):
"""Raise ValueError if the source is not enabled for bootstrapping"""
def _source_is_trusted(conf):
trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
if not trusted.get(name, False):
raise ValueError('source is not trusted')
if name not in trusted:
return False
return trusted[name]
def spec_for_current_python():
@@ -525,26 +486,36 @@ def ensure_module_importable_or_raise(module, abstract_spec=None):
return
abstract_spec = abstract_spec or module
source_configs = spack.config.get('bootstrap:sources', [])
h = GroupedExceptionHandler()
errors = {}
for current_config in bootstrapping_sources():
with h.forward(current_config['name']):
source_is_enabled_or_raise(current_config)
for current_config in source_configs:
if not _source_is_trusted(current_config):
msg = ('[BOOTSTRAP MODULE {0}] Skipping source "{1}" since it is '
'not trusted').format(module, current_config['name'])
tty.debug(msg)
continue
b = _make_bootstrapper(current_config)
b = _make_bootstrapper(current_config)
try:
if b.try_import(module, abstract_spec):
return
except Exception as e:
msg = '[BOOTSTRAP MODULE {0}] Unexpected error "{1}"'
tty.debug(msg.format(module, str(e)))
errors[current_config['name']] = e
assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(module) # noqa: E501
msg = 'cannot bootstrap the "{0}" Python module '.format(module)
# We couldn't import in any way, so raise an import error
msg = 'cannot bootstrap the "{0}" Python module'.format(module)
if abstract_spec:
msg += 'from spec "{0}" '.format(abstract_spec)
if tty.is_debug():
msg += h.grouped_message(with_tracebacks=True)
else:
msg += h.grouped_message(with_tracebacks=False)
msg += '\nRun `spack --debug ...` for more detailed errors'
msg += ' from spec "{0}"'.format(abstract_spec)
msg += ' due to the following failures:\n'
for method in errors:
err = errors[method]
msg += " '{0}' raised {1}: {2}\n".format(
method, err.__class__.__name__, str(err))
msg += ' Please run `spack -d spec zlib` for more verbose error messages'
raise ImportError(msg)
@@ -567,14 +538,16 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
return cmd
executables_str = ', '.join(executables)
source_configs = spack.config.get('bootstrap:sources', [])
for current_config in source_configs:
if not _source_is_trusted(current_config):
msg = ('[BOOTSTRAP EXECUTABLES {0}] Skipping source "{1}" since it is '
'not trusted').format(executables_str, current_config['name'])
tty.debug(msg)
continue
h = GroupedExceptionHandler()
for current_config in bootstrapping_sources():
with h.forward(current_config['name']):
source_is_enabled_or_raise(current_config)
b = _make_bootstrapper(current_config)
b = _make_bootstrapper(current_config)
try:
if b.try_search_path(executables, abstract_spec):
# Additional environment variables needed
concrete_spec, cmd = b.last_search['spec'], b.last_search['command']
@@ -589,16 +562,14 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
)
cmd.add_default_envmod(env_mods)
return cmd
except Exception as e:
msg = '[BOOTSTRAP EXECUTABLES {0}] Unexpected error "{1}"'
tty.debug(msg.format(executables_str, str(e)))
assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(executables_str) # noqa: E501
msg = 'cannot bootstrap any of the {0} executables '.format(executables_str)
# We couldn't import in any way, so raise an import error
msg = 'cannot bootstrap any of the {0} executables'.format(executables_str)
if abstract_spec:
msg += 'from spec "{0}" '.format(abstract_spec)
if tty.is_debug():
msg += h.grouped_message(with_tracebacks=True)
else:
msg += h.grouped_message(with_tracebacks=False)
msg += '\nRun `spack --debug ...` for more detailed errors'
msg += ' from spec "{0}"'.format(abstract_spec)
raise RuntimeError(msg)
@@ -855,19 +826,6 @@ def ensure_flake8_in_path_or_raise():
return ensure_executables_in_path_or_raise([executable], abstract_spec=root_spec)
def all_root_specs(development=False):
"""Return a list of all the root specs that may be used to bootstrap Spack.
Args:
development (bool): if True include dev dependencies
"""
specs = [clingo_root_spec(), gnupg_root_spec(), patchelf_root_spec()]
if development:
specs += [isort_root_spec(), mypy_root_spec(),
black_root_spec(), flake8_root_spec()]
return specs
def _missing(name, purpose, system_only=True):
"""Message to be printed if an executable is not found"""
msg = '[{2}] MISSING "{0}": {1}'
@@ -1005,23 +963,3 @@ def status_message(section):
msg += '\n'
msg = msg.format(pass_token if not missing_software else fail_token)
return msg, missing_software
def bootstrapping_sources(scope=None):
"""Return the list of configured sources of software for bootstrapping Spack
Args:
scope (str or None): if a valid configuration scope is given, return the
list only from that scope
"""
source_configs = spack.config.get('bootstrap:sources', default=None, scope=scope)
source_configs = source_configs or []
list_of_sources = []
for entry in source_configs:
current = copy.copy(entry)
metadata_dir = spack.util.path.canonicalize_path(entry['metadata'])
metadata_yaml = os.path.join(metadata_dir, METADATA_YAML_FILENAME)
with open(metadata_yaml) as f:
current.update(spack.util.spack_yaml.load(f))
list_of_sources.append(current)
return list_of_sources

View File

@@ -111,20 +111,6 @@
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
def should_set_parallel_jobs(jobserver_support=False):
"""Returns true in general, except when:
- The env variable SPACK_NO_PARALLEL_MAKE=1 is set
- jobserver_support is enabled, and a jobserver was found.
"""
if (
jobserver_support and
'MAKEFLAGS' in os.environ and
'--jobserver' in os.environ['MAKEFLAGS']
):
return False
return not env_flag(SPACK_NO_PARALLEL_MAKE)
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
@@ -134,6 +120,9 @@ class MakeExecutable(Executable):
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, jobs):
@@ -144,8 +133,9 @@ def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
parallel = should_set_parallel_jobs(jobserver_support=True) and \
kwargs.pop('parallel', self.jobs > 1)
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
@@ -191,7 +181,7 @@ def clean_environment():
env.unset('PYTHONPATH')
# Affects GNU make, can e.g. indirectly inhibit enabling parallel build
# env.unset('MAKEFLAGS')
env.unset('MAKEFLAGS')
# Avoid that libraries of build dependencies get hijacked.
env.unset('LD_PRELOAD')
@@ -839,7 +829,7 @@ def setup_package(pkg, dirty, context='build'):
# PrgEnv modules on cray platform. Module unload does no damage when
# unnecessary
on_cray, _ = _on_cray()
if on_cray and not dirty:
if on_cray:
for mod in ['cray-mpich', 'cray-libsci']:
module('unload', mod)
@@ -1038,7 +1028,7 @@ def get_cmake_prefix_path(pkg):
def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,
input_multiprocess_fd, jsfd1, jsfd2):
input_multiprocess_fd):
context = kwargs.get('context', 'build')
@@ -1145,8 +1135,6 @@ def child_fun():
"""
parent_pipe, child_pipe = multiprocessing.Pipe()
input_multiprocess_fd = None
jobserver_fd1 = None
jobserver_fd2 = None
serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
@@ -1156,17 +1144,11 @@ def child_fun():
'fileno'):
input_fd = os.dup(sys.stdin.fileno())
input_multiprocess_fd = MultiProcessFd(input_fd)
mflags = os.environ.get('MAKEFLAGS', False)
if mflags:
m = re.search(r'--jobserver-[^=]*=(\d),(\d)', mflags)
if m:
jobserver_fd1 = MultiProcessFd(int(m.group(1)))
jobserver_fd2 = MultiProcessFd(int(m.group(2)))
p = multiprocessing.Process(
target=_setup_pkg_and_run,
args=(serialized_pkg, function, kwargs, child_pipe,
input_multiprocess_fd, jobserver_fd1, jobserver_fd2))
input_multiprocess_fd))
p.start()

View File

@@ -210,10 +210,6 @@ def std_initconfig_entries(self):
"#------------------{0}\n".format("-" * 60),
]
def initconfig_package_entries(self):
"""This method is to be overwritten by the package"""
return []
def initconfig(self, spec, prefix):
cache_entries = (self.std_initconfig_entries() +
self.initconfig_compiler_entries() +

View File

@@ -176,7 +176,6 @@ def _std_args(pkg):
'-G', generator,
define('CMAKE_INSTALL_PREFIX', convert_to_posix_path(pkg.prefix)),
define('CMAKE_BUILD_TYPE', build_type),
define('BUILD_TESTING', pkg.run_tests),
]
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
@@ -362,7 +361,6 @@ def cmake_args(self):
* CMAKE_INSTALL_PREFIX
* CMAKE_BUILD_TYPE
* BUILD_TESTING
which will be set automatically.

View File

@@ -107,10 +107,10 @@ def cuda_flags(arch_list):
# each release of a new cuda minor version.
conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
conflicts('%gcc@11:', when='+cuda ^cuda@:11.4.0')
conflicts('%gcc@12:', when='+cuda ^cuda@:11.7')
conflicts('%gcc@12:', when='+cuda ^cuda@:11.6')
conflicts('%clang@12:', when='+cuda ^cuda@:11.4.0')
conflicts('%clang@13:', when='+cuda ^cuda@:11.5')
conflicts('%clang@14:', when='+cuda ^cuda@:11.7')
conflicts('%clang@14:', when='+cuda ^cuda@:11.6')
# https://gist.github.com/ax3l/9489132#gistcomment-3860114
conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')

View File

@@ -1,102 +0,0 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from llnl.util.filesystem import find
from spack.directives import depends_on, extends
from spack.multimethod import when
from spack.package import PackageBase
from spack.util.executable import Executable
class LuaPackage(PackageBase):
"""Specialized class for lua packages"""
phases = ['unpack', 'generate_luarocks_config', 'preprocess', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'LuaPackage'
list_depth = 1 # LuaRocks requires at least one level of spidering to find versions
depends_on('lua-lang')
extends('lua', when='^lua')
with when('^lua-luajit'):
extends('lua-luajit')
depends_on('luajit')
depends_on('lua-luajit+lualinks')
with when('^lua-luajit-openresty'):
extends('lua-luajit-openresty')
depends_on('luajit')
depends_on('lua-luajit-openresty+lualinks')
def unpack(self, spec, prefix):
if os.path.splitext(self.stage.archive_file)[1] == '.rock':
directory = self.luarocks('unpack', self.stage.archive_file, output=str)
dirlines = directory.split('\n')
# TODO: figure out how to scope this better
os.chdir(dirlines[2])
def _generate_tree_line(self, name, prefix):
return """{{ name = "{name}", root = "{prefix}" }};""".format(
name=name,
prefix=prefix,
)
def _luarocks_config_path(self):
return os.path.join(self.stage.source_path, 'spack_luarocks.lua')
def generate_luarocks_config(self, spec, prefix):
spec = self.spec
table_entries = []
for d in spec.traverse(
deptypes=("build", "run"), deptype_query="run"
):
if d.package.extends(self.extendee_spec):
table_entries.append(self._generate_tree_line(d.name, d.prefix))
path = self._luarocks_config_path()
with open(path, 'w') as config:
config.write(
"""
deps_mode="all"
rocks_trees={{
{}
}}
""".format(
"\n".join(table_entries)
)
)
return path
def setup_build_environment(self, env):
env.set('LUAROCKS_CONFIG', self._luarocks_config_path())
def preprocess(self, spec, prefix):
"""Override this to preprocess source before building with luarocks"""
pass
@property
def lua(self):
return Executable(self.spec['lua-lang'].prefix.bin.lua)
@property
def luarocks(self):
lr = Executable(self.spec['lua-lang'].prefix.bin.luarocks)
return lr
def luarocks_args(self):
return []
def install(self, spec, prefix):
rock = '.'
specs = find('.', '*.rockspec', recursive=False)
if specs:
rock = specs[0]
rocks_args = self.luarocks_args()
rocks_args.append(rock)
self.luarocks('--tree=' + prefix, 'make', *rocks_args)
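
Reviewer note: for context on the LuaPackage class deleted above, a small stand-alone sketch (hypothetical package names and prefixes) of the luarocks configuration text that generate_luarocks_config wrote out:

def generate_tree_line(name, prefix):
    # mirrors _generate_tree_line from the deleted class
    return '{{ name = "{name}", root = "{prefix}" }};'.format(name=name, prefix=prefix)

table_entries = [
    generate_tree_line('lua', '/opt/spack/lua-5.4.4'),             # hypothetical prefixes
    generate_tree_line('lua-luafilesystem', '/opt/spack/lfs-1.8'),
]
print("""
deps_mode="all"
rocks_trees={{
{0}
}}
""".format('\n'.join(table_entries)))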

View File

@@ -30,15 +30,6 @@ class IntelOneApiPackage(Package):
# organization (e.g. University/Company).
redistribute_source = False
@staticmethod
def update_description(cls):
"""Updates oneapi package descriptions with common text."""
text = """ LICENSE INFORMATION: By downloading and using this software, you agree to the terms
and conditions of the software license agreements at https://intel.ly/393CijO."""
cls.__doc__ = cls.__doc__ + text
return cls
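
Reviewer note: the removed update_description staticmethod was applied as a class decorator to oneAPI package classes; a minimal sketch of the pattern (hypothetical class name, abbreviated license text):

def update_description(cls):
    # Append common license text to the package class docstring.
    text = ' LICENSE INFORMATION: see https://intel.ly/393CijO.'
    cls.__doc__ = (cls.__doc__ or '') + text
    return cls

@update_description
class ExampleOneapiComponent:
    """Example oneAPI component."""

print(ExampleOneapiComponent.__doc__)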
@property
def component_dir(self):
"""Subdirectory for this component in the install prefix."""

View File

@@ -33,6 +33,7 @@
import spack.util.executable as exe
import spack.util.gpg as gpg_util
import spack.util.spack_yaml as syaml
import spack.util.url as url_util
import spack.util.web as web_util
from spack.error import SpackError
from spack.spec import Spec
@@ -41,8 +42,10 @@
'always',
]
SPACK_PR_MIRRORS_ROOT_URL = 's3://spack-binaries-prs'
SPACK_SHARED_PR_MIRROR_URL = url_util.join(SPACK_PR_MIRRORS_ROOT_URL,
'shared_pr_mirror')
TEMP_STORAGE_MIRROR_NAME = 'ci_temporary_mirror'
SPACK_RESERVED_TAGS = ["public", "protected", "notary"]
spack_gpg = spack.main.SpackCommand('gpg')
spack_compiler = spack.main.SpackCommand('compiler')
@@ -87,8 +90,8 @@ def _create_buildgroup(opener, headers, url, project, group_name, group_type):
return build_group_id
def _populate_buildgroup(job_names, group_name, project, site,
credentials, cdash_url):
def populate_buildgroup(job_names, group_name, project, site,
credentials, cdash_url):
url = "{0}/api/v1/buildgroup.php".format(cdash_url)
headers = {
@@ -129,30 +132,16 @@ def _populate_buildgroup(job_names, group_name, project, site,
response_code = response.getcode()
if response_code != 200:
msg = 'Error response code ({0}) in _populate_buildgroup'.format(
msg = 'Error response code ({0}) in populate_buildgroup'.format(
response_code)
tty.warn(msg)
def _is_main_phase(phase_name):
def is_main_phase(phase_name):
return True if phase_name == 'specs' else False
def get_job_name(phase, strip_compiler, spec, osarch, build_group):
""" Given the necessary parts, format the gitlab job name
Arguments:
phase (str): Either 'specs' for the main phase, or the name of a
bootstrapping phase
strip_compiler (bool): Should compiler be stripped from job name
spec (spack.spec.Spec): Spec job will build
osarch: Architecture TODO: (this is a spack.spec.ArchSpec,
but sphinx doesn't recognize the type and fails).
build_group (str): Name of build group this job belongs to (a CDash
notion)
Returns: The job name
"""
item_idx = 0
format_str = ''
format_args = []
@@ -174,7 +163,7 @@ def get_job_name(phase, strip_compiler, spec, osarch, build_group):
format_args.append(spec.version)
item_idx += 1
if _is_main_phase(phase) is True or strip_compiler is False:
if is_main_phase(phase) is True or strip_compiler is False:
format_str += ' {{{0}}}'.format(item_idx)
format_args.append(spec.compiler)
item_idx += 1
@@ -191,17 +180,12 @@ def get_job_name(phase, strip_compiler, spec, osarch, build_group):
return format_str.format(*format_args)
def _get_cdash_build_name(spec, build_group):
def get_cdash_build_name(spec, build_group):
return '{0}@{1}%{2} arch={3} ({4})'.format(
spec.name, spec.version, spec.compiler, spec.architecture, build_group)
def _remove_reserved_tags(tags):
"""Convenience function to strip reserved tags from jobs"""
return [tag for tag in tags if tag not in SPACK_RESERVED_TAGS]
def _get_spec_string(spec):
def get_spec_string(spec):
format_elements = [
'{name}{@version}',
'{%compiler}',
@@ -213,15 +197,15 @@ def _get_spec_string(spec):
return spec.format(''.join(format_elements))
def _format_root_spec(spec, main_phase, strip_compiler):
def format_root_spec(spec, main_phase, strip_compiler):
if main_phase is False and strip_compiler is True:
return '{0}@{1} arch={2}'.format(
spec.name, spec.version, spec.architecture)
else:
return spec.dag_hash()
return spec.build_hash()
def _spec_deps_key(s):
def spec_deps_key(s):
return '{0}/{1}'.format(s.name, s.dag_hash(7))
@@ -233,10 +217,8 @@ def _add_dependency(spec_label, dep_label, deps):
deps[spec_label].add(dep_label)
def _get_spec_dependencies(specs, deps, spec_labels, check_index_only=False,
mirrors_to_check=None):
spec_deps_obj = _compute_spec_deps(specs, check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check)
def get_spec_dependencies(specs, deps, spec_labels, check_index_only=False):
spec_deps_obj = compute_spec_deps(specs, check_index_only=check_index_only)
if spec_deps_obj:
dependencies = spec_deps_obj['dependencies']
@@ -253,7 +235,7 @@ def _get_spec_dependencies(specs, deps, spec_labels, check_index_only=False,
_add_dependency(entry['spec'], entry['depends'], deps)
def stage_spec_jobs(specs, check_index_only=False, mirrors_to_check=None):
def stage_spec_jobs(specs, check_index_only=False):
"""Take a set of release specs and generate a list of "stages", where the
jobs in any stage are dependent only on jobs in previous stages. This
allows us to maximize build parallelism within the gitlab-ci framework.
@@ -265,8 +247,6 @@ def stage_spec_jobs(specs, check_index_only=False, mirrors_to_check=None):
are up to date on those mirrors. This flag limits that search to
the binary cache indices on those mirrors to speed the process up,
even though there is no guarantee the index is up to date.
mirrors_to_check: Optional mapping giving mirrors to check instead of
any configured mirrors.
Returns: A tuple of information objects describing the specs, dependencies
and stages:
@@ -286,11 +266,11 @@ def stage_spec_jobs(specs, check_index_only=False, mirrors_to_check=None):
"""
# The convenience method below, "_remove_satisfied_deps()", does not modify
# The convenience method below, "remove_satisfied_deps()", does not modify
# the "deps" parameter. Instead, it returns a new dictionary where only
# dependencies which have not yet been satisfied are included in the
# return value.
def _remove_satisfied_deps(deps, satisfied_list):
def remove_satisfied_deps(deps, satisfied_list):
new_deps = {}
for key, value in iteritems(deps):
@@ -303,8 +283,8 @@ def _remove_satisfied_deps(deps, satisfied_list):
deps = {}
spec_labels = {}
_get_spec_dependencies(specs, deps, spec_labels, check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check)
get_spec_dependencies(
specs, deps, spec_labels, check_index_only=check_index_only)
# Save the original deps, as we need to return them at the end of the
# function. In the while loop below, the "dependencies" variable is
@@ -322,7 +302,7 @@ def _remove_satisfied_deps(deps, satisfied_list):
# Note that "dependencies" is a dictionary mapping each dependent
# package to the set of not-yet-handled dependencies. The final step
# below removes all the dependencies that are handled by this stage.
dependencies = _remove_satisfied_deps(dependencies, next_stage)
dependencies = remove_satisfied_deps(dependencies, next_stage)
if unstaged:
stages.append(unstaged.copy())
@@ -330,12 +310,13 @@ def _remove_satisfied_deps(deps, satisfied_list):
return spec_labels, deps, stages
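
Reviewer note: independent of the renames in this file, the staging loop above repeatedly peels off every spec whose remaining dependencies are already staged, so each stage depends only on earlier ones. A stand-alone sketch of that idea (not Spack code):

def stage_jobs(deps):
    # deps maps each spec label to the set of labels it depends on
    remaining = {k: set(v) for k, v in deps.items()}
    stages = []
    while remaining:
        ready = {k for k, v in remaining.items() if not v}
        if not ready:
            raise ValueError('dependency cycle detected')
        stages.append(sorted(ready))
        remaining = {k: v - ready for k, v in remaining.items() if k not in ready}
    return stages

print(stage_jobs({'zlib': set(), 'cmake': {'zlib'}, 'hdf5': {'zlib', 'cmake'}}))
# [['zlib'], ['cmake'], ['hdf5']]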
def _print_staging_summary(spec_labels, dependencies, stages):
def print_staging_summary(spec_labels, dependencies, stages):
if not stages:
return
tty.msg(' Staging summary ([x] means a job needs rebuilding):')
for stage_index, stage in enumerate(stages):
tty.msg(' Staging summary:')
stage_index = 0
for stage in stages:
tty.msg(' stage {0} ({1} jobs):'.format(stage_index, len(stage)))
for job in sorted(stage):
@@ -343,10 +324,12 @@ def _print_staging_summary(spec_labels, dependencies, stages):
tty.msg(' [{1}] {0} -> {2}'.format(
job,
'x' if spec_labels[job]['needs_rebuild'] else ' ',
_get_spec_string(s)))
get_spec_string(s)))
stage_index += 1
def _compute_spec_deps(spec_list, check_index_only=False, mirrors_to_check=None):
def compute_spec_deps(spec_list, check_index_only=False):
"""
Computes all the dependencies for the spec(s) and generates a JSON
object which provides both a list of unique spec names as well as a
@@ -419,17 +402,17 @@ def append_dep(s, d):
continue
up_to_date_mirrors = bindist.get_mirrors_for_spec(
spec=s, mirrors_to_check=mirrors_to_check, index_only=check_index_only)
spec=s, full_hash_match=True, index_only=check_index_only)
skey = _spec_deps_key(s)
skey = spec_deps_key(s)
spec_labels[skey] = {
'spec': _get_spec_string(s),
'spec': get_spec_string(s),
'root': root_spec,
'needs_rebuild': not up_to_date_mirrors,
}
for d in s.dependencies(deptype=all):
dkey = _spec_deps_key(d)
dkey = spec_deps_key(d)
if d.external:
tty.msg('Will not stage external dep: {0}'.format(d))
continue
@@ -452,11 +435,11 @@ def append_dep(s, d):
return deps_json_obj
def _spec_matches(spec, match_string):
def spec_matches(spec, match_string):
return spec.satisfies(match_string)
def _copy_attributes(attrs_list, src_dict, dest_dict):
def copy_attributes(attrs_list, src_dict, dest_dict):
for runner_attr in attrs_list:
if runner_attr in src_dict:
if runner_attr in dest_dict and runner_attr == 'tags':
@@ -477,7 +460,7 @@ def _copy_attributes(attrs_list, src_dict, dest_dict):
dest_dict[runner_attr] = copy.deepcopy(src_dict[runner_attr])
def _find_matching_config(spec, gitlab_ci):
def find_matching_config(spec, gitlab_ci):
runner_attributes = {}
overridable_attrs = [
'image',
@@ -488,16 +471,16 @@ def _find_matching_config(spec, gitlab_ci):
'after_script',
]
_copy_attributes(overridable_attrs, gitlab_ci, runner_attributes)
copy_attributes(overridable_attrs, gitlab_ci, runner_attributes)
ci_mappings = gitlab_ci['mappings']
for ci_mapping in ci_mappings:
for match_string in ci_mapping['match']:
if _spec_matches(spec, match_string):
if spec_matches(spec, match_string):
if 'runner-attributes' in ci_mapping:
_copy_attributes(overridable_attrs,
ci_mapping['runner-attributes'],
runner_attributes)
copy_attributes(overridable_attrs,
ci_mapping['runner-attributes'],
runner_attributes)
return runner_attributes
else:
return None
@@ -505,16 +488,16 @@ def _find_matching_config(spec, gitlab_ci):
return runner_attributes
def _pkg_name_from_spec_label(spec_label):
def pkg_name_from_spec_label(spec_label):
return spec_label[:spec_label.index('/')]
def _format_job_needs(phase_name, strip_compilers, dep_jobs,
osname, build_group, prune_dag, stage_spec_dict,
enable_artifacts_buildcache):
def format_job_needs(phase_name, strip_compilers, dep_jobs,
osname, build_group, prune_dag, stage_spec_dict,
enable_artifacts_buildcache):
needs_list = []
for dep_job in dep_jobs:
dep_spec_key = _spec_deps_key(dep_job)
dep_spec_key = spec_deps_key(dep_job)
dep_spec_info = stage_spec_dict[dep_spec_key]
if not prune_dag or dep_spec_info['needs_rebuild']:
@@ -608,38 +591,7 @@ def get_spec_filter_list(env, affected_pkgs, dependencies=True, dependents=True)
def generate_gitlab_ci_yaml(env, print_summary, output_file,
prune_dag=False, check_index_only=False,
run_optimizer=False, use_dependencies=False,
artifacts_root=None, remote_mirror_override=None):
""" Generate a gitlab yaml file to run a dynamic child pipeline from
the spec matrix in the active environment.
Arguments:
env (spack.environment.Environment): Activated environment object
which must contain a gitlab-ci section describing how to map
specs to runners
print_summary (bool): Should we print a summary of all the jobs in
the stages in which they were placed.
output_file (str): File path where generated file should be written
prune_dag (bool): If True, do not generate jobs for specs that are
already built on the mirror.
check_index_only (bool): If True, attempt to fetch the mirror index
and only use that to determine whether built specs on the mirror
are up to date (this mode results in faster yaml generation time).
Otherwise, also check each spec directly by url (useful if there is
no index or it might be out of date).
run_optimizer (bool): If True, post-process the generated yaml to
try to reduce the size (attempts to collect repeated configuration
and replace with definitions).
use_dependencies (bool): If true, use "dependencies" rather than "needs"
("needs" allows DAG scheduling). Useful if gitlab instance cannot
be configured to handle more than a few "needs" per job.
artifacts_root (str): Path where artifacts like logs, environment
files (spack.yaml, spack.lock), etc should be written. GitLab
requires this to be within the project directory.
remote_mirror_override (str): Typically only needed when one spack.yaml
is used to populate several mirrors with binaries, based on some
criteria. Spack protected pipelines populate different mirrors based
on branch name, facilitated by this option.
"""
artifacts_root=None):
with spack.concretize.disable_compiler_existence_check():
with env.write_transaction():
env.concretize()
@@ -688,19 +640,17 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
for s in affected_specs:
tty.debug(' {0}'.format(s.name))
# Downstream jobs will "need" (depend on, for both scheduling and
# artifacts, which include spack.lock file) this pipeline generation
# job by both name and pipeline id. If those environment variables
# do not exist, then maybe this is just running in a shell, in which
# case, there is no expectation gitlab will ever run the generated
# pipeline and those environment variables do not matter.
generate_job_name = os.environ.get('CI_JOB_NAME', 'job-does-not-exist')
parent_pipeline_id = os.environ.get('CI_PIPELINE_ID', 'pipeline-does-not-exist')
generate_job_name = os.environ.get('CI_JOB_NAME', None)
parent_pipeline_id = os.environ.get('CI_PIPELINE_ID', None)
# Values: "spack_pull_request", "spack_protected_branch", or not set
spack_pipeline_type = os.environ.get('SPACK_PIPELINE_TYPE', None)
is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
spack_buildcache_copy = os.environ.get('SPACK_COPY_BUILDCACHE', None)
spack_pr_branch = os.environ.get('SPACK_PR_BRANCH', None)
pr_mirror_url = None
if spack_pr_branch:
pr_mirror_url = url_util.join(SPACK_PR_MIRRORS_ROOT_URL,
spack_pr_branch)
if 'mirrors' not in yaml_root or len(yaml_root['mirrors'].values()) < 1:
tty.die('spack ci generate requires an env containing a mirror')
@@ -755,25 +705,14 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
'strip-compilers': False,
})
# If a remote mirror override (alternate buildcache destination) was
# specified, add it here in case it has already built hashes we might
# generate.
mirrors_to_check = None
if remote_mirror_override:
if spack_pipeline_type == 'spack_protected_branch':
# Overriding the main mirror in this case might result
# in skipping jobs on a release pipeline because specs are
# up to date in develop. Eventually we want to notice and take
# advantage of this by scheduling a job to copy the spec from
# develop to the release, but until we have that, this makes
# sure we schedule a rebuild job if the spec isn't already in
# override mirror.
mirrors_to_check = {
'override': remote_mirror_override
}
else:
spack.mirror.add(
'ci_pr_mirror', remote_mirror_override, cfg.default_modify_scope())
# Add per-PR mirror (and shared PR mirror) if enabled, as some specs might
# be up to date in one of those and thus not need to be rebuilt.
if pr_mirror_url:
spack.mirror.add(
'ci_pr_mirror', pr_mirror_url, cfg.default_modify_scope())
spack.mirror.add('ci_shared_pr_mirror',
SPACK_SHARED_PR_MIRROR_URL,
cfg.default_modify_scope())
pipeline_artifacts_dir = artifacts_root
if not pipeline_artifacts_dir:
@@ -848,13 +787,11 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
phase_spec.concretize()
staged_phases[phase_name] = stage_spec_jobs(
concrete_phase_specs,
check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check)
check_index_only=check_index_only)
finally:
# Clean up remote mirror override if enabled
if remote_mirror_override:
if spack_pipeline_type != 'spack_protected_branch':
spack.mirror.remove('ci_pr_mirror', cfg.default_modify_scope())
# Clean up PR mirror if enabled
if pr_mirror_url:
spack.mirror.remove('ci_pr_mirror', cfg.default_modify_scope())
all_job_names = []
output_object = {}
@@ -867,7 +804,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
max_needs_job = ''
# If this is configured, spack will fail "spack ci generate" if it
# generates any hash which exists under the broken specs url.
# generates any full hash which exists under the broken specs url.
broken_spec_urls = None
if broken_specs_url:
if broken_specs_url.startswith('http'):
@@ -882,7 +819,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
phase_name = phase['name']
strip_compilers = phase['strip-compilers']
main_phase = _is_main_phase(phase_name)
main_phase = is_main_phase(phase_name)
spec_labels, dependencies, stages = staged_phases[phase_name]
for stage_jobs in stages:
@@ -893,9 +830,11 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
for spec_label in stage_jobs:
spec_record = spec_labels[spec_label]
root_spec = spec_record['rootSpec']
pkg_name = _pkg_name_from_spec_label(spec_label)
pkg_name = pkg_name_from_spec_label(spec_label)
release_spec = root_spec[pkg_name]
release_spec_full_hash = release_spec.full_hash()
release_spec_dag_hash = release_spec.dag_hash()
release_spec_build_hash = release_spec.build_hash()
if prune_untouched_packages:
if release_spec not in affected_specs:
@@ -904,7 +843,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
spec_record['needs_rebuild'] = False
continue
runner_attribs = _find_matching_config(
runner_attribs = find_matching_config(
release_spec, gitlab_ci)
if not runner_attribs:
@@ -914,14 +853,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
tags = [tag for tag in runner_attribs['tags']]
if spack_pipeline_type is not None:
# For spack pipelines "public" and "protected" are reserved tags
tags = _remove_reserved_tags(tags)
if spack_pipeline_type == 'spack_protected_branch':
tags.extend(['aws', 'protected'])
elif spack_pipeline_type == 'spack_pull_request':
tags.extend(['public'])
variables = {}
if 'variables' in runner_attribs:
variables.update(runner_attribs['variables'])
@@ -966,13 +897,15 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
compiler_action = 'NONE'
if len(phases) > 1:
compiler_action = 'FIND_ANY'
if _is_main_phase(phase_name):
if is_main_phase(phase_name):
compiler_action = 'INSTALL_MISSING'
job_vars = {
'SPACK_ROOT_SPEC': _format_root_spec(
'SPACK_ROOT_SPEC': format_root_spec(
root_spec, main_phase, strip_compilers),
'SPACK_JOB_SPEC_DAG_HASH': release_spec_dag_hash,
'SPACK_JOB_SPEC_BUILD_HASH': release_spec_build_hash,
'SPACK_JOB_SPEC_FULL_HASH': release_spec_full_hash,
'SPACK_JOB_SPEC_PKG_NAME': release_spec.name,
'SPACK_COMPILER_ACTION': compiler_action
}
@@ -991,15 +924,15 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
# purposes, so we only get the direct dependencies.
dep_jobs = []
for dep_label in dependencies[spec_label]:
dep_pkg = _pkg_name_from_spec_label(dep_label)
dep_pkg = pkg_name_from_spec_label(dep_label)
dep_root = spec_labels[dep_label]['rootSpec']
dep_jobs.append(dep_root[dep_pkg])
job_dependencies.extend(
_format_job_needs(phase_name, strip_compilers,
dep_jobs, osname, build_group,
prune_dag, spec_labels,
enable_artifacts_buildcache))
format_job_needs(phase_name, strip_compilers,
dep_jobs, osname, build_group,
prune_dag, spec_labels,
enable_artifacts_buildcache))
rebuild_spec = spec_record['needs_rebuild']
@@ -1010,7 +943,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
# compiler we are supposed to use is listed in any of the
# bootstrap spec lists, then we will add more dependencies to
# the job (that compiler and maybe it's dependencies as well).
if _is_main_phase(phase_name):
if is_main_phase(phase_name):
spec_arch_family = (release_spec.architecture
.target
.microarchitecture
@@ -1038,7 +971,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
# be rebuilt if the compiler targeted to build it
# needs to be rebuilt.
bs_specs, _, _ = staged_phases[bs['phase-name']]
c_spec_key = _spec_deps_key(c_spec)
c_spec_key = spec_deps_key(c_spec)
rbld_comp = bs_specs[c_spec_key]['needs_rebuild']
rebuild_spec = rebuild_spec or rbld_comp
# Also update record so dependents do not fail to
@@ -1052,14 +985,14 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
]
job_dependencies.extend(
_format_job_needs(bs['phase-name'],
bs['strip-compilers'],
dep_jobs,
str(bs_arch),
build_group,
prune_dag,
bs_specs,
enable_artifacts_buildcache))
format_job_needs(bs['phase-name'],
bs['strip-compilers'],
dep_jobs,
str(bs_arch),
build_group,
prune_dag,
bs_specs,
enable_artifacts_buildcache))
else:
debug_msg = ''.join([
'Considered compiler {0} for spec ',
@@ -1076,9 +1009,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
continue
if (broken_spec_urls is not None and
release_spec_dag_hash in broken_spec_urls):
release_spec_full_hash in broken_spec_urls):
known_broken_specs_encountered.append('{0} ({1})'.format(
release_spec, release_spec_dag_hash))
release_spec, release_spec_full_hash))
if artifacts_root:
job_dependencies.append({
@@ -1089,7 +1022,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
job_vars['SPACK_SPEC_NEEDS_REBUILD'] = str(rebuild_spec)
if enable_cdash_reporting:
cdash_build_name = _get_cdash_build_name(
cdash_build_name = get_cdash_build_name(
release_spec, build_group)
all_job_names.append(cdash_build_name)
job_vars['SPACK_CDASH_BUILD_NAME'] = cdash_build_name
@@ -1154,7 +1087,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
phase_name = phase['name']
tty.msg('Stages for phase "{0}"'.format(phase_name))
phase_stages = staged_phases[phase_name]
_print_staging_summary(*phase_stages)
print_staging_summary(*phase_stages)
tty.debug('{0} build jobs generated in {1} stages'.format(
job_id, stage_id))
@@ -1166,8 +1099,8 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
# Use "all_job_names" to populate the build group for this set
if enable_cdash_reporting and cdash_auth_token:
try:
_populate_buildgroup(all_job_names, build_group, cdash_project,
cdash_site, cdash_auth_token, cdash_url)
populate_buildgroup(all_job_names, build_group, cdash_project,
cdash_site, cdash_auth_token, cdash_url)
except (SpackError, HTTPError, URLError) as err:
tty.warn('Problem populating buildgroup: {0}'.format(err))
else:
@@ -1203,13 +1136,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
cleanup_job = {}
if service_job_config:
_copy_attributes(default_attrs,
service_job_config,
cleanup_job)
if 'tags' in cleanup_job:
service_tags = _remove_reserved_tags(cleanup_job['tags'])
cleanup_job['tags'] = service_tags
copy_attributes(default_attrs,
service_job_config,
cleanup_job)
cleanup_job['stage'] = 'cleanup-temp-storage'
cleanup_job['script'] = [
@@ -1218,91 +1147,22 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
]
cleanup_job['when'] = 'always'
cleanup_job['retry'] = service_job_retries
cleanup_job['interruptible'] = True
output_object['cleanup'] = cleanup_job
if ('signing-job-attributes' in gitlab_ci and
spack_pipeline_type == 'spack_protected_branch'):
# External signing: generate a job to check and sign binary pkgs
stage_names.append('stage-sign-pkgs')
signing_job_config = gitlab_ci['signing-job-attributes']
signing_job = {}
signing_job_attrs_to_copy = [
'image',
'tags',
'variables',
'before_script',
'script',
'after_script',
]
_copy_attributes(signing_job_attrs_to_copy,
signing_job_config,
signing_job)
signing_job_tags = []
if 'tags' in signing_job:
signing_job_tags = _remove_reserved_tags(signing_job['tags'])
for tag in ['aws', 'protected', 'notary']:
if tag not in signing_job_tags:
signing_job_tags.append(tag)
signing_job['tags'] = signing_job_tags
signing_job['stage'] = 'stage-sign-pkgs'
signing_job['when'] = 'always'
signing_job['retry'] = {
'max': 2,
'when': ['always']
}
signing_job['interruptible'] = True
output_object['sign-pkgs'] = signing_job
if spack_buildcache_copy:
# Generate a job to copy the contents from wherever the builds are getting
# pushed to the url specified in the "SPACK_BUILDCACHE_COPY" environment
# variable.
src_url = remote_mirror_override or remote_mirror_url
dest_url = spack_buildcache_copy
stage_names.append('stage-copy-buildcache')
copy_job = {
'stage': 'stage-copy-buildcache',
'tags': ['spack', 'public', 'medium', 'aws', 'x86_64'],
'image': 'ghcr.io/spack/python-aws-bash:0.0.1',
'when': 'on_success',
'interruptible': True,
'retry': service_job_retries,
'script': [
'. ./share/spack/setup-env.sh',
'spack --version',
'aws s3 sync --exclude *index.json* --exclude *pgp* {0} {1}'.format(
src_url, dest_url)
]
}
output_object['copy-mirror'] = copy_job
if rebuild_index_enabled:
# Add a final job to regenerate the index
stage_names.append('stage-rebuild-index')
final_job = {}
if service_job_config:
_copy_attributes(default_attrs,
service_job_config,
final_job)
if 'tags' in final_job:
service_tags = _remove_reserved_tags(final_job['tags'])
final_job['tags'] = service_tags
copy_attributes(default_attrs,
service_job_config,
final_job)
index_target_mirror = mirror_urls[0]
if remote_mirror_override:
index_target_mirror = remote_mirror_override
if is_pr_pipeline:
index_target_mirror = pr_mirror_url
final_job['stage'] = 'stage-rebuild-index'
final_job['script'] = [
@@ -1311,7 +1171,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
]
final_job['when'] = 'always'
final_job['retry'] = service_job_retries
final_job['interruptible'] = True
output_object['rebuild-index'] = final_job
@@ -1344,9 +1203,8 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
'SPACK_PIPELINE_TYPE': str(spack_pipeline_type)
}
if remote_mirror_override:
(output_object['variables']
['SPACK_REMOTE_MIRROR_OVERRIDE']) = remote_mirror_override
if pr_mirror_url:
output_object['variables']['SPACK_PR_MIRROR_URL'] = pr_mirror_url
spack_stack_name = os.environ.get('SPACK_CI_STACK_NAME', None)
if spack_stack_name:
@@ -1371,9 +1229,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
noop_job = {}
if service_job_config:
_copy_attributes(default_attrs,
service_job_config,
noop_job)
copy_attributes(default_attrs,
service_job_config,
noop_job)
if 'script' not in noop_job:
noop_job['script'] = [
@@ -1396,7 +1254,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
def _url_encode_string(input_string):
def url_encode_string(input_string):
encoded_keyval = urlencode({'donotcare': input_string})
eq_idx = encoded_keyval.find('=') + 1
encoded_value = encoded_keyval[eq_idx:]
@@ -1404,17 +1262,6 @@ def _url_encode_string(input_string):
def import_signing_key(base64_signing_key):
""" Given Base64-encoded gpg key, decode and import it to use for
signing packages.
Arguments:
base64_signing_key (str): A gpg key including the secret key,
armor-exported and base64 encoded, so it can be stored in a
gitlab CI variable. For an example of how to generate such
a key, see:
https://github.com/spack/spack-infrastructure/blob/main/gitlab-docker/files/gen-key
"""
if not base64_signing_key:
tty.warn('No key found for signing/verifying packages')
return
@@ -1452,34 +1299,14 @@ def import_signing_key(base64_signing_key):
def can_sign_binaries():
""" Utility method to determine if this spack instance is capable of
signing binary packages. This is currently only possible if the
spack gpg keystore contains exactly one secret key."""
return len(gpg_util.signing_keys()) == 1
def can_verify_binaries():
""" Utility method to determin if this spack instance is capable (at
least in theory) of verifying signed binaries."""
return len(gpg_util.public_keys()) >= 1
def configure_compilers(compiler_action, scope=None):
""" Depending on the compiler_action parameter, either turn on the
install_missing_compilers config option, or find spack compilers,
or do nothing. This is used from rebuild jobs in bootstrapping
pipelines, where in the bootstrapping phase we would pass
FIND_ANY in case of compiler-agnostic bootstrapping, while in the
spec building phase we would pass INSTALL_MISSING in order to get
spack to use the compiler which was built in the previous phase and
is now sitting in the binary mirror.
Arguments:
compiler_action (str): 'FIND_ANY', 'INSTALL_MISSING' have meanings
described above. Any other value essentially results in a no-op.
scope (spack.config.ConfigScope): Optional. The scope in which to look for
compilers, in case 'FIND_ANY' was provided.
"""
if compiler_action == 'INSTALL_MISSING':
tty.debug('Make sure bootstrapped compiler will be installed')
config = cfg.get('config')
@@ -1503,35 +1330,6 @@ def configure_compilers(compiler_action, scope=None):
def get_concrete_specs(env, root_spec, job_name, compiler_action):
""" Build a dictionary of concrete specs relevant to a particular
rebuild job. This includes the root spec and the spec to be
rebuilt (which could be the same).
Arguments:
env (spack.environment.Environment): Activated spack environment
used to get concrete root spec by hash in case compiler_action
is anything other than FIND_ANY.
root_spec (str): If compiler_action is FIND_ANY root_spec is
a string representation which can be turned directly into
a spec, otherwise, it's a hash used to index the activated
spack environment.
job_name (str): Name of package to be built, used to index the
concrete root spec and produce the concrete spec to be
built.
compiler_action (str): Determines how to interpret the root_spec
parameter, either as a string representation or as a hash.
Returns:
.. code-block:: JSON
{
"root": "<spec>",
"<job-pkg-name>": "<spec>",
}
"""
spec_map = {
'root': None,
}
@@ -1578,19 +1376,6 @@ def _push_mirror_contents(env, specfile_path, sign_binaries, mirror_url):
def push_mirror_contents(env, specfile_path, mirror_url, sign_binaries):
""" Push one or more binary packages to the mirror.
Arguments:
env (spack.environment.Environment): Optional environment. If
provided, it is used to make sure binary package to push
exists in the environment.
specfile_path (str): Path to spec.json corresponding to built pkg
to push.
mirror_url (str): Base url of target mirror
sign_binaries (bool): If True, spack will attempt to sign binary
package before pushing.
"""
try:
_push_mirror_contents(env, specfile_path, sign_binaries, mirror_url)
except Exception as inst:
@@ -1615,15 +1400,6 @@ def push_mirror_contents(env, specfile_path, mirror_url, sign_binaries):
def copy_stage_logs_to_artifacts(job_spec, job_log_dir):
""" Looks for spack-build-out.txt in the stage directory of the given
job_spec, and attempts to copy the file into the directory given
by job_log_dir.
Arguments:
job_spec (spack.spec.Spec): Spec associated with spack install log
job_log_dir (str): Path into which build log should be copied
"""
try:
job_pkg = spack.repo.get(job_spec)
tty.debug('job package: {0}'.format(job_pkg))
@@ -1642,14 +1418,6 @@ def copy_stage_logs_to_artifacts(job_spec, job_log_dir):
def download_and_extract_artifacts(url, work_dir):
""" Look for gitlab artifacts.zip at the given url, and attempt to download
and extract the contents into the given work_dir
Arguments:
url (str): Complete url to artifacts.zip file
work_dir (str): Path to destination where artifacts should be extracted
"""
tty.msg('Fetching artifacts from: {0}\n'.format(url))
headers = {
@@ -1689,8 +1457,6 @@ def download_and_extract_artifacts(url, work_dir):
def get_spack_info():
""" If spack is running from a git repo, return the most recent git log
entry, otherwise, return a string containing the spack version. """
git_path = os.path.join(spack.paths.prefix, ".git")
if os.path.exists(git_path):
git = exe.which("git")
@@ -1706,23 +1472,6 @@ def get_spack_info():
def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
""" Look in the local spack clone to find the checkout_commit, and if
provided, the merge_commit given as arguments. If those commits can
be found locally, then clone spack and attempt to recreate a merge
commit with the same parent commits as tested in gitlab. This looks
something like 1) git clone repo && cd repo 2) git checkout
<checkout_commit> 3) git merge <merge_commit>. If there is no
merge_commit provided, then skip step (3).
Arguments:
repro_dir (str): Location where spack should be cloned
checkout_commit (str): SHA of PR branch commit
merge_commit (str): SHA of target branch parent
Returns: True if git repo state was successfully recreated, or False
otherwise.
"""
# figure out the path to the spack git version being used for the
# reproduction
print('checkout_commit: {0}'.format(checkout_commit))
@@ -1764,7 +1513,7 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
fail_on_error=False)
if git.returncode != 0:
tty.error('Unable to clone your local spack repo:')
tty.error('Unable to clone your local spac repo:')
tty.msg(clone_out)
return False
@@ -1797,18 +1546,6 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
def reproduce_ci_job(url, work_dir):
""" Given a url to gitlab artifacts.zip from a failed 'spack ci rebuild' job,
attempt to setup an environment in which the failure can be reproduced
locally. This entails the following:
First download and extract artifacts. Then look through those artifacts
to glean some information needed for the reproducer (e.g. one of the
artifacts contains information about the version of spack tested by
gitlab, another is the generated pipeline yaml containing details
of the job like the docker image used to run it). The output of this
function is a set of printed instructions for running docker and then
commands to run to reproduce the build once inside the container.
"""
download_and_extract_artifacts(url, work_dir)
lock_file = fs.find(work_dir, 'spack.lock')[0]

View File

@@ -8,10 +8,7 @@
import argparse
import os
import re
import shlex
import sys
from textwrap import dedent
from typing import List, Tuple
import ruamel.yaml as yaml
import six
@@ -150,58 +147,6 @@ def get_command(cmd_name):
return getattr(get_module(cmd_name), pname)
class _UnquotedFlags(object):
"""Use a heuristic in `.extract()` to detect whether the user is trying to set
multiple flags like the docker ENV attribute allows (e.g. 'cflags=-Os -pipe').
If the heuristic finds a match (which can be checked with `__bool__()`), a warning
message explaining how to quote multiple flags correctly can be generated with
`.report()`.
"""
flags_arg_pattern = re.compile(
r'^({0})=([^\'"].*)$'.format(
'|'.join(spack.spec.FlagMap.valid_compiler_flags()),
))
def __init__(self, all_unquoted_flag_pairs):
# type: (List[Tuple[re.Match, str]]) -> None
self._flag_pairs = all_unquoted_flag_pairs
def __bool__(self):
# type: () -> bool
return bool(self._flag_pairs)
@classmethod
def extract(cls, sargs):
# type: (str) -> _UnquotedFlags
all_unquoted_flag_pairs = [] # type: List[Tuple[re.Match, str]]
prev_flags_arg = None
for arg in shlex.split(sargs):
if prev_flags_arg is not None:
all_unquoted_flag_pairs.append((prev_flags_arg, arg))
prev_flags_arg = cls.flags_arg_pattern.match(arg)
return cls(all_unquoted_flag_pairs)
def report(self):
# type: () -> str
single_errors = [
'({0}) {1} {2} => {3}'.format(
i + 1, match.group(0), next_arg,
'{0}="{1} {2}"'.format(match.group(1), match.group(2), next_arg),
)
for i, (match, next_arg) in enumerate(self._flag_pairs)
]
return dedent("""\
Some compiler or linker flags were provided without quoting their arguments,
which now causes spack to try to parse the *next* argument as a spec component
such as a variant instead of an additional compiler or linker flag. If the
intent was to set multiple flags, try quoting them together as described below.
Possible flag quotation errors (with the correctly-quoted version after the =>):
{0}""").format('\n'.join(single_errors))
def parse_specs(args, **kwargs):
"""Convenience function for parsing arguments from specs. Handles common
exceptions and dies if there are errors.
@@ -210,28 +155,29 @@ def parse_specs(args, **kwargs):
normalize = kwargs.get('normalize', False)
tests = kwargs.get('tests', False)
sargs = args
if not isinstance(args, six.string_types):
sargs = ' '.join(args)
unquoted_flags = _UnquotedFlags.extract(sargs)
try:
sargs = args
if not isinstance(args, six.string_types):
sargs = ' '.join(spack.util.string.quote(args))
specs = spack.spec.parse(sargs)
for spec in specs:
if concretize:
spec.concretize(tests=tests) # implies normalize
elif normalize:
spec.normalize(tests=tests)
return specs
except spack.spec.SpecParseError as e:
msg = e.message + "\n" + str(e.string) + "\n"
msg += (e.pos + 2) * " " + "^"
raise spack.error.SpackError(msg)
except spack.error.SpecError as e:
msg = e.message
if e.long_message:
msg += e.long_message
if unquoted_flags:
msg += '\n\n'
msg += unquoted_flags.report()
raise spack.error.SpackError(msg)
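
Reviewer note: the removed _UnquotedFlags heuristic targets command lines like 'spack install zlib cflags=-Os -pipe', where the unquoted '-pipe' is parsed as a separate spec token. A stand-alone re-creation of the detection (the flag-name list is assumed to match FlagMap.valid_compiler_flags()):

import re
import shlex

FLAGS = ('cflags', 'cxxflags', 'fflags', 'ldflags', 'ldlibs', 'cppflags')
pattern = re.compile(r'^({0})=([^\'"].*)$'.format('|'.join(FLAGS)))

def unquoted_flag_pairs(spec_string):
    pairs, prev = [], None
    for arg in shlex.split(spec_string):
        if prev is not None:
            pairs.append((prev.group(0), arg))
        prev = pattern.match(arg)
    return pairs

print(unquoted_flag_pairs('zlib cflags=-Os -pipe'))    # [('cflags=-Os', '-pipe')]
print(unquoted_flag_pairs('zlib cflags="-Os -pipe"'))  # []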

View File

@@ -6,9 +6,7 @@
import os.path
import shutil
import tempfile
import llnl.util.filesystem
import llnl.util.tty
import llnl.util.tty.color
@@ -17,9 +15,6 @@
import spack.cmd.common.arguments
import spack.config
import spack.main
import spack.mirror
import spack.spec
import spack.stage
import spack.util.path
description = "manage bootstrap configuration"
@@ -27,38 +22,6 @@
level = "long"
# Tarball to be downloaded if binary packages are requested in a local mirror
BINARY_TARBALL = 'https://github.com/spack/spack-bootstrap-mirrors/releases/download/v0.2/bootstrap-buildcache.tar.gz'
#: Subdirectory where to create the mirror
LOCAL_MIRROR_DIR = 'bootstrap_cache'
# Metadata for a generated binary mirror
BINARY_METADATA = {
'type': 'buildcache',
'description': ('Buildcache copied from a public tarball available on Github.'
'The sha256 checksum of binaries is checked before installation.'),
'info': {
'url': os.path.join('..', '..', LOCAL_MIRROR_DIR),
'homepage': 'https://github.com/spack/spack-bootstrap-mirrors',
'releases': 'https://github.com/spack/spack-bootstrap-mirrors/releases',
'tarball': BINARY_TARBALL
}
}
CLINGO_JSON = '$spack/share/spack/bootstrap/github-actions-v0.2/clingo.json'
GNUPG_JSON = '$spack/share/spack/bootstrap/github-actions-v0.2/gnupg.json'
# Metadata for a generated source mirror
SOURCE_METADATA = {
'type': 'install',
'description': 'Mirror with software needed to bootstrap Spack',
'info': {
'url': os.path.join('..', '..', LOCAL_MIRROR_DIR)
}
}
def _add_scope_option(parser):
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
@@ -104,61 +67,24 @@ def setup_parser(subparser):
)
list = sp.add_parser(
'list', help='list all the sources of software to bootstrap Spack'
'list', help='list the methods available for bootstrapping'
)
_add_scope_option(list)
trust = sp.add_parser(
'trust', help='trust a bootstrapping source'
'trust', help='trust a bootstrapping method'
)
_add_scope_option(trust)
trust.add_argument(
'name', help='name of the source to be trusted'
'name', help='name of the method to be trusted'
)
untrust = sp.add_parser(
'untrust', help='untrust a bootstrapping source'
'untrust', help='untrust a bootstrapping method'
)
_add_scope_option(untrust)
untrust.add_argument(
'name', help='name of the source to be untrusted'
)
add = sp.add_parser(
'add', help='add a new source for bootstrapping'
)
_add_scope_option(add)
add.add_argument(
'--trust', action='store_true',
help='trust the source immediately upon addition')
add.add_argument(
'name', help='name of the new source of software'
)
add.add_argument(
'metadata_dir', help='directory where to find metadata files'
)
remove = sp.add_parser(
'remove', help='remove a bootstrapping source'
)
remove.add_argument(
'name', help='name of the source to be removed'
)
mirror = sp.add_parser(
'mirror', help='create a local mirror to bootstrap Spack'
)
mirror.add_argument(
'--binary-packages', action='store_true',
help='download public binaries in the mirror'
)
mirror.add_argument(
'--dev', action='store_true',
help='download dev dependencies too'
)
mirror.add_argument(
metavar='DIRECTORY', dest='root_dir',
help='root directory in which to create the mirror and metadata'
'name', help='name of the method to be untrusted'
)
@@ -211,7 +137,10 @@ def _root(args):
def _list(args):
sources = spack.bootstrap.bootstrapping_sources(scope=args.scope)
sources = spack.config.get(
'bootstrap:sources', default=None, scope=args.scope
)
if not sources:
llnl.util.tty.msg(
"No method available for bootstrapping Spack's dependencies"
@@ -320,121 +249,6 @@ def _status(args):
print()
def _add(args):
initial_sources = spack.bootstrap.bootstrapping_sources()
names = [s['name'] for s in initial_sources]
# If the name is already used error out
if args.name in names:
msg = 'a source named "{0}" already exists. Please choose a different name'
raise RuntimeError(msg.format(args.name))
# Check that the metadata file exists
metadata_dir = spack.util.path.canonicalize_path(args.metadata_dir)
if not os.path.exists(metadata_dir) or not os.path.isdir(metadata_dir):
raise RuntimeError(
'the directory "{0}" does not exist'.format(args.metadata_dir)
)
file = os.path.join(metadata_dir, 'metadata.yaml')
if not os.path.exists(file):
raise RuntimeError('the file "{0}" does not exist'.format(file))
# Insert the new source as the highest priority one
write_scope = args.scope or spack.config.default_modify_scope(section='bootstrap')
sources = spack.config.get('bootstrap:sources', scope=write_scope) or []
sources = [
{'name': args.name, 'metadata': args.metadata_dir}
] + sources
spack.config.set('bootstrap:sources', sources, scope=write_scope)
msg = 'New bootstrapping source "{0}" added in the "{1}" configuration scope'
llnl.util.tty.msg(msg.format(args.name, write_scope))
if args.trust:
_trust(args)
def _remove(args):
initial_sources = spack.bootstrap.bootstrapping_sources()
names = [s['name'] for s in initial_sources]
if args.name not in names:
msg = ('cannot find any bootstrapping source named "{0}". '
'Run `spack bootstrap list` to see available sources.')
raise RuntimeError(msg.format(args.name))
for current_scope in spack.config.scopes():
sources = spack.config.get('bootstrap:sources', scope=current_scope) or []
if args.name in [s['name'] for s in sources]:
sources = [s for s in sources if s['name'] != args.name]
spack.config.set('bootstrap:sources', sources, scope=current_scope)
msg = ('Removed the bootstrapping source named "{0}" from the '
'"{1}" configuration scope.')
llnl.util.tty.msg(msg.format(args.name, current_scope))
trusted = spack.config.get('bootstrap:trusted', scope=current_scope) or []
if args.name in trusted:
trusted.pop(args.name)
spack.config.set('bootstrap:trusted', trusted, scope=current_scope)
msg = 'Deleting information on "{0}" from list of trusted sources'
llnl.util.tty.msg(msg.format(args.name))
def _mirror(args):
mirror_dir = spack.util.path.canonicalize_path(
os.path.join(args.root_dir, LOCAL_MIRROR_DIR)
)
# TODO: Here we are adding gnuconfig manually, but this can be fixed
# TODO: as soon as we have an option to add to a mirror all the possible
# TODO: dependencies of a spec
root_specs = spack.bootstrap.all_root_specs(development=args.dev) + ['gnuconfig']
for spec_str in root_specs:
msg = 'Adding "{0}" and dependencies to the mirror at {1}'
llnl.util.tty.msg(msg.format(spec_str, mirror_dir))
# Suppress tty from the call below for terser messages
llnl.util.tty.set_msg_enabled(False)
spec = spack.spec.Spec(spec_str).concretized()
for node in spec.traverse():
spack.mirror.create(mirror_dir, [node])
llnl.util.tty.set_msg_enabled(True)
if args.binary_packages:
msg = 'Adding binary packages from "{0}" to the mirror at {1}'
llnl.util.tty.msg(msg.format(BINARY_TARBALL, mirror_dir))
llnl.util.tty.set_msg_enabled(False)
stage = spack.stage.Stage(BINARY_TARBALL, path=tempfile.mkdtemp())
stage.create()
stage.fetch()
stage.expand_archive()
build_cache_dir = os.path.join(stage.source_path, 'build_cache')
shutil.move(build_cache_dir, mirror_dir)
llnl.util.tty.set_msg_enabled(True)
def write_metadata(subdir, metadata):
metadata_rel_dir = os.path.join('metadata', subdir)
metadata_yaml = os.path.join(
args.root_dir, metadata_rel_dir, 'metadata.yaml'
)
llnl.util.filesystem.mkdirp(os.path.dirname(metadata_yaml))
with open(metadata_yaml, mode='w') as f:
spack.util.spack_yaml.dump(metadata, stream=f)
return os.path.dirname(metadata_yaml), metadata_rel_dir
instructions = ('\nTo register the mirror on the platform where it\'s supposed '
'to be used, move "{0}" to its final location and run the '
'following command(s):\n\n').format(args.root_dir)
cmd = ' % spack bootstrap add --trust {0} <final-path>/{1}\n'
_, rel_directory = write_metadata(subdir='sources', metadata=SOURCE_METADATA)
instructions += cmd.format('local-sources', rel_directory)
if args.binary_packages:
abs_directory, rel_directory = write_metadata(
subdir='binaries', metadata=BINARY_METADATA
)
shutil.copy(spack.util.path.canonicalize_path(CLINGO_JSON), abs_directory)
shutil.copy(spack.util.path.canonicalize_path(GNUPG_JSON), abs_directory)
instructions += cmd.format('local-binaries', rel_directory)
print(instructions)
def bootstrap(parser, args):
callbacks = {
'status': _status,
@@ -444,9 +258,6 @@ def bootstrap(parser, args):
'root': _root,
'list': _list,
'trust': _trust,
'untrust': _untrust,
'add': _add,
'remove': _remove,
'mirror': _mirror
'untrust': _untrust
}
callbacks[args.subcommand](args)

View File

@@ -161,6 +161,11 @@ def setup_parser(subparser):
help=('Check single spec from json or yaml file instead of release ' +
'specs file'))
check.add_argument(
'--rebuild-on-error', default=False, action='store_true',
help="Default to rebuilding packages if errors are encountered " +
"during the process of checking whether rebuilding is needed")
check.set_defaults(func=check_fn)
# Download tarball and specfile
@@ -356,7 +361,7 @@ def list_fn(args):
try:
specs = bindist.update_cache_and_get_specs()
except bindist.FetchCacheError as e:
tty.die(e)
tty.error(e)
if not args.allarch:
arch = spack.spec.Spec.default_arch()
@@ -386,11 +391,17 @@ def preview_fn(args):
constraints = spack.cmd.parse_specs(args.specs)
specs = spack.store.find(constraints, multiple=True)
def status_fn(spec):
if spack.relocate.is_relocatable(spec):
return spec.install_stati.installed
else:
return spec.install_stati.unknown
# Cycle over the specs that match
for spec in specs:
print("Relocatable nodes")
print("--------------------------------")
print(spec.tree(status_fn=spack.relocate.is_relocatable))
print(spec.tree(status_fn=status_fn))
def check_fn(args):
@@ -425,7 +436,7 @@ def check_fn(args):
sys.exit(0)
sys.exit(bindist.check_specs_against_mirrors(
configured_mirrors, specs, args.output_file))
configured_mirrors, specs, args.output_file, args.rebuild_on_error))
def download_fn(args):
@@ -478,12 +489,11 @@ def save_specfile_fn(args):
if args.root_specfile:
with open(args.root_specfile) as fd:
root_spec_as_json = fd.read()
spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
else:
root_spec = Spec(args.root_spec)
root_spec.concretize()
root_spec_as_json = root_spec.to_json(hash=ht.dag_hash)
spec_format = 'json'
root_spec_as_json = root_spec.to_json(hash=ht.build_hash)
spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
save_dependency_specfiles(
root_spec_as_json, args.specfile_dir, args.specs.split(), spec_format)
@@ -697,7 +707,7 @@ def update_index(mirror_url, update_keys=False):
def update_index_fn(args):
"""Update a buildcache index."""
outdir = 'file://.'
outdir = '.'
if args.mirror_url:
outdir = args.mirror_url

View File

@@ -64,11 +64,6 @@ def setup_parser(subparser):
'--dependencies', action='store_true', default=False,
help="(Experimental) disable DAG scheduling; use "
' "plain" dependencies.')
generate.add_argument(
'--buildcache-destination', default=None,
help="Override the mirror configured in the environment (spack.yaml) " +
"in order to push binaries from the generated pipeline to a " +
"different location.")
prune_group = generate.add_mutually_exclusive_group()
prune_group.add_argument(
'--prune-dag', action='store_true', dest='prune_dag',
@@ -132,7 +127,6 @@ def ci_generate(args):
prune_dag = args.prune_dag
index_only = args.index_only
artifacts_root = args.artifacts_root
buildcache_destination = args.buildcache_destination
if not output_file:
output_file = os.path.abspath(".gitlab-ci.yml")
@@ -146,8 +140,7 @@ def ci_generate(args):
spack_ci.generate_gitlab_ci_yaml(
env, True, output_file, prune_dag=prune_dag,
check_index_only=index_only, run_optimizer=run_optimizer,
use_dependencies=use_dependencies, artifacts_root=artifacts_root,
remote_mirror_override=buildcache_destination)
use_dependencies=use_dependencies, artifacts_root=artifacts_root)
if copy_yaml_to:
copy_to_dir = os.path.dirname(copy_yaml_to)
@@ -174,7 +167,8 @@ def ci_reindex(args):
def ci_rebuild(args):
"""Check a single spec against the remote mirror, and rebuild it from
source if the mirror does not contain the hash. """
source if the mirror does not contain the full hash match of the spec
as computed locally. """
env = spack.cmd.require_active_env(cmd_name='ci rebuild')
# Make sure the environment is "gitlab-enabled", or else there's nothing
@@ -187,9 +181,6 @@ def ci_rebuild(args):
if not gitlab_ci:
tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')
tty.msg('SPACK_BUILDCACHE_DESTINATION={0}'.format(
os.environ.get('SPACK_BUILDCACHE_DESTINATION', None)))
# Grab the environment variables we need. These either come from the
# pipeline generation step ("spack ci generate"), where they were written
# out as variables, or else provided by GitLab itself.
@@ -206,7 +197,7 @@ def ci_rebuild(args):
compiler_action = get_env_var('SPACK_COMPILER_ACTION')
cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
remote_mirror_override = get_env_var('SPACK_REMOTE_MIRROR_OVERRIDE')
pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')
# Construct absolute paths relative to current $CI_PROJECT_DIR
@@ -254,10 +245,6 @@ def ci_rebuild(args):
tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
spack_is_pr_pipeline, spack_is_develop_pipeline))
# If no override url exists, then just push binary package to the
# normal remote mirror url.
buildcache_mirror_url = remote_mirror_override or remote_mirror_url
# Figure out what is our temporary storage mirror: Is it artifacts
# buildcache? Or temporary-storage-url-prefix? In some cases we need to
# force something or pipelines might not have a way to propagate build
@@ -293,8 +280,8 @@ def ci_rebuild(args):
env, root_spec, job_spec_pkg_name, compiler_action)
job_spec = spec_map[job_spec_pkg_name]
job_spec_json_file = '{0}.json'.format(job_spec_pkg_name)
job_spec_json_path = os.path.join(repro_dir, job_spec_json_file)
job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)
# To provide logs, cdash reports, etc for developer download/perusal,
# these things have to be put into artifacts. This means downstream
@@ -348,23 +335,23 @@ def ci_rebuild(args):
# using a compiler already installed on the target system).
spack_ci.configure_compilers(compiler_action)
# Write this job's spec json into the reproduction directory, and it will
# Write this job's spec yaml into the reproduction directory, and it will
# also be used in the generated "spack install" command to install the spec
tty.debug('job concrete spec path: {0}'.format(job_spec_json_path))
with open(job_spec_json_path, 'w') as fd:
fd.write(job_spec.to_json(hash=ht.dag_hash))
tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
with open(job_spec_yaml_path, 'w') as fd:
fd.write(job_spec.to_yaml(hash=ht.build_hash))
# Write the concrete root spec json into the reproduction directory
root_spec_json_path = os.path.join(repro_dir, 'root.json')
with open(root_spec_json_path, 'w') as fd:
fd.write(spec_map['root'].to_json(hash=ht.dag_hash))
# Write the concrete root spec yaml into the reproduction directory
root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
with open(root_spec_yaml_path, 'w') as fd:
fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))
# Write some other details to aid in reproduction into an artifact
repro_file = os.path.join(repro_dir, 'repro.json')
repro_details = {
'job_name': ci_job_name,
'job_spec_json': job_spec_json_file,
'root_spec_json': 'root.json',
'job_spec_yaml': job_spec_yaml_file,
'root_spec_yaml': 'root.yaml',
'ci_project_dir': ci_project_dir
}
with open(repro_file, 'w') as fd:
@@ -379,41 +366,25 @@ def ci_rebuild(args):
fd.write(b'\n')
# If we decided there should be a temporary storage mechanism, add that
# mirror now so it's used when we check for a hash match already
# mirror now so it's used when we check for a full hash match already
# built for this spec.
if pipeline_mirror_url:
spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
pipeline_mirror_url,
cfg.default_modify_scope())
# Check configured mirrors for a built spec with a matching hash
mirrors_to_check = None
if remote_mirror_override and spack_pipeline_type == 'spack_protected_branch':
# Passing "mirrors_to_check" below means we *only* look in the override
# mirror to see if we should skip building, which is what we want.
mirrors_to_check = {
'override': remote_mirror_override
}
# Adding this mirror to the list of configured mirrors means dependencies
# could be installed from either the override mirror or any other configured
# mirror (e.g. remote_mirror_url which is defined in the environment or
# pipeline_mirror_url), which is also what we want.
spack.mirror.add('mirror_override',
remote_mirror_override,
cfg.default_modify_scope())
# Check configured mirrors for a built spec with a matching full hash
matches = bindist.get_mirrors_for_spec(
job_spec, mirrors_to_check=mirrors_to_check, index_only=False)
job_spec, full_hash_match=True, index_only=False)
if matches:
# Got a hash match on at least one configured mirror. All
# Got a full hash match on at least one configured mirror. All
# matches represent the fully up-to-date spec, so should all be
# equivalent. If artifacts mirror is enabled, we just pick one
# of the matches and download the buildcache files from there to
# the artifacts, so they're available to be used by dependent
# jobs in subsequent stages.
tty.msg('No need to rebuild {0}, found hash match at: '.format(
tty.msg('No need to rebuild {0}, found full hash match at: '.format(
job_spec_pkg_name))
for match in matches:
tty.msg(' {0}'.format(match['mirror_url']))
@@ -432,7 +403,7 @@ def ci_rebuild(args):
# Now we are done and successful
sys.exit(0)
# No hash match anywhere means we need to rebuild spec
# No full hash match anywhere means we need to rebuild spec
# Start with spack arguments
install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]
@@ -444,6 +415,7 @@ def ci_rebuild(args):
install_args.extend([
'install',
'--keep-stage',
'--require-full-hash-match',
])
can_verify = spack_ci.can_verify_binaries()
@@ -471,8 +443,8 @@ def ci_rebuild(args):
# TODO: once we have the concrete spec registry, use the DAG hash
# to identify the spec to install, rather than the concrete spec
# json file.
install_args.extend(['-f', job_spec_json_path])
# yaml file.
install_args.extend(['-f', job_spec_yaml_path])
tty.debug('Installing {0} from source'.format(job_spec.name))
tty.debug('spack install arguments: {0}'.format(
@@ -505,13 +477,13 @@ def ci_rebuild(args):
tty.debug('spack install exited {0}'.format(install_exit_code))
# If a spec fails to build in a spack develop pipeline, we add it to a
# list of known broken hashes. This allows spack PR pipelines to
# list of known broken full hashes. This allows spack PR pipelines to
# avoid wasting compute cycles attempting to build those hashes.
if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
tty.debug('Install failed on develop')
if 'broken-specs-url' in gitlab_ci:
broken_specs_url = gitlab_ci['broken-specs-url']
dev_fail_hash = job_spec.dag_hash()
dev_fail_hash = job_spec.full_hash()
broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
tty.msg('Reporting broken develop build as: {0}'.format(
broken_spec_path))
@@ -522,7 +494,7 @@ def ci_rebuild(args):
'broken-spec': {
'job-url': get_env_var('CI_JOB_URL'),
'pipeline-url': get_env_var('CI_PIPELINE_URL'),
'concrete-spec-dict': job_spec.to_dict(hash=ht.dag_hash)
'concrete-spec-yaml': job_spec.to_dict(hash=ht.full_hash)
}
}
@@ -548,6 +520,13 @@ def ci_rebuild(args):
# any logs from the staging directory to artifacts now
spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)
# Create buildcache on remote mirror, either on pr-specific mirror or
# on the main mirror defined in the gitlab-enabled spack environment
if spack_is_pr_pipeline:
buildcache_mirror_url = pr_mirror_url
else:
buildcache_mirror_url = remote_mirror_url
# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure
@@ -560,7 +539,7 @@ def ci_rebuild(args):
# per-PR mirror, if this is a PR pipeline
if buildcache_mirror_url:
spack_ci.push_mirror_contents(
env, job_spec_json_path, buildcache_mirror_url, sign_binaries
env, job_spec_yaml_path, buildcache_mirror_url, sign_binaries
)
# Create another copy of that buildcache in the per-pipeline
@@ -569,14 +548,14 @@ def ci_rebuild(args):
# prefix is set)
if pipeline_mirror_url:
spack_ci.push_mirror_contents(
env, job_spec_json_path, pipeline_mirror_url, sign_binaries
env, job_spec_yaml_path, pipeline_mirror_url, sign_binaries
)
# If this is a develop pipeline, check if the spec that we just built is
# on the broken-specs list. If so, remove it.
if spack_is_develop_pipeline and 'broken-specs-url' in gitlab_ci:
broken_specs_url = gitlab_ci['broken-specs-url']
just_built_hash = job_spec.dag_hash()
just_built_hash = job_spec.full_hash()
broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
if web_util.url_exists(broken_spec_path):
tty.msg('Removing {0} from the list of broken specs'.format(

View File

@@ -382,9 +382,14 @@ def add_concretizer_args(subparser):
)
subgroup.add_argument(
'--reuse', action=ConfigSetAction, dest="concretizer:reuse",
const=True, default=None,
const="any", default=None,
help='reuse installed dependencies/buildcaches when possible'
)
subgroup.add_argument(
'--reuse-only', action=ConfigSetAction, dest="concretizer:reuse",
const=True, default=None,
help='operate as a binary package manager'
)
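For orientation, the stand-in below illustrates the idea behind an argparse action that records its const under a colon-separated config path such as concretizer:reuse; it is a simplified sketch, not Spack's actual ConfigSetAction, and the names here are hypothetical.
import argparse

class SetConfigConst(argparse.Action):
    """Store `const` as a config override keyed by `dest` (e.g. 'concretizer:reuse')."""

    def __init__(self, option_strings, dest, const=None, default=None, **kwargs):
        super(SetConfigConst, self).__init__(
            option_strings, dest, nargs=0, const=const, default=default, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        overrides = getattr(namespace, 'config_overrides', None) or {}
        overrides[self.dest] = self.const
        setattr(namespace, 'config_overrides', overrides)

parser = argparse.ArgumentParser()
parser.add_argument('--reuse', action=SetConfigConst,
                    dest='concretizer:reuse', const='any', default=None)
parser.add_argument('--reuse-only', action=SetConfigConst,
                    dest='concretizer:reuse', const=True, default=None)
print(parser.parse_args(['--reuse']).config_overrides)   # {'concretizer:reuse': 'any'}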
def add_s3_connection_args(subparser, add_help):

View File

@@ -18,8 +18,6 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['clean', 'dirty'])
arguments.add_concretizer_args(subparser)
subparser.add_argument(
'--dump', metavar="FILE",
help="dump a source-able environment to FILE"

View File

@@ -22,9 +22,6 @@ def setup_parser(subparser):
help="""Concretize with test dependencies. When 'root' is chosen, test
dependencies are only added for the environment's root specs. When 'all' is
chosen, test dependencies are enabled for all packages in the environment.""")
subparser.add_argument(
'-q', '--quiet', action='store_true',
help="Don't print concretized specs")
spack.cmd.common.arguments.add_concretizer_args(subparser)
@@ -41,6 +38,5 @@ def concretize(parser, args):
with env.write_transaction():
concretized_specs = env.concretize(force=args.force, tests=tests)
if not args.quiet:
ev.display_specs(concretized_specs)
ev.display_specs(concretized_specs)
env.write()

View File

@@ -187,27 +187,6 @@ def cmake_args(self):
return args"""
class LuaPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for LuaRocks-based packages"""
base_class_name = 'LuaPackage'
body_def = """\
def luarocks_args(self):
# FIXME: Add arguments to `luarocks make` other than rockspec path
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, url, *args, **kwargs):
# If the user provided `--name lua-lpeg`, don't rename it lua-lua-lpeg
if not name.startswith('lua-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to lua-{0}".format(name))
name = 'lua-{0}'.format(name)
super(LuaPackageTemplate, self).__init__(name, url, *args, **kwargs)
class MesonPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for meson-based packages"""
@@ -601,7 +580,6 @@ def __init__(self, name, *args, **kwargs):
'makefile': MakefilePackageTemplate,
'intel': IntelPackageTemplate,
'meson': MesonPackageTemplate,
'lua': LuaPackageTemplate,
'sip': SIPPackageTemplate,
'generic': PackageTemplate,
}
@@ -666,9 +644,6 @@ def __call__(self, stage, url):
if url.endswith('.whl') or '.whl#' in url:
self.build_system = 'python'
return
if url.endswith('.rock'):
self.build_system = 'lua'
return
# A list of clues that give us an idea of the build system a package
# uses. If the regular expression matches a file contained in the
@@ -693,7 +668,6 @@ def __call__(self, stage, url):
(r'/Rakefile$', 'ruby'),
(r'/setup\.rb$', 'ruby'),
(r'/.*\.pro$', 'qmake'),
(r'/.*\.rockspec$', 'lua'),
(r'/(GNU)?[Mm]akefile$', 'makefile'),
(r'/DESCRIPTION$', 'octave'),
(r'/meson\.build$', 'meson'),

View File

@@ -91,8 +91,8 @@ def dev_build(self, args):
spec.concretize()
package = spack.repo.get(spec)
if spec.installed:
tty.error("Already installed in %s" % spec.prefix)
if package.installed:
tty.error("Already installed in %s" % package.prefix)
tty.msg("Uninstall or try adding a version suffix for this dev build.")
sys.exit(1)

View File

@@ -68,14 +68,8 @@ def compare_specs(a, b, to_string=False, color=None):
# Prepare a solver setup to parse differences
setup = asp.SpackSolverSetup()
# get facts for specs, making sure to include build dependencies of concrete
# specs and to descend into dependency hashes so we include all facts.
a_facts = set(t for t in setup.spec_clauses(
a, body=True, expand_hashes=True, concrete_build_deps=True,
))
b_facts = set(t for t in setup.spec_clauses(
b, body=True, expand_hashes=True, concrete_build_deps=True,
))
a_facts = set(t for t in setup.spec_clauses(a, body=True, expand_hashes=True))
b_facts = set(t for t in setup.spec_clauses(b, body=True, expand_hashes=True))
# We want to present them to the user as simple key: values
intersect = sorted(a_facts.intersection(b_facts))

View File

@@ -8,8 +8,6 @@
import sys
import tempfile
import six
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -43,8 +41,7 @@
'loads',
'view',
'update',
'revert',
'depfile'
'revert'
]
@@ -526,154 +523,6 @@ def env_revert(args):
tty.msg(msg.format(manifest_file))
def env_depfile_setup_parser(subparser):
"""generate a depfile from the concrete environment specs"""
subparser.add_argument(
'--make-target-prefix', default=None, metavar='TARGET',
help='prefix Makefile targets with <TARGET>/<name>. By default the absolute '
'path to the directory makedeps under the environment metadata dir is '
'used. Can be set to an empty string --make-target-prefix \'\'.')
subparser.add_argument(
'--make-disable-jobserver', default=True, action='store_false',
dest='jobserver', help='disable POSIX jobserver support.')
subparser.add_argument(
'-o', '--output', default=None, metavar='FILE',
help='write the depfile to FILE rather than to stdout')
subparser.add_argument(
'-G', '--generator', default='make', choices=('make',),
help='specify the depfile type. Currently only make is supported.')
def env_depfile(args):
# Currently only make is supported.
spack.cmd.require_active_env(cmd_name='env depfile')
env = ev.active_environment()
# Maps each hash in the environment to a string of install prereqs
hash_to_prereqs = {}
hash_to_spec = {}
if args.make_target_prefix is None:
target_prefix = os.path.join(env.env_subdir_path, 'makedeps')
else:
target_prefix = args.make_target_prefix
def get_target(name):
# The `all`, `fetch` and `clean` targets are phony. It doesn't make sense to
# have /abs/path/to/env/metadir/{all,clean} targets. But it *does* make
# sense to have a prefix like `env/all`, `env/fetch`, `env/clean` when they are
# supposed to be included
if name in ('all', 'fetch-all', 'clean') and os.path.isabs(target_prefix):
return name
else:
return os.path.join(target_prefix, name)
def get_install_target(name):
return os.path.join(target_prefix, '.install', name)
def get_fetch_target(name):
return os.path.join(target_prefix, '.fetch', name)
for _, spec in env.concretized_specs():
for s in spec.traverse(root=True):
hash_to_spec[s.dag_hash()] = s
hash_to_prereqs[s.dag_hash()] = [
get_install_target(dep.dag_hash()) for dep in s.dependencies()]
root_dags = [s.dag_hash() for _, s in env.concretized_specs()]
# Root specs without deps are the prereqs for the environment target
root_install_targets = [get_install_target(h) for h in root_dags]
# All package install targets, not just roots.
all_install_targets = [get_install_target(h) for h in hash_to_spec.keys()]
# Fetch targets for all packages in the environment, not just roots.
all_fetch_targets = [get_fetch_target(h) for h in hash_to_spec.keys()]
buf = six.StringIO()
buf.write("""SPACK ?= spack
.PHONY: {} {} {}
{}: {}
{}: {}
{}: {}
\t@touch $@
{}: {}
\t@touch $@
{}:
\t@mkdir -p {} {}
{}: | {}
\t$(info Fetching $(SPEC))
\t$(SPACK) -e '{}' fetch $(SPACK_FETCH_FLAGS) /$(notdir $@) && touch $@
{}: {}
\t$(info Installing $(SPEC))
\t{}$(SPACK) -e '{}' install $(SPACK_INSTALL_FLAGS) --only-concrete --only=package \
--no-add /$(notdir $@) && touch $@
""".format(get_target('all'), get_target('fetch-all'), get_target('clean'),
get_target('all'), get_target('env'),
get_target('fetch-all'), get_target('fetch'),
get_target('env'), ' '.join(root_install_targets),
get_target('fetch'), ' '.join(all_fetch_targets),
get_target('dirs'), get_target('.fetch'), get_target('.install'),
get_target('.fetch/%'), get_target('dirs'),
env.path,
get_target('.install/%'), get_target('.fetch/%'),
'+' if args.jobserver else '', env.path))
# Targets are of the form <prefix>/<name>: [<prefix>/<depname>]...,
# The prefix can be an empty string, in that case we don't add the `/`.
# The name is currently the dag hash of the spec. In principle it
# could be the package name in case of `concretization: together` so
# it can be more easily referred to, but for now we don't special case
# this.
fmt = '{name}{@version}{%compiler}{variants}{arch=architecture}'
# Set SPEC for each hash
buf.write('# Set the human-readable spec for each target\n')
for dag_hash in hash_to_prereqs.keys():
formatted_spec = hash_to_spec[dag_hash].format(fmt)
buf.write("{}: SPEC = {}\n".format(get_target('%/' + dag_hash), formatted_spec))
buf.write('\n')
# Set install dependencies
buf.write('# Install dependencies\n')
for parent, children in hash_to_prereqs.items():
if not children:
continue
buf.write('{}: {}\n'.format(get_install_target(parent), ' '.join(children)))
buf.write('\n')
# Clean target: remove target files but not their folders, because
# --make-target-prefix can be any existing directory we do not control,
# including empty string (which means deleting the containing folder
# would delete the folder with the Makefile)
buf.write("{}:\n\trm -f -- {} {} {} {}\n".format(
get_target('clean'),
get_target('env'),
get_target('fetch'),
' '.join(all_fetch_targets),
' '.join(all_install_targets)))
makefile = buf.getvalue()
# Finally write to stdout/file.
if args.output:
with open(args.output, 'w') as f:
f.write(makefile)
else:
sys.stdout.write(makefile)
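The phony-target handling described in the comments above can be checked with a small standalone sketch (the prefix and hash below are made up):
import os

def get_target(name, target_prefix):
    # 'all', 'fetch-all' and 'clean' stay unprefixed when the prefix is an
    # absolute path; with a relative (or empty) prefix they are prefixed too.
    if name in ('all', 'fetch-all', 'clean') and os.path.isabs(target_prefix):
        return name
    return os.path.join(target_prefix, name)

print(get_target('all', '/env/.spack-env/makedeps'))      # all
print(get_target('all', 'env'))                           # env/all
print(get_target('abc1234', '/env/.spack-env/makedeps'))  # /env/.spack-env/makedeps/abc1234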
#: Dictionary mapping subcommand names and aliases to functions
subcommand_functions = {}

View File

@@ -5,8 +5,6 @@
from __future__ import print_function
import argparse
import errno
import os
import sys
import llnl.util.tty as tty
@@ -15,7 +13,6 @@
import spack
import spack.cmd
import spack.cmd.common.arguments
import spack.cray_manifest as cray_manifest
import spack.detection
import spack.error
import spack.util.environment
@@ -38,9 +35,6 @@ def setup_parser(subparser):
find_parser.add_argument(
'--not-buildable', action='store_true', default=False,
help="packages with detected externals won't be built with Spack")
find_parser.add_argument(
'-p', '--path', default=None, action='append',
help="Alternative search paths for finding externals. May be repeated")
find_parser.add_argument(
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope('packages'),
@@ -61,55 +55,8 @@ def setup_parser(subparser):
'list', help='list detectable packages, by repository and name'
)
read_cray_manifest = sp.add_parser(
'read-cray-manifest', help=(
"consume a Spack-compatible description of externally-installed "
"packages, including dependency relationships"
)
)
read_cray_manifest.add_argument(
'--file', default=None,
help="specify a location other than the default")
read_cray_manifest.add_argument(
'--directory', default=None,
help="specify a directory storing a group of manifest files")
read_cray_manifest.add_argument(
'--dry-run', action='store_true', default=False,
help="don't modify DB with files that are read")
read_cray_manifest.add_argument(
'--fail-on-error', action='store_true',
help=("if a manifest file cannot be parsed, fail and report the "
"full stack trace")
)
def external_find(args):
if args.all or not (args.tags or args.packages):
# If the user calls 'spack external find' with no arguments, and
# this system has a description of installed packages, then we should
# consume it automatically.
try:
_collect_and_consume_cray_manifest_files()
except NoManifestFileError:
# It's fine to not find any manifest file if we are doing the
# search implicitly (i.e. as part of 'spack external find')
pass
except Exception as e:
# For most exceptions, just print a warning and continue.
# Note that KeyboardInterrupt does not subclass Exception
# (so CTRL-C will terminate the program as expected).
skip_msg = ("Skipping manifest and continuing with other external "
"checks")
if ((isinstance(e, IOError) or isinstance(e, OSError)) and
e.errno in [errno.EPERM, errno.EACCES]):
# The manifest file does not have sufficient permissions enabled:
# print a warning and keep going
tty.warn("Unable to read manifest due to insufficient "
"permissions.", skip_msg)
else:
tty.warn("Unable to read manifest, unexpected error: {0}"
.format(str(e)), skip_msg)
# If the user didn't specify anything, search for build tools by default
if not args.tags and not args.all and not args.packages:
args.tags = ['core-packages', 'build-tools']
@@ -141,12 +88,10 @@ def external_find(args):
# If the list of packages is empty, search for every possible package
if not args.tags and not packages_to_check:
packages_to_check = list(spack.repo.path.all_packages())
packages_to_check = spack.repo.path.all_packages()
detected_packages = spack.detection.by_executable(
packages_to_check, path_hints=args.path)
detected_packages.update(spack.detection.by_library(
packages_to_check, path_hints=args.path))
detected_packages = spack.detection.by_executable(packages_to_check)
detected_packages.update(spack.detection.by_library(packages_to_check))
new_entries = spack.detection.update_configuration(
detected_packages, scope=args.scope, buildable=not args.not_buildable
@@ -161,60 +106,6 @@ def external_find(args):
tty.msg('No new external packages detected')
def external_read_cray_manifest(args):
_collect_and_consume_cray_manifest_files(
manifest_file=args.file,
manifest_directory=args.directory,
dry_run=args.dry_run,
fail_on_error=args.fail_on_error
)
def _collect_and_consume_cray_manifest_files(
manifest_file=None, manifest_directory=None, dry_run=False,
fail_on_error=False):
manifest_files = []
if manifest_file:
manifest_files.append(manifest_file)
manifest_dirs = []
if manifest_directory:
manifest_dirs.append(manifest_directory)
if os.path.isdir(cray_manifest.default_path):
tty.debug(
"Cray manifest path {0} exists: collecting all files to read."
.format(cray_manifest.default_path))
manifest_dirs.append(cray_manifest.default_path)
else:
tty.debug("Default Cray manifest directory {0} does not exist."
.format(cray_manifest.default_path))
for directory in manifest_dirs:
for fname in os.listdir(directory):
if fname.endswith('.json'):
fpath = os.path.join(directory, fname)
tty.debug("Adding manifest file: {0}".format(fpath))
manifest_files.append(os.path.join(directory, fpath))
if not manifest_files:
raise NoManifestFileError(
"--file/--directory not specified, and no manifest found at {0}"
.format(cray_manifest.default_path))
for path in manifest_files:
tty.debug("Reading manifest file: " + path)
try:
cray_manifest.read(path, not dry_run)
except (spack.compilers.UnknownCompilerError, spack.error.SpackError) as e:
if fail_on_error:
raise
else:
tty.warn("Failure reading manifest file: {0}"
"\n\t{1}".format(path, str(e)))
def external_list(args):
# Trigger a read of all packages, might take a long time.
list(spack.repo.path.all_packages())
@@ -226,10 +117,5 @@ def external_list(args):
def external(parser, args):
action = {'find': external_find, 'list': external_list,
'read-cray-manifest': external_read_cray_manifest}
action = {'find': external_find, 'list': external_list}
action[args.external_command](args)
class NoManifestFileError(spack.error.SpackError):
pass

View File

@@ -69,10 +69,14 @@ def fetch(parser, args):
for spec in specs:
if args.missing or args.dependencies:
for s in spec.traverse(root=False):
for s in spec.traverse():
package = spack.repo.get(s)
# Skip already-installed packages with --missing
if args.missing and s.installed:
if args.missing and package.installed:
continue
s.package.do_fetch()
spec.package.do_fetch()
package.do_fetch()
package = spack.repo.get(spec)
package.do_fetch()

View File

@@ -184,9 +184,8 @@ def print_detectable(pkg):
color.cprint('')
color.cprint(section_title('Externally Detectable: '))
# If the package has an 'executables' or 'libraries' field, it
# can detect an installation
if hasattr(pkg, 'executables') or hasattr(pkg, 'libraries'):
# If the package has an 'executables' field, it can detect an installation
if hasattr(pkg, 'executables'):
find_attributes = []
if hasattr(pkg, 'determine_version'):
find_attributes.append('version')

View File

@@ -47,6 +47,7 @@ def update_kwargs_from_args(args, kwargs):
'explicit': True, # Always true for install command
'stop_at': args.until,
'unsigned': args.unsigned,
'full_hash_match': args.full_hash_match,
})
kwargs.update({
@@ -116,6 +117,11 @@ def setup_parser(subparser):
'--no-check-signature', action='store_true',
dest='unsigned', default=False,
help="do not check signatures of binary packages")
subparser.add_argument(
'--require-full-hash-match', action='store_true',
dest='full_hash_match', default=False, help="""when installing from
binary mirrors, do not install binary package unless the full hash of the
remote spec matches that of the local spec""")
subparser.add_argument(
'--show-log-on-error', action='store_true',
help="print full build log to stderr if build fails")
@@ -153,6 +159,10 @@ def setup_parser(subparser):
if 'all' is chosen, run package tests during installation for all
packages. If neither are chosen, don't run tests for any packages."""
)
testing.add_argument(
'--run-tests', action='store_true',
help='run package tests during installation (same as --test=all)'
)
subparser.add_argument(
'--log-format',
default=None,
@@ -306,8 +316,11 @@ def install(parser, args, **kwargs):
if args.log_file:
reporter.filename = args.log_file
if args.run_tests:
tty.warn("Deprecated option: --run-tests: use --test=all instead")
def get_tests(specs):
if args.test == 'all':
if args.test == 'all' or args.run_tests:
return True
elif args.test == 'root':
return [spec.name for spec in specs]
@@ -464,7 +477,7 @@ def get_tests(specs):
})
# If we are using the monitor, we send configs. and create build
# The dag_hash is the main package id
# The full_hash is the main package id, the build_hash for others
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))

View File

@@ -273,7 +273,7 @@ def refresh(module_type, specs, args):
return
if not args.upstream_modules:
specs = list(s for s in specs if not s.installed_upstream)
specs = list(s for s in specs if not s.package.installed_upstream)
if not args.yes_to_all:
msg = 'You are about to regenerate {types} module files for:\n'

View File

@@ -15,8 +15,6 @@
import spack
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment
import spack.hash_types as ht
import spack.package
import spack.solver.asp as asp
@@ -76,51 +74,6 @@ def setup_parser(subparser):
spack.cmd.common.arguments.add_concretizer_args(subparser)
def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat()
opt, _, _ = min(result.answers)
if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels)
tty.msg("Optimization Criteria:")
maxlen = max(len(s[2]) for s in result.criteria)
color.cprint(
"@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " ")
)
fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen
for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1):
color.cprint(
fmt % (
i,
name,
"-" if build_cost is None else installed_cost,
installed_cost if build_cost is None else build_cost,
)
)
print()
# dump the solutions as concretized specs
if 'solutions' in show:
for spec in result.specs:
# With -y, just print YAML to output.
if required_format == 'yaml':
# use write because to_yaml already has a newline.
sys.stdout.write(spec.to_yaml(hash=ht.dag_hash))
elif required_format == 'json':
sys.stdout.write(spec.to_json(hash=ht.dag_hash))
else:
sys.stdout.write(
spec.tree(color=sys.stdout.isatty(), **kwargs))
print()
if result.unsolved_specs and "solutions" in show:
tty.msg("Unsolved specs")
for spec in result.unsolved_specs:
print(spec)
print()
def solve(parser, args):
# these are the same options as `spack spec`
name_fmt = '{namespace}.{name}' if args.namespaces else '{name}'
@@ -149,42 +102,58 @@ def solve(parser, args):
if models < 0:
tty.die("model count must be non-negative: %d")
# Format required for the output (JSON, YAML or None)
required_format = args.format
# If we have an active environment, pick the specs from there
env = spack.environment.active_environment()
if env and args.specs:
msg = "cannot give explicit specs when an environment is active"
raise RuntimeError(msg)
specs = list(env.user_specs) if env else spack.cmd.parse_specs(args.specs)
specs = spack.cmd.parse_specs(args.specs)
# set up solver parameters
# Note: reuse and other concretizer prefs are passed as configuration
solver = asp.Solver()
output = sys.stdout if "asp" in show else None
setup_only = set(show) == {'asp'}
unify = spack.config.get('concretizer:unify')
if unify != 'when_possible':
# set up solver parameters
# Note: reuse and other concretizer prefs are passed as configuration
result = solver.solve(
specs,
out=output,
models=models,
timers=args.timers,
stats=args.stats,
setup_only=setup_only
)
if not setup_only:
_process_result(result, show, required_format, kwargs)
else:
for idx, result in enumerate(solver.solve_in_rounds(
specs, out=output, models=models, timers=args.timers, stats=args.stats
)):
if "solutions" in show:
tty.msg("ROUND {0}".format(idx))
tty.msg("")
result = solver.solve(
specs,
out=output,
models=models,
timers=args.timers,
stats=args.stats,
setup_only=(set(show) == {'asp'})
)
if 'solutions' not in show:
return
# die if no solution was found
result.raise_if_unsat()
# show the solutions as concretized specs
if 'solutions' in show:
opt, _, _ = min(result.answers)
if ("opt" in show) and (not args.format):
tty.msg("Best of %d considered solutions." % result.nmodels)
tty.msg("Optimization Criteria:")
maxlen = max(len(s[2]) for s in result.criteria)
color.cprint(
"@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " ")
)
fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen
for i, (idx, build_idx, name) in enumerate(result.criteria, 1):
color.cprint(
fmt % (
i,
name,
"-" if build_idx is None else opt[idx],
opt[idx] if build_idx is None else opt[build_idx],
)
)
print()
for spec in result.specs:
# With -y, just print YAML to output.
if args.format == 'yaml':
# use write because to_yaml already has a newline.
sys.stdout.write(spec.to_yaml(hash=ht.build_hash))
elif args.format == 'json':
sys.stdout.write(spec.to_json(hash=ht.build_hash))
else:
print("% END ROUND {0}\n".format(idx))
if not setup_only:
_process_result(result, show, required_format, kwargs)
sys.stdout.write(
spec.tree(color=sys.stdout.isatty(), **kwargs))

View File

@@ -34,16 +34,12 @@ def setup_parser(subparser):
arguments.add_common_arguments(
subparser, ['long', 'very_long', 'install_status']
)
format_group = subparser.add_mutually_exclusive_group()
format_group.add_argument(
subparser.add_argument(
'-y', '--yaml', action='store_const', dest='format', default=None,
const='yaml', help='print concrete spec as YAML')
format_group.add_argument(
subparser.add_argument(
'-j', '--json', action='store_const', dest='format', default=None,
const='json', help='print concrete spec as JSON')
format_group.add_argument(
'--format', action='store', default=None,
help='print concrete spec with the specified format string')
subparser.add_argument(
'-c', '--cover', action='store',
default='nodes', choices=['nodes', 'edges', 'paths'],
@@ -51,6 +47,10 @@ def setup_parser(subparser):
subparser.add_argument(
'-N', '--namespaces', action='store_true', default=False,
help='show fully qualified package names')
subparser.add_argument(
'--hash-type', default="build_hash",
choices=['build_hash', 'full_hash', 'dag_hash'],
help='generate spec with a particular hash type.')
subparser.add_argument(
'-t', '--types', action='store_true', default=False,
help='show dependency types')
@@ -80,8 +80,7 @@ def spec(parser, args):
# Use command line specified specs, otherwise try to use environment specs.
if args.specs:
input_specs = spack.cmd.parse_specs(args.specs)
concretized_specs = spack.cmd.parse_specs(args.specs, concretize=True)
specs = list(zip(input_specs, concretized_specs))
specs = [(s, s.concretized()) for s in input_specs]
else:
env = ev.active_environment()
if env:
@@ -93,13 +92,14 @@ def spec(parser, args):
for (input, output) in specs:
# With -y, just print YAML to output.
if args.format:
# The user can specify the hash type to use
hash_type = getattr(ht, args.hash_type)
if args.format == 'yaml':
# use write because to_yaml already has a newline.
sys.stdout.write(output.to_yaml(hash=ht.dag_hash))
elif args.format == 'json':
print(output.to_json(hash=ht.dag_hash))
sys.stdout.write(output.to_yaml(hash=hash_type))
else:
print(output.format(args.format))
print(output.to_json(hash=hash_type))
continue
with tree_context():

View File

@@ -27,6 +27,12 @@ def setup_parser(subparser):
def stage(parser, args):
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
if custom_path:
spack.stage.create_stage_root(custom_path)
if not args.specs:
env = ev.active_environment()
if env:
@@ -48,10 +54,6 @@ def stage(parser, args):
specs = spack.cmd.parse_specs(args.specs, concretize=False)
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
# prevent multiple specs from extracting in the same folder
if len(specs) > 1 and custom_path:
tty.die("`--path` requires a single spec, but multiple were provided")

View File

@@ -337,8 +337,6 @@ def _report_suite_results(test_suite, args, constraints):
pkg_id, status = line.split()
results[pkg_id] = status
tty.msg('test specs:')
failed, skipped, untested = 0, 0, 0
for pkg_id in test_specs:
if pkg_id in results:

View File

@@ -24,7 +24,7 @@
# tutorial configuration parameters
tutorial_branch = "releases/v0.18"
tutorial_branch = "releases/v%d.%d" % spack.spack_version_info[:2]
tutorial_mirror = "file:///mirror"
tutorial_key = os.path.join(spack.paths.share_path, "keys", "tutorial.pub")

View File

@@ -62,14 +62,9 @@ def setup_parser(subparser):
'-a', '--all', action='store_true', dest='all',
help="remove ALL installed packages that match each supplied spec"
)
subparser.add_argument(
'--origin', dest='origin',
help="only remove DB records with the specified origin"
)
def find_matching_specs(env, specs, allow_multiple_matches=False, force=False,
origin=None):
def find_matching_specs(env, specs, allow_multiple_matches=False, force=False):
"""Returns a list of specs matching the not necessarily
concretized specs given from cli
@@ -90,8 +85,8 @@ def find_matching_specs(env, specs, allow_multiple_matches=False, force=False,
has_errors = False
for spec in specs:
install_query = [InstallStatuses.INSTALLED, InstallStatuses.DEPRECATED]
matching = spack.store.db.query_local(
spec, hashes=hashes, installed=install_query, origin=origin)
matching = spack.store.db.query_local(spec, hashes=hashes,
installed=install_query)
# For each spec provided, make sure it refers to only one package.
# Fail and ask user to be unambiguous if it doesn't
if not allow_multiple_matches and len(matching) > 1:
@@ -225,25 +220,15 @@ def do_uninstall(env, specs, force):
# A package is ready to be uninstalled when nothing else references it,
# unless we are requested to force uninstall it.
def is_ready(dag_hash):
if force:
return True
_, record = spack.store.db.query_by_spec_hash(dag_hash)
if not record.ref_count:
return True
# If this spec is only used as a build dependency, we can uninstall
return all(
dspec.deptypes == ("build",) or not dspec.parent.installed
for dspec in record.spec.edges_from_dependents()
)
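As a toy illustration of the readiness rule above (plain Python, not Spack's database API): a spec may be uninstalled when nothing references it, when the uninstall is forced, or when every remaining dependent edge is build-only or comes from a parent that is no longer installed.
def is_ready(ref_count, dependent_edges, force=False):
    # dependent_edges: list of (deptypes, parent_installed) pairs
    if force or ref_count == 0:
        return True
    return all(deptypes == ('build',) or not parent_installed
               for deptypes, parent_installed in dependent_edges)

print(is_ready(0, []))                         # True: nothing references it
print(is_ready(1, [(('build',), True)]))       # True: only a build dependent remains
print(is_ready(1, [(('link', 'run'), True)]))  # False: still linked/run against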
is_ready = lambda x: not spack.store.db.query_by_spec_hash(x)[1].ref_count
if force:
is_ready = lambda x: True
while packages:
ready = [x for x in packages if is_ready(x.spec.dag_hash())]
if not ready:
msg = 'unexpected error [cannot proceed uninstalling specs with' \
' remaining link or run dependents {0}]'
' remaining dependents {0}]'
msg = msg.format(', '.join(x.name for x in packages))
raise spack.error.SpackError(msg)
@@ -255,8 +240,7 @@ def is_ready(dag_hash):
def get_uninstall_list(args, specs, env):
# Gets the list of installed specs that match the ones give via cli
# args.all takes care of the case where '-a' is given in the cli
uninstall_list = find_matching_specs(env, specs, args.all, args.force,
args.origin)
uninstall_list = find_matching_specs(env, specs, args.all, args.force)
# Takes care of '-R'
active_dpts, inactive_dpts = installed_dependents(uninstall_list, env)

View File

@@ -252,13 +252,6 @@ def find_new_compilers(path_hints=None, scope=None):
merged configuration.
"""
compilers = find_compilers(path_hints)
return select_new_compilers(compilers, scope)
def select_new_compilers(compilers, scope=None):
"""Given a list of compilers, remove those that are already defined in
the configuration.
"""
compilers_not_in_config = []
for c in compilers:
arch_spec = spack.spec.ArchSpec((None, c.operating_system, c.target))
@@ -502,8 +495,7 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
@llnl.util.lang.memoized
def class_for_compiler_name(compiler_name):
"""Given a compiler module name, get the corresponding Compiler class."""
if not supported(compiler_name):
raise UnknownCompilerError(compiler_name)
assert supported(compiler_name)
# Hack to be able to call the compiler `apple-clang` while still
# using a valid python name for the module
@@ -773,8 +765,7 @@ def name_matches(name, name_list):
toolchains.add(compiler_cls.__name__)
if len(toolchains) > 1:
if toolchains == set(['Clang', 'AppleClang', 'Aocc']) or \
toolchains == set(['Dpcpp', 'Oneapi']):
if toolchains == set(['Clang', 'AppleClang', 'Aocc']):
return False
tty.debug("[TOOLCHAINS] {0}".format(toolchains))
return True
@@ -797,13 +788,6 @@ def __init__(self):
"Spack could not find any compilers!")
class UnknownCompilerError(spack.error.SpackError):
def __init__(self, compiler_name):
super(UnknownCompilerError, self).__init__(
"Spack doesn't support the requested compiler: {0}"
.format(compiler_name))
class NoCompilerForSpecError(spack.error.SpackError):
def __init__(self, compiler_spec, target):
super(NoCompilerForSpecError, self).__init__(

View File

@@ -78,8 +78,10 @@ def cxx14_flag(self):
self, "the C++14 standard", "cxx14_flag", "< 4.8")
elif self.real_version < ver('4.9'):
return "-std=c++1y"
else:
elif self.real_version < ver('6.0'):
return "-std=c++14"
else:
return ""
@property
def cxx17_flag(self):

View File

@@ -88,7 +88,7 @@
#: Path to the default configuration
configuration_defaults_path = (
'defaults', os.path.join(spack.paths.etc_path, 'defaults')
'defaults', os.path.join(spack.paths.etc_path, 'spack', 'defaults')
)
#: Hard-coded default values for some key configuration options.
@@ -104,13 +104,12 @@
'build_jobs': min(16, cpus_available()),
'build_stage': '$tempdir/spack-stage',
'concretizer': 'clingo',
'license_dir': spack.paths.default_license_dir,
}
}
#: metavar to use for commands that accept scopes
#: this is shorter and more readable than listing all choices
scopes_metavar = '{defaults,system,site,user}[/PLATFORM] or env:ENVIRONMENT'
scopes_metavar = '{defaults,system,site,user}[/PLATFORM]'
#: Base name for the (internal) overrides scope.
overrides_base_name = 'overrides-'
@@ -816,7 +815,7 @@ def _config():
# Site configuration is per spack instance, for sites or projects
# No site-level configs should be checked into spack by default.
configuration_paths.append(
('site', os.path.join(spack.paths.etc_path)),
('site', os.path.join(spack.paths.etc_path, 'spack')),
)
# User configuration can override both spack defaults and site config

View File

@@ -171,12 +171,11 @@ def strip(self):
def paths(self):
"""Important paths in the image"""
Paths = collections.namedtuple('Paths', [
'environment', 'store', 'hidden_view', 'view'
'environment', 'store', 'view'
])
return Paths(
environment='/opt/spack-environment',
store='/opt/software',
hidden_view='/opt/._view',
view='/opt/view'
)

View File

@@ -1,192 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import json
import jsonschema
import six
import llnl.util.tty as tty
import spack.cmd
import spack.hash_types as hash_types
from spack.schema.cray_manifest import schema as manifest_schema
#: Cray systems can store a Spack-compatible description of system
#: packages here.
default_path = '/opt/cray/pe/cpe-descriptive-manifest/'
compiler_name_translation = {
'nvidia': 'nvhpc',
'rocm': 'rocmcc',
}
def translated_compiler_name(manifest_compiler_name):
"""
When creating a Compiler object, Spack expects a name matching
one of the classes in `spack.compilers`. Names in the Cray manifest
may differ; for cases where we know the name refers to a compiler in
Spack, this function translates it automatically.
This function will raise an error if there is no recorded translation
and the name doesn't match a known compiler name.
"""
if manifest_compiler_name in compiler_name_translation:
return compiler_name_translation[manifest_compiler_name]
elif manifest_compiler_name in spack.compilers.supported_compilers():
return manifest_compiler_name
else:
raise spack.compilers.UnknownCompilerError(
"Manifest parsing - unknown compiler: {0}"
.format(manifest_compiler_name))
def compiler_from_entry(entry):
compiler_name = translated_compiler_name(entry['name'])
paths = entry['executables']
version = entry['version']
arch = entry['arch']
operating_system = arch['os']
target = arch['target']
compiler_cls = spack.compilers.class_for_compiler_name(compiler_name)
spec = spack.spec.CompilerSpec(compiler_cls.name, version)
paths = [paths.get(x, None) for x in ('cc', 'cxx', 'f77', 'fc')]
return compiler_cls(
spec, operating_system, target, paths
)
def spec_from_entry(entry):
arch_str = ""
if 'arch' in entry:
arch_format = "arch={platform}-{os}-{target}"
arch_str = arch_format.format(
platform=entry['arch']['platform'],
os=entry['arch']['platform_os'],
target=entry['arch']['target']['name']
)
compiler_str = ""
if 'compiler' in entry:
compiler_format = "%{name}@{version}"
compiler_str = compiler_format.format(
name=translated_compiler_name(entry['compiler']['name']),
version=entry['compiler']['version']
)
spec_format = "{name}@{version} {compiler} {arch}"
spec_str = spec_format.format(
name=entry['name'],
version=entry['version'],
compiler=compiler_str,
arch=arch_str
)
package = spack.repo.get(entry['name'])
if 'parameters' in entry:
variant_strs = list()
for name, value in entry['parameters'].items():
# TODO: also ensure that the variant value is valid?
if not (name in package.variants):
tty.debug("Omitting variant {0} for entry {1}/{2}"
.format(name, entry['name'], entry['hash'][:7]))
continue
# Value could be a list (of strings), boolean, or string
if isinstance(value, six.string_types):
variant_strs.append('{0}={1}'.format(name, value))
else:
try:
iter(value)
variant_strs.append(
'{0}={1}'.format(name, ','.join(value)))
continue
except TypeError:
# Not an iterable
pass
# At this point not a string or collection, check for boolean
if value in [True, False]:
bool_symbol = '+' if value else '~'
variant_strs.append('{0}{1}'.format(bool_symbol, name))
else:
raise ValueError(
"Unexpected value for {0} ({1}): {2}".format(
name, str(type(value)), str(value)
)
)
spec_str += ' ' + ' '.join(variant_strs)
spec, = spack.cmd.parse_specs(spec_str.split())
for ht in [hash_types.dag_hash, hash_types.build_hash,
hash_types.full_hash]:
setattr(spec, ht.attr, entry['hash'])
spec._concrete = True
spec._hashes_final = True
spec.external_path = entry['prefix']
spec.origin = 'external-db'
spack.spec.Spec.ensure_valid_variants(spec)
return spec
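For illustration, an entry with the following (hypothetical) values would be rendered into a spec string roughly like this before being handed to spack.cmd.parse_specs:
entry = {
    'name': 'openmpi',
    'version': '4.1.1',
    'arch': {'platform': 'cray', 'platform_os': 'sles15',
             'target': {'name': 'zen2'}},
    'compiler': {'name': 'gcc', 'version': '11.2.0'},
}

compiler_str = "%{name}@{version}".format(
    name=entry['compiler']['name'], version=entry['compiler']['version'])
arch_str = "arch={platform}-{os}-{target}".format(
    platform=entry['arch']['platform'],
    os=entry['arch']['platform_os'],
    target=entry['arch']['target']['name'])
print("{name}@{version} {compiler} {arch}".format(
    name=entry['name'], version=entry['version'],
    compiler=compiler_str, arch=arch_str))
# openmpi@4.1.1 %gcc@11.2.0 arch=cray-sles15-zen2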
def entries_to_specs(entries):
spec_dict = {}
for entry in entries:
try:
spec = spec_from_entry(entry)
spec_dict[spec._hash] = spec
except spack.repo.UnknownPackageError:
tty.debug("Omitting package {0}: no corresponding repo package"
.format(entry['name']))
except spack.error.SpackError:
raise
except Exception:
tty.warn("Could not parse entry: " + str(entry))
for entry in filter(lambda x: 'dependencies' in x, entries):
dependencies = entry['dependencies']
for name, properties in dependencies.items():
dep_hash = properties['hash']
deptypes = properties['type']
if dep_hash in spec_dict:
if entry['hash'] not in spec_dict:
continue
parent_spec = spec_dict[entry['hash']]
dep_spec = spec_dict[dep_hash]
parent_spec._add_dependency(dep_spec, deptypes)
return spec_dict
def read(path, apply_updates):
with open(path, 'r') as json_file:
json_data = json.load(json_file)
jsonschema.validate(json_data, manifest_schema)
specs = entries_to_specs(json_data['specs'])
tty.debug("{0}: {1} specs read from manifest".format(
path,
str(len(specs))))
compilers = list()
if 'compilers' in json_data:
compilers.extend(compiler_from_entry(x)
for x in json_data['compilers'])
tty.debug("{0}: {1} compilers read from manifest".format(
path,
str(len(compilers))))
# Filter out the compilers that already appear in the configuration
compilers = spack.compilers.select_new_compilers(compilers)
if apply_updates and compilers:
spack.compilers.add_compilers_to_config(
compilers, init_config=False)
if apply_updates:
for spec in specs.values():
spack.store.db.add(spec, directory_layout=None)

View File

@@ -91,8 +91,7 @@
_pkg_lock_timeout = None
# Types of dependencies tracked by the database
# We store by DAG hash, so we track the dependencies that the DAG hash includes.
_tracked_deps = ht.dag_hash.deptype
_tracked_deps = ('link', 'run')
# Default list of fields written for each install record
default_install_record_fields = [
@@ -188,7 +187,6 @@ def __init__(
installation_time=None,
deprecated_for=None,
in_buildcache=False,
origin=None
):
self.spec = spec
self.path = str(path) if path else None
@@ -198,7 +196,6 @@ def __init__(
self.installation_time = installation_time or _now()
self.deprecated_for = deprecated_for
self.in_buildcache = in_buildcache
self.origin = origin
def install_type_matches(self, installed):
installed = InstallStatuses.canonicalize(installed)
@@ -220,9 +217,6 @@ def to_dict(self, include_fields=default_install_record_fields):
else:
rec_dict.update({field_name: getattr(self, field_name)})
if self.origin:
rec_dict['origin'] = self.origin
return rec_dict
@classmethod
@@ -356,10 +350,10 @@ def __init__(self, root, db_dir=None, upstream_dbs=None,
self.prefix_fail_path = os.path.join(self._db_dir, 'prefix_failures')
# Create needed directories and files
if not is_upstream and not os.path.exists(self._db_dir):
if not os.path.exists(self._db_dir):
fs.mkdirp(self._db_dir)
if not is_upstream and not os.path.exists(self._failure_dir):
if not os.path.exists(self._failure_dir) and not is_upstream:
fs.mkdirp(self._failure_dir)
self.is_upstream = is_upstream
@@ -434,7 +428,7 @@ def _failed_spec_path(self, spec):
.format(spec.name))
return os.path.join(self._failure_dir,
'{0}-{1}'.format(spec.name, spec.dag_hash()))
'{0}-{1}'.format(spec.name, spec.full_hash()))
def clear_all_failures(self):
"""Force remove install failure tracking files."""
@@ -646,12 +640,8 @@ def _write_to_file(self, stream):
# TODO: fix this before we support multiple install locations.
database = {
'database': {
# TODO: move this to a top-level _meta section if we ever
# TODO: bump the DB version to 7
'version': str(_db_version),
# dictionary of installation records, keyed by DAG hash
'installs': installs,
'version': str(_db_version)
}
}
@@ -691,13 +681,6 @@ def db_for_spec_hash(self, hash_key):
return db
def query_by_spec_hash(self, hash_key, data=None):
"""Get a spec for hash, and whether it's installed upstream.
Return:
(tuple): (bool, optional InstallRecord): bool tells us whether
the spec is installed upstream. Its InstallRecord is also
returned if it's installed at all; otherwise None.
"""
if data and hash_key in data:
return False, data[hash_key]
if not data:
@@ -1064,7 +1047,9 @@ def _read(self):
self._state_is_inconsistent = False
return
elif self.is_upstream:
tty.warn('upstream not found: {0}'.format(self._index_path))
raise UpstreamDatabaseLockingError(
"No database index file is present, and upstream"
" databases cannot generate an index file")
def _add(
self,
@@ -1102,7 +1087,6 @@ def _add(
"Specs added to DB must be concrete.")
key = spec.dag_hash()
spec_pkg_hash = spec._package_hash
upstream, record = self.query_by_spec_hash(key)
if upstream:
return
@@ -1147,10 +1131,6 @@ def _add(
'explicit': explicit,
'installation_time': installation_time
}
# Commands other than 'spack install' may add specs to the DB;
# we can record the source of an installed Spec with 'origin'.
if hasattr(spec, 'origin'):
extra_args['origin'] = spec.origin
self._data[key] = InstallRecord(
new_spec, path, installed, ref_count=0, **extra_args
)
@@ -1164,10 +1144,10 @@ def _add(
record.ref_count += 1
# Mark concrete once everything is built, and preserve
# the original hashes of concrete specs.
# the original hash of concrete specs.
new_spec._mark_concrete()
new_spec._hash = key
new_spec._package_hash = spec_pkg_hash
new_spec._full_hash = spec._full_hash
else:
# It is already in the database
@@ -1482,7 +1462,6 @@ def _query(
end_date=None,
hashes=None,
in_buildcache=any,
origin=None
):
"""Run a query on the database."""
@@ -1511,9 +1490,6 @@ def _query(
if hashes is not None and rec.spec.dag_hash() not in hashes:
continue
if origin and not (origin == rec.origin):
continue
if not rec.install_type_matches(installed):
continue
@@ -1607,12 +1583,11 @@ def unused_specs(self):
needed, visited = set(), set()
with self.read_transaction():
for key, rec in self._data.items():
if not rec.explicit:
continue
# recycle `visited` across calls to avoid redundantly traversing
for spec in rec.spec.traverse(visited=visited, deptype=("link", "run")):
needed.add(spec.dag_hash())
if rec.explicit:
# recycle `visited` across calls to avoid
# redundantly traversing
for spec in rec.spec.traverse(visited=visited):
needed.add(spec.dag_hash())
unused = [rec.spec for key, rec in self._data.items()
if key not in needed and rec.installed]
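A stripped-down, pure-Python sketch of the mark-and-sweep idea behind unused_specs (toy records, not Spack's actual Database):
records = {
    'aaa': {'explicit': True,  'installed': True, 'deps': ['bbb']},
    'bbb': {'explicit': False, 'installed': True, 'deps': []},
    'ccc': {'explicit': False, 'installed': True, 'deps': []},
}

def unused(records):
    needed = set()

    def visit(key):
        # mark everything reachable from explicitly installed roots
        if key in needed:
            return
        needed.add(key)
        for dep in records[key]['deps']:
            visit(dep)

    for key, rec in records.items():
        if rec['explicit']:
            visit(key)
    # sweep: installed records that nothing explicit needs
    return [k for k, rec in records.items()
            if k not in needed and rec['installed']]

print(unused(records))   # ['ccc']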

View File

@@ -74,8 +74,7 @@ def executables_in_path(path_hints=None):
def libraries_in_ld_library_path(path_hints=None):
"""Get the paths of all libraries available from LD_LIBRARY_PATH,
LIBRARY_PATH, DYLD_LIBRARY_PATH, and DYLD_FALLBACK_LIBRARY_PATH.
"""Get the paths of all libraries available from LD_LIBRARY_PATH.
For convenience, this is constructed as a dictionary where the keys are
the library paths and the values are the names of the libraries
@@ -86,15 +85,9 @@ def libraries_in_ld_library_path(path_hints=None):
Args:
path_hints (list): list of paths to be searched. If None the list will be
constructed based on the set of LD_LIBRARY_PATH, LIBRARY_PATH,
DYLD_LIBRARY_PATH, and DYLD_FALLBACK_LIBRARY_PATH environment
variables.
constructed based on the LD_LIBRARY_PATH environment variable.
"""
path_hints = path_hints or \
spack.util.environment.get_path('LIBRARY_PATH') + \
spack.util.environment.get_path('LD_LIBRARY_PATH') + \
spack.util.environment.get_path('DYLD_LIBRARY_PATH') + \
spack.util.environment.get_path('DYLD_FALLBACK_LIBRARY_PATH')
path_hints = path_hints or spack.util.environment.get_path('LD_LIBRARY_PATH')
search_paths = llnl.util.filesystem.search_paths_for_libraries(*path_hints)
path_to_lib = {}

View File

@@ -48,13 +48,13 @@ class OpenMpi(Package):
from spack.resource import Resource
from spack.version import Version, VersionChecksumError
__all__ = ['DirectiveError', 'DirectiveMeta', 'version', 'conflicts', 'depends_on',
'extends', 'provides', 'patch', 'variant', 'resource']
__all__ = ['DirectiveError', 'DirectiveMeta']
#: These are variant names used by Spack internally; packages can't use them
reserved_names = ['patches', 'dev_path']
#: Names of possible directives. This list is populated elsewhere in the file.
#: Names of possible directives. This list is populated elsewhere in the file and then
#: added to `__all__` at the bottom.
directive_names = []
_patch_order_index = 0
@@ -731,3 +731,7 @@ class DependencyPatchError(DirectiveError):
class UnsupportedPackageDirective(DirectiveError):
"""Raised when an invalid or unsupported package directive is specified."""
#: add all directive names to __all__
__all__.extend(directive_names)

View File

@@ -9,7 +9,6 @@
import posixpath
import re
import shutil
import sys
import tempfile
from contextlib import contextmanager
@@ -25,7 +24,6 @@
import spack.util.spack_json as sjson
from spack.error import SpackError
is_windows = sys.platform == 'win32'
# Note: Posixpath is used here as opposed to
# os.path.join due to spack.spec.Spec.format
# requiring forward slash path separators at this stage
@@ -110,9 +108,13 @@ def write_spec(self, spec, path):
"""Write a spec out to a file."""
_check_concrete(spec)
with open(path, 'w') as f:
# The hash of the projection is the DAG hash which contains
# the full provenance, so it's available if we want it later
spec.to_json(f, hash=ht.dag_hash)
# The hash of the projection is the DAG hash but we write out the
# full provenance by full hash so it's available if we want it later
# extension = os.path.splitext(path)[-1].lower()
# if 'json' in extension:
spec.to_json(f, hash=ht.full_hash)
# elif 'yaml' in extension:
# spec.to_yaml(f, hash=ht.full_hash)
def write_host_environment(self, spec):
"""The host environment is a json file with os, kernel, and spack
@@ -238,10 +240,10 @@ def create_install_directory(self, spec):
def ensure_installed(self, spec):
"""
Throws InconsistentInstallDirectoryError if:
Throws DirectoryLayoutError if:
1. spec prefix does not exist
2. spec prefix does not contain a spec file, or
3. We read a spec with the wrong DAG hash out of an existing install directory.
2. spec prefix does not contain a spec file
3. the spec file does not correspond to the spec
"""
_check_concrete(spec)
path = self.path_for_spec(spec)
@@ -257,7 +259,25 @@ def ensure_installed(self, spec):
" " + path)
installed_spec = self.read_spec(spec_file_path)
if installed_spec.dag_hash() != spec.dag_hash():
if installed_spec == spec:
return
# DAG hashes currently do not include build dependencies.
#
# TODO: remove this when we do better concretization and don't
# ignore build-only deps in hashes.
elif (installed_spec.copy(deps=('link', 'run')) ==
spec.copy(deps=('link', 'run'))):
# The directory layout prefix is based on the dag hash, so among
# specs with differing full-hash but matching dag-hash, only one
# may be installed. This means for example that for two instances
# that differ only in CMake version used to build, only one will
# be installed.
return
if spec.dag_hash() == installed_spec.dag_hash():
raise SpecHashCollisionError(spec, installed_spec)
else:
raise InconsistentInstallDirectoryError(
'Spec file in %s does not match hash!' % spec_file_path)
@@ -329,14 +349,6 @@ def remove_install_directory(self, spec, deprecated=False):
path = self.path_for_spec(spec)
assert(path.startswith(self.root))
# Windows readonly files cannot be removed by Python
# directly, change permissions before attempting to remove
if is_windows:
kwargs = {'ignore_errors': False,
'onerror': fs.readonly_file_handler(ignore_errors=False)}
else:
kwargs = {} # the default value for ignore_errors is false
if deprecated:
if os.path.exists(path):
try:
@@ -345,9 +357,10 @@ def remove_install_directory(self, spec, deprecated=False):
os.remove(metapath)
except OSError as e:
raise six.raise_from(RemoveFailedError(spec, path, e), e)
elif os.path.exists(path):
try:
shutil.rmtree(path, **kwargs)
shutil.rmtree(path)
except OSError as e:
raise six.raise_from(RemoveFailedError(spec, path, e), e)
@@ -445,8 +458,8 @@ def add_extension(self, spec, ext_spec):
def check_extension_conflict(self, spec, ext_spec):
exts = self._extension_map(spec)
if ext_spec.name in exts:
installed_spec = exts[ext_spec.name]
if ext_spec.dag_hash() == installed_spec.dag_hash():
installed_spec = exts[ext_spec.name].copy(deps=('link', 'run'))
if ext_spec.copy(deps=('link', 'run')) == installed_spec:
raise ExtensionAlreadyInstalledError(spec, ext_spec)
else:
raise ExtensionConflictError(spec, ext_spec, installed_spec)
@@ -566,6 +579,15 @@ def __init__(self, message, long_msg=None):
super(DirectoryLayoutError, self).__init__(message, long_msg)
class SpecHashCollisionError(DirectoryLayoutError):
"""Raised when there is a hash collision in an install layout."""
def __init__(self, installed_spec, new_spec):
super(SpecHashCollisionError, self).__init__(
'Specs %s and %s have the same SHA-1 prefix!'
% (installed_spec, new_spec))
class RemoveFailedError(DirectoryLayoutError):
"""Raised when a DirectoryLayout cannot remove an install prefix."""

View File

@@ -1,334 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This package implements Spack environments.
.. _lockfile-format:
`spack.lock` format
===================
Spack environments have existed since Spack ``v0.12.0``, and there have been 4 different
``spack.lock`` formats since then. The formats are documented here.
The high-level format of a Spack lockfile hasn't changed much between versions, but the
contents have. Lockfiles are JSON-formatted and their top-level sections are:
1. ``_meta`` (object): this contains details about the file format, including:
* ``file-type``: always ``"spack-lockfile"``
* ``lockfile-version``: an integer representing the lockfile format version
* ``specfile-version``: an integer representing the spec format version (since
``v0.17``)
2. ``roots`` (list): an ordered list of records representing the roots of the Spack
environment. Each has two fields:
* ``hash``: a Spack spec hash uniquely identifying the concrete root spec
* ``spec``: a string representation of the abstract spec that was concretized
3. ``concrete_specs``: a dictionary containing the specs in the environment.
Compatibility
-------------
New versions of Spack can (so far) read all old lockfile formats -- they are
backward-compatible. Old versions cannot read new lockfile formats, and you'll need to
upgrade Spack to use them.
.. list-table:: Lockfile version compatibility across Spack versions
:header-rows: 1
* - Spack version
- ``v1``
- ``v2``
- ``v3``
- ``v4``
* - ``v0.12:0.14``
  - ✅
  -
  -
  -
* - ``v0.15:0.16``
  - ✅
  - ✅
  -
  -
* - ``v0.17``
  - ✅
  - ✅
  - ✅
  -
* - ``v0.18:``
  - ✅
  - ✅
  - ✅
  - ✅
Version 1
---------
When lockfiles were first created, there was only one hash in Spack: the DAG hash. This
DAG hash (we'll call it the old DAG hash) did *not* include build dependencies -- it
only included transitive link and run dependencies.
The spec format at this time was keyed by name. Each spec started with a key for its
name, whose value was a dictionary of other spec attributes. The lockfile put these
name-keyed specs into dictionaries keyed by their DAG hash, and the spec records did not
actually have a "hash" field in the lockfile -- you have to associate the hash from the
key with the spec record after the fact.
Dependencies in original lockfiles were keyed by ``"hash"``, i.e. the old DAG hash.
.. code-block:: json
{
"_meta": {
"file-type": "spack-lockfile",
"lockfile-version": 1
},
"roots": [
{
"hash": "<old_dag_hash 1>",
"spec": "<abstract spec 1>"
},
{
"hash": "<old_dag_hash 2>",
"spec": "<abstract spec 2>"
}
],
"concrete_specs": {
"<old_dag_hash 1>": {
"... <spec dict attributes> ...": { },
"dependencies": {
"depname_1": {
"hash": "<old_dag_hash for depname_1>",
"type": ["build", "link"]
},
"depname_2": {
"hash": "<old_dag_hash for depname_3>",
"type": ["build", "link"]
}
},
"hash": "<old_dag_hash 1>"
},
"<old_dag_hash 2>": {
"... <spec dict attributes> ...": { },
"dependencies": {
"depname_3": {
"hash": "<old_dag_hash for depname_3>",
"type": ["build", "link"]
},
"depname_4": {
"hash": "<old_dag_hash for depname_4>",
"type": ["build", "link"]
},
},
"hash": "<old_dag_hash 2>"
},
}
}
Version 2
---------
Version 2 changes one thing: specs in the lockfile are now keyed by ``build_hash``
instead of the old ``dag_hash``. Specs have a ``hash`` attribute with their real DAG
hash, so you can't go by the dictionary key anymore to identify a spec -- you have to
read it in and look at ``"hash"``. Dependencies are still keyed by old DAG hash.
Even though we key lockfiles by ``build_hash``, specs in Spack were still deployed with
the old, coarser DAG hash. This means that in v2 and v3 lockfiles (which are keyed by
build hash), there may be multiple versions of the same spec with different build
dependencies, which means they will have different build hashes but the same DAG hash.
Spack would only have been able to actually install one of these.
.. code-block:: json
{
"_meta": {
"file-type": "spack-lockfile",
"lockfile-version": 2
},
"roots": [
{
"hash": "<build_hash 1>",
"spec": "<abstract spec 1>"
},
{
"hash": "<build_hash 2>",
"spec": "<abstract spec 2>"
}
],
"concrete_specs": {
"<build_hash 1>": {
"... <spec dict attributes> ...": { },
"dependencies": {
"depname_1": {
"hash": "<old_dag_hash for depname_1>",
"type": ["build", "link"]
},
"depname_2": {
"hash": "<old_dag_hash for depname_3>",
"type": ["build", "link"]
}
},
"hash": "<old_dag_hash 1>",
},
"<build_hash 2>": {
"... <spec dict attributes> ...": { },
"dependencies": {
"depname_3": {
"hash": "<old_dag_hash for depname_3>",
"type": ["build", "link"]
},
"depname_4": {
"hash": "<old_dag_hash for depname_4>",
"type": ["build", "link"]
}
},
"hash": "<old_dag_hash 2>"
}
}
}
Version 3
---------
Version 3 doesn't change the top-level lockfile format, but this was when we changed the
specfile format. Specs in ``concrete_specs`` are now keyed by the build hash, with no
inner dictionary keyed by their package name. The package name is in a ``name`` field
inside each spec dictionary. The ``dependencies`` field in the specs is a list instead
of a dictionary, and each element of the list is a record with the name, dependency
types, and hash of the dependency. Instead of a key called ``hash``, dependencies are
keyed by ``build_hash``. Each spec still has a ``hash`` attribute.
Version 3 adds the ``specfile_version`` field to ``_meta`` and uses the new JSON spec
format.
.. code-block:: json
{
"_meta": {
"file-type": "spack-lockfile",
"lockfile-version": 3,
"specfile-version": 2
},
"roots": [
{
"hash": "<build_hash 1>",
"spec": "<abstract spec 1>"
},
{
"hash": "<build_hash 2>",
"spec": "<abstract spec 2>"
}
],
"concrete_specs": {
"<build_hash 1>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_1",
"build_hash": "<build_hash for depname_1>",
"type": ["build", "link"]
},
{
"name": "depname_2",
"build_hash": "<build_hash for depname_2>",
"type": ["build", "link"]
}
],
"hash": "<old_dag_hash 1>"
},
"<build_hash 2>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_3",
"build_hash": "<build_hash for depname_3>",
"type": ["build", "link"]
},
{
"name": "depname_4",
"build_hash": "<build_hash for depname_4>",
"type": ["build", "link"]
}
],
"hash": "<old_dag_hash 2>"
}
}
}
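To illustrate the structural change only, a hypothetical helper (not Spack code) could
walk dependency edges uniformly across the old dict form and the new list form:
.. code-block:: python
def dependency_edges(record, lockfile_version):
    """Sketch: yield (name, key, deptypes) for one concrete spec record."""
    deps = record.get("dependencies")
    if not deps:
        return
    if lockfile_version < 3:
        # v1/v2: a dict keyed by dependency name; the key field is "hash"
        for name, dep in deps.items():
            yield name, dep["hash"], dep["type"]
    else:
        # v3: a list of records; the key field is "build_hash"
        for dep in deps:
            yield dep["name"], dep["build_hash"], dep["type"]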
Version 4
---------
Version 4 removes build hashes and is keyed by the new DAG hash (``hash``). The ``hash``
now includes build dependencies and a canonical hash of the ``package.py`` file.
Dependencies are keyed by ``hash`` (DAG hash) as well. There are no more ``build_hash``
fields in the specs, and there are no more issues with lockfiles being able to store
multiple specs with the same DAG hash (because the DAG hash is now finer-grained).
.. code-block:: json
{
"_meta": {
"file-type": "spack-lockfile",
"lockfile-version": 3,
"specfile-version": 2
},
"roots": [
{
"hash": "<dag_hash 1>",
"spec": "<abstract spec 1>"
},
{
"hash": "<dag_hash 2>",
"spec": "<abstract spec 2>"
}
],
"concrete_specs": {
"<dag_hash 1>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_1",
"hash": "<dag_hash for depname_1>",
"type": ["build", "link"]
},
{
"name": "depname_2",
"hash": "<dag_hash for depname_2>",
"type": ["build", "link"]
}
],
"hash": "<dag_hash 1>",
},
"<daghash 2>": {
"... <spec dict attributes> ...": { },
"dependencies": [
{
"name": "depname_3",
"hash": "<dag_hash for depname_3>",
"type": ["build", "link"]
},
{
"name": "depname_4",
"hash": "<dag_hash for depname_4>",
"type": ["build", "link"]
}
],
"hash": "<dag_hash 2>"
}
}
}
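A minimal sketch of a v4 reader, assuming only the layout shown above, resolves roots and
dependencies through a single DAG-hash-keyed table:
.. code-block:: python
import json

def load_v4_roots(path):
    """Sketch: resolve the root specs of a v4 lockfile by DAG hash."""
    with open(path) as f:
        lockfile = json.load(f)
    assert lockfile["_meta"]["lockfile-version"] == 4
    concrete = lockfile["concrete_specs"]
    # roots and dependencies now reference records by the same (new) DAG hash
    return [concrete[root["hash"]] for root in lockfile["roots"]]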
"""
from .environment import (
Environment,
SpackEnvironmentError,

View File

@@ -79,9 +79,8 @@
env_subdir_name = '.spack-env'
def default_manifest_yaml():
"""default spack.yaml file to put in new environments"""
return """\
#: default spack.yaml file to put in new environments
default_manifest_yaml = """\
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
@@ -90,16 +89,12 @@ def default_manifest_yaml():
# add package specs to the `specs` list
specs: []
view: true
concretizer:
unify: {}
""".format('true' if spack.config.get('concretizer:unify') else 'false')
"""
#: regex for validating enviroment names
valid_environment_name_re = r'^\w[\w-]*$'
#: version of the lockfile format. Must increase monotonically.
lockfile_format_version = 4
lockfile_format_version = 3
# Magic names
# The name of the standalone spec list in the manifest yaml
@@ -307,7 +302,7 @@ def _is_dev_spec_and_has_changed(spec):
return False
# Now we can check whether the code changed since the last installation
if not spec.installed:
if not spec.package.installed:
# Not installed -> nothing to compare against
return False
@@ -320,7 +315,7 @@ def _spec_needs_overwrite(spec, changed_dev_specs):
"""Check whether the current spec needs to be overwritten because either it has
changed itself or one of its dependencies have changed"""
# if it's not installed, we don't need to overwrite it
if not spec.installed:
if not spec.package.installed:
return False
# If the spec itself has changed this is a trivial decision
@@ -335,7 +330,7 @@ def _spec_needs_overwrite(spec, changed_dev_specs):
# If any dep needs overwrite, or any dep is missing and is a dev build then
# overwrite this package
if any(
((not dep.installed) and dep.satisfies('dev_path=*')) or
((not dep.package.installed) and dep.satisfies('dev_path=*')) or
_spec_needs_overwrite(dep, changed_dev_specs)
for dep in spec.traverse(root=False)
):
@@ -444,7 +439,7 @@ def _next_root(self, specs):
def content_hash(self, specs):
d = syaml.syaml_dict([
('descriptor', self.to_dict()),
('specs', [(spec.dag_hash(), spec.prefix) for spec in sorted(specs)])
('specs', [(spec.full_hash(), spec.prefix) for spec in sorted(specs)])
])
contents = sjson.dump(d)
return spack.util.hash.b32_hash(contents)
@@ -523,7 +518,7 @@ def specs_for_view(self, concretized_root_specs):
# Filter selected, installed specs
with spack.store.db.read_transaction():
specs = [s for s in specs if s in self and s.installed]
specs = [s for s in specs if s in self and s.package.installed]
return specs
@@ -628,7 +623,7 @@ def __init__(self, path, init_file=None, with_view=None, keep_relative=False):
# This attribute will be set properly from configuration
# during concretization
self.unify = None
self.concretization = None
self.clear()
if init_file:
@@ -637,11 +632,11 @@ def __init__(self, path, init_file=None, with_view=None, keep_relative=False):
# the init file.
with fs.open_if_filename(init_file) as f:
if hasattr(f, 'name') and f.name.endswith('.lock'):
self._read_manifest(default_manifest_yaml())
self._read_manifest(default_manifest_yaml)
self._read_lockfile(f)
self._set_user_specs_from_lockfile()
else:
self._read_manifest(f, raw_yaml=default_manifest_yaml())
self._read_manifest(f, raw_yaml=default_manifest_yaml)
# Rewrite relative develop paths when initializing a new
# environment in a different location from the spack.yaml file.
@@ -705,7 +700,7 @@ def _read(self):
default_manifest = not os.path.exists(self.manifest_path)
if default_manifest:
# No manifest, use default yaml
self._read_manifest(default_manifest_yaml())
self._read_manifest(default_manifest_yaml)
else:
with open(self.manifest_path) as f:
self._read_manifest(f)
@@ -771,15 +766,8 @@ def _read_manifest(self, f, raw_yaml=None):
self.views = {}
# Retrieve the current concretization strategy
configuration = config_dict(self.yaml)
# Let `concretization` overrule `concretize:unify` config for now,
# but use a translation table to have internally a representation
# as if we were using the new configuration
translation = {'separately': False, 'together': True}
try:
self.unify = translation[configuration['concretization']]
except KeyError:
self.unify = spack.config.get('concretizer:unify', False)
# default concretization to separately
self.concretization = configuration.get('concretization', 'separately')
# Retrieve dev-build packages:
self.dev_specs = configuration.get('develop', {})
@@ -1022,9 +1010,14 @@ def remove(self, query_spec, list_name=user_speclist_name, force=False):
if not matches:
# concrete specs match against concrete specs in the env
# by dag hash.
# by *dag hash*, not build hash.
dag_hashes_in_order = [
self.specs_by_hash[build_hash].dag_hash()
for build_hash in self.concretized_order
]
specs_hashes = zip(
self.concretized_user_specs, self.concretized_order
self.concretized_user_specs, dag_hashes_in_order
)
matches = [
@@ -1160,44 +1153,14 @@ def concretize(self, force=False, tests=False):
self.specs_by_hash = {}
# Pick the right concretization strategy
if self.unify == 'when_possible':
return self._concretize_together_where_possible(tests=tests)
if self.unify is True:
if self.concretization == 'together':
return self._concretize_together(tests=tests)
if self.unify is False:
if self.concretization == 'separately':
return self._concretize_separately(tests=tests)
msg = 'concretization strategy not implemented [{0}]'
raise SpackEnvironmentError(msg.format(self.unify))
def _concretize_together_where_possible(self, tests=False):
# Avoid cyclic dependency
import spack.solver.asp
# Exit early if the set of concretized specs is the set of user specs
user_specs_did_not_change = not bool(
set(self.user_specs) - set(self.concretized_user_specs)
)
if user_specs_did_not_change:
return []
# Proceed with concretization
self.concretized_user_specs = []
self.concretized_order = []
self.specs_by_hash = {}
result_by_user_spec = {}
solver = spack.solver.asp.Solver()
for result in solver.solve_in_rounds(self.user_specs, tests=tests):
result_by_user_spec.update(result.specs_by_input)
result = []
for abstract, concrete in sorted(result_by_user_spec.items()):
self._add_concrete_spec(abstract, concrete)
result.append((abstract, concrete))
return result
raise SpackEnvironmentError(msg.format(self.concretization))
def _concretize_together(self, tests=False):
"""Concretization strategy that concretizes all the specs
@@ -1311,7 +1274,7 @@ def _concretize_separately(self, tests=False):
by_hash = {}
for abstract, concrete in zip(root_specs, concretized_root_specs):
self._add_concrete_spec(abstract, concrete)
by_hash[concrete.dag_hash()] = concrete
by_hash[concrete.build_hash()] = concrete
# Unify the specs objects, so we get correct references to all parents
self._read_lockfile_dict(self._to_lockfile_dict())
@@ -1350,7 +1313,7 @@ def concretize_and_add(self, user_spec, concrete_spec=None, tests=False):
concrete_spec: if provided, then it is assumed that it is the
result of concretizing the provided ``user_spec``
"""
if self.unify is True:
if self.concretization == 'together':
msg = 'cannot install a single spec in an environment that is ' \
'configured to be concretized together. Run instead:\n\n' \
' $ spack add <spec>\n' \
@@ -1368,7 +1331,7 @@ def concretize_and_add(self, user_spec, concrete_spec=None, tests=False):
spec = next(
s for s in self.user_specs if s.satisfies(user_spec)
)
concrete = self.specs_by_hash.get(spec.dag_hash())
concrete = self.specs_by_hash.get(spec.build_hash())
if not concrete:
concrete = spec.concretized(tests=tests)
self._add_concrete_spec(spec, concrete)
@@ -1417,10 +1380,9 @@ def check_views(self):
# default view if they are installed.
for view_name, view in self.views.items():
for _, spec in self.concretized_specs():
if spec in view and spec.package and spec.installed:
msg = '{0} in view "{1}"'
tty.debug(msg.format(spec.name, view_name))
if spec in view and spec.package.installed:
tty.debug(
'Spec %s in view %s' % (spec.name, view_name))
except (spack.repo.UnknownPackageError,
spack.repo.UnknownNamespaceError) as e:
tty.warn(e)
@@ -1436,8 +1398,7 @@ def _env_modifications_for_default_view(self, reverse=False):
errors = []
for _, root_spec in self.concretized_specs():
if (root_spec in self.default_view and
root_spec.installed and root_spec.package):
if root_spec in self.default_view and root_spec.package.installed:
for spec in root_spec.traverse(deptype='run', root=True):
if spec.name in visited:
# It is expected that only one instance of the package
@@ -1536,7 +1497,7 @@ def _add_concrete_spec(self, spec, concrete, new=True):
# update internal lists of specs
self.concretized_user_specs.append(spec)
h = concrete.dag_hash()
h = concrete.build_hash()
self.concretized_order.append(h)
self.specs_by_hash[h] = concrete
@@ -1576,7 +1537,7 @@ def uninstalled_specs(self):
with spack.store.db.read_transaction():
for concretized_hash in self.concretized_order:
spec = self.specs_by_hash[concretized_hash]
if not spec.installed or (
if not spec.package.installed or (
spec.satisfies('dev_path=*') or
spec.satisfies('^dev_path=*')
):
@@ -1611,11 +1572,10 @@ def install_specs(self, specs=None, **install_args):
# ensure specs already installed are marked explicit
all_specs = specs or [cs for _, cs in self.concretized_specs()]
specs_installed = [s for s in all_specs if s.installed]
if specs_installed:
with spack.store.db.write_transaction(): # do all in one transaction
for spec in specs_installed:
spack.store.db.update_explicit(spec, True)
specs_installed = [s for s in all_specs if s.package.installed]
with spack.store.db.write_transaction(): # do all in one transaction
for spec in specs_installed:
spack.store.db.update_explicit(spec, True)
if not specs_to_install:
tty.msg('All of the packages are already installed')
@@ -1639,7 +1599,7 @@ def install_specs(self, specs=None, **install_args):
finally:
# Ensure links are set appropriately
for spec in specs_to_install:
if spec.installed:
if spec.package.installed:
self.new_installs.append(spec)
try:
self._install_log_links(spec)
@@ -1654,19 +1614,14 @@ def all_specs(self):
"""Return all specs, even those a user spec would shadow."""
all_specs = set()
for h in self.concretized_order:
try:
spec = self.specs_by_hash[h]
except KeyError:
tty.warn(
'Environment %s appears to be corrupt: missing spec '
'"%s"' % (self.name, h))
continue
all_specs.update(spec.traverse())
all_specs.update(self.specs_by_hash[h].traverse())
return sorted(all_specs)
def all_hashes(self):
"""Return hashes of all specs."""
"""Return hashes of all specs.
Note these hashes exclude build dependencies."""
return list(set(s.dag_hash() for s in self.all_specs()))
def roots(self):
@@ -1694,7 +1649,7 @@ def added_specs(self):
concrete = concretized.get(spec)
if not concrete:
yield spec
elif not concrete.installed:
elif not concrete.package.installed:
yield concrete
def concretized_specs(self):
@@ -1702,15 +1657,6 @@ def concretized_specs(self):
for s, h in zip(self.concretized_user_specs, self.concretized_order):
yield (s, self.specs_by_hash[h])
def get_by_hash(self, dag_hash):
matches = {}
for _, root in self.concretized_specs():
for spec in root.traverse(root=True):
dep_hash = spec.dag_hash()
if dep_hash.startswith(dag_hash):
matches[dep_hash] = spec
return list(matches.values())
def matching_spec(self, spec):
"""
Given a spec (likely not concretized), find a matching concretized
@@ -1738,7 +1684,13 @@ def matching_spec(self, spec):
for user_spec, concretized_user_spec in self.concretized_specs():
# Deal with concrete specs differently
if spec.concrete:
if spec in concretized_user_spec:
# Matching a concrete spec is more restrictive
# than just matching the dag hash
is_match = (
spec in concretized_user_spec and
concretized_user_spec[spec.name].build_hash() == spec.build_hash()
)
if is_match:
matches[spec] = spec
continue
@@ -1818,12 +1770,12 @@ def _to_lockfile_dict(self):
concrete_specs = {}
for spec in self.specs_by_hash.values():
for s in spec.traverse():
dag_hash = s.dag_hash()
if dag_hash not in concrete_specs:
spec_dict = s.node_dict_with_hashes(hash=ht.dag_hash)
build_hash = s.build_hash()
if build_hash not in concrete_specs:
spec_dict = s.to_node_dict(hash=ht.build_hash)
# Assumes no legacy formats, since this was just created.
spec_dict[ht.dag_hash.name] = s.dag_hash()
concrete_specs[dag_hash] = spec_dict
concrete_specs[build_hash] = spec_dict
hash_spec_list = zip(
self.concretized_order, self.concretized_user_specs)
@@ -1857,56 +1809,47 @@ def _read_lockfile(self, file_or_json):
def _read_lockfile_dict(self, d):
"""Read a lockfile dictionary into this environment."""
self.specs_by_hash = {}
roots = d['roots']
self.concretized_user_specs = [Spec(r['spec']) for r in roots]
self.concretized_order = [r['hash'] for r in roots]
json_specs_by_hash = d['concrete_specs']
root_hashes = set(self.concretized_order)
# Track specs by their lockfile key. Currently spack uses the finest
# grained hash as the lockfile key, while older formats used the build
# hash or a previous incarnation of the DAG hash (one that did not
# include build deps or package hash).
specs_by_hash = {}
# Track specs by their DAG hash, allows handling DAG hash collisions
first_seen = {}
# First pass: Put each spec in the map ignoring dependencies
for lockfile_key, node_dict in json_specs_by_hash.items():
for build_hash, node_dict in json_specs_by_hash.items():
spec = Spec.from_node_dict(node_dict)
if not spec._hash:
# in v1 lockfiles, the hash only occurs as a key
spec._hash = lockfile_key
specs_by_hash[lockfile_key] = spec
if d['_meta']['lockfile-version'] > 1:
# Build hash is stored as a key, but not as part of the node dict
# To ensure build hashes are not recomputed, we reattach here
setattr(spec, ht.build_hash.attr, build_hash)
specs_by_hash[build_hash] = spec
# Second pass: For each spec, get its dependencies from the node dict
# and add them to the spec
for lockfile_key, node_dict in json_specs_by_hash.items():
for build_hash, node_dict in json_specs_by_hash.items():
for _, dep_hash, deptypes, _ in (
Spec.dependencies_from_node_dict(node_dict)):
specs_by_hash[lockfile_key]._add_dependency(
specs_by_hash[build_hash]._add_dependency(
specs_by_hash[dep_hash], deptypes)
# Traverse the root specs one at a time in the order they appear.
# The first time we see each DAG hash, that's the one we want to
# keep. This is only required as long as we support older lockfile
# formats where the mapping from DAG hash to lockfile key is possibly
# one-to-many.
for lockfile_key in self.concretized_order:
for s in specs_by_hash[lockfile_key].traverse():
if s.dag_hash() not in first_seen:
first_seen[s.dag_hash()] = s
# If we are reading an older lockfile format (which uses dag hashes
# that exclude build deps), we use this to convert the old
# concretized_order to the full hashes (preserving the order)
old_hash_to_new = {}
self.specs_by_hash = {}
for _, spec in specs_by_hash.items():
dag_hash = spec.dag_hash()
build_hash = spec.build_hash()
if dag_hash in root_hashes:
old_hash_to_new[dag_hash] = build_hash
# Now make sure concretized_order and our internal specs dict
# contains the keys used by modern spack (i.e. the dag_hash
# that includes build deps and package hash).
self.concretized_order = [specs_by_hash[h_key].dag_hash()
for h_key in self.concretized_order]
if (dag_hash in root_hashes or build_hash in root_hashes):
self.specs_by_hash[build_hash] = spec
for spec_dag_hash in self.concretized_order:
self.specs_by_hash[spec_dag_hash] = first_seen[spec_dag_hash]
if old_hash_to_new:
# Replace any older hashes in concretized_order with hashes
# that include build deps
self.concretized_order = [
old_hash_to_new.get(h, h) for h in self.concretized_order]
def write(self, regenerate=True):
"""Writes an in-memory environment to its location on disk.
@@ -1919,15 +1862,17 @@ def write(self, regenerate=True):
regenerate (bool): regenerate views and run post-write hooks as
well as writing if True.
"""
# Warn that environments are not in the latest format.
if not is_latest_format(self.manifest_path):
ver = '.'.join(str(s) for s in spack.spack_version_info[:2])
msg = ('The environment "{}" is written to disk in a deprecated format. '
'Please update it using:\n\n'
'\tspack env update {}\n\n'
'Note that versions of Spack older than {} may not be able to '
# Intercept environment not using the latest schema format and prevent
# them from being modified
manifest_exists = os.path.exists(self.manifest_path)
if manifest_exists and not is_latest_format(self.manifest_path):
msg = ('The environment "{0}" needs to be written to disk, but '
'is currently using a deprecated format. Please update it '
'using:\n\n'
'\tspack env update {0}\n\n'
'Note that previous versions of Spack will not be able to '
'use the updated configuration.')
tty.warn(msg.format(self.name, self.name, ver))
raise RuntimeError(msg.format(self.name))
# ensure path in var/spack/environments
fs.mkdirp(self.path)
@@ -2279,16 +2224,14 @@ def _top_level_key(data):
def is_latest_format(manifest):
"""Return False if the manifest file exists and is not in the latest schema format.
"""Return True if the manifest file is at the latest schema format,
False otherwise.
Args:
manifest (str): manifest file to be analyzed
"""
try:
with open(manifest) as f:
data = syaml.load(f)
except (OSError, IOError):
return True
with open(manifest) as f:
data = syaml.load(f)
top_level_key = _top_level_key(data)
changed = spack.schema.env.update(data[top_level_key])
return not changed

View File

@@ -10,9 +10,9 @@
import llnl.util.tty as tty
#: at what level we should write stack traces or short error messages
#: whether we should write stack traces or short error messages
#: this is module-scoped because it needs to be set very early
debug = 0
debug = False
class SpackError(Exception):

View File

@@ -327,7 +327,9 @@ def fetch(self):
continue
try:
self._fetch_from_url(url)
partial_file, save_file = self._fetch_from_url(url)
if save_file and (partial_file is not None):
llnl.util.filesystem.rename(partial_file, save_file)
break
except FailedDownloadError as e:
errors.append(str(e))
@@ -377,7 +379,9 @@ def _check_headers(self, headers):
@_needs_stage
def _fetch_urllib(self, url):
save_file = self.stage.save_filename
save_file = None
if self.stage.save_filename:
save_file = self.stage.save_filename
tty.msg('Fetching {0}'.format(url))
# Run urllib but grab the mime type from the http headers
@@ -387,18 +391,16 @@ def _fetch_urllib(self, url):
# clean up archive on failure.
if self.archive_file:
os.remove(self.archive_file)
if os.path.lexists(save_file):
if save_file and os.path.exists(save_file):
os.remove(save_file)
msg = 'urllib failed to fetch with error {0}'.format(e)
raise FailedDownloadError(url, msg)
if os.path.lexists(save_file):
os.remove(save_file)
with open(save_file, 'wb') as _open_file:
shutil.copyfileobj(response, _open_file)
self._check_headers(str(headers))
return None, save_file
@_needs_stage
def _fetch_curl(self, url):
@@ -459,7 +461,7 @@ def _fetch_curl(self, url):
if self.archive_file:
os.remove(self.archive_file)
if partial_file and os.path.lexists(partial_file):
if partial_file and os.path.exists(partial_file):
os.remove(partial_file)
if curl.returncode == 22:
@@ -486,9 +488,7 @@ def _fetch_curl(self, url):
"Curl failed with error %d" % curl.returncode)
self._check_headers(headers)
if save_file and (partial_file is not None):
os.rename(partial_file, save_file)
return partial_file, save_file
@property # type: ignore # decorated properties unsupported in mypy
@_needs_stage
@@ -642,7 +642,7 @@ def fetch(self):
# remove old symlink if one is there.
filename = self.stage.save_filename
if os.path.lexists(filename):
if os.path.exists(filename):
os.remove(filename)
# Symlink to local cached archive.

View File

@@ -406,12 +406,12 @@ def write(self, spec, color=None, out=None):
# Colors associated with each node in the DAG.
# Edges are colored by the node they point to.
self._name_to_color = {
spec.dag_hash(): self.colors[i % len(self.colors)]
spec.full_hash(): self.colors[i % len(self.colors)]
for i, spec in enumerate(nodes_in_topological_order)
}
# Frontier tracks open edges of the graph as it's written out.
self._frontier = [[spec.dag_hash()]]
self._frontier = [[spec.full_hash()]]
while self._frontier:
# Find an unexpanded part of frontier
i = find(self._frontier, lambda f: len(f) > 1)
@@ -488,16 +488,14 @@ def write(self, spec, color=None, out=None):
node = nodes_in_topological_order.pop()
# Find the named node in the frontier and draw it.
i = find(self._frontier, lambda f: node.dag_hash() in f)
i = find(self._frontier, lambda f: node.full_hash() in f)
self._node_line(i, node)
# Replace node with its dependencies
self._frontier.pop(i)
edges = sorted(
node.edges_to_dependencies(deptype=self.deptype), reverse=True
)
if edges:
deps = [e.spec.dag_hash() for e in edges]
deps = node.dependencies(deptype=self.deptype)
if deps:
deps = sorted((d.full_hash() for d in deps), reverse=True)
self._connect_deps(i, deps, "new-deps") # anywhere.
elif self._frontier:

View File

@@ -33,14 +33,15 @@ def attr(self):
"""Private attribute stored on spec"""
return '_' + self.name
def __call__(self, spec):
"""Run this hash on the provided spec."""
return spec.spec_hash(self)
#: Spack's deployment hash. Includes all inputs that can affect how a package is built.
#: Default Hash descriptor, used by Spec.dag_hash() and stored in the DB.
dag_hash = SpecHashDescriptor(
deptype=('build', 'link', 'run'), package_hash=True, name='hash')
deptype=('link', 'run'), package_hash=False, name='hash')
#: Hash descriptor that includes build dependencies.
build_hash = SpecHashDescriptor(
deptype=('build', 'link', 'run'), package_hash=False, name='build_hash')
#: Hash descriptor used only to transfer a DAG, as is, across processes
@@ -50,19 +51,12 @@ def __call__(self, spec):
name='process_hash'
)
#: Package hash used as part of dag hash
package_hash = SpecHashDescriptor(
deptype=(), package_hash=True, name='package_hash',
override=lambda s: s.package.content_hash())
# Deprecated hash types, no longer used, but needed to understand old serialized
# spec formats
#: Full hash used in build pipelines to determine when to rebuild packages.
full_hash = SpecHashDescriptor(
deptype=('build', 'link', 'run'), package_hash=True, name='full_hash')
build_hash = SpecHashDescriptor(
deptype=('build', 'link', 'run'), package_hash=False, name='build_hash')
#: Package hash used as part of full hash
package_hash = SpecHashDescriptor(
deptype=(), package_hash=True, name='package_hash',
override=lambda s: s.package.content_hash())

View File

@@ -140,7 +140,7 @@ def _handle_external_and_upstream(pkg, explicit):
.format(pkg.prefix, package_id(pkg)))
return True
if pkg.spec.installed_upstream:
if pkg.installed_upstream:
tty.verbose('{0} is installed in an upstream Spack instance at {1}'
.format(package_id(pkg), pkg.spec.prefix))
_print_installed_pkg(pkg.prefix)
@@ -260,7 +260,8 @@ def _hms(seconds):
return ' '.join(parts)
def _install_from_cache(pkg, cache_only, explicit, unsigned=False):
def _install_from_cache(pkg, cache_only, explicit, unsigned=False,
full_hash_match=False):
"""
Extract the package from binary cache
@@ -277,7 +278,7 @@ def _install_from_cache(pkg, cache_only, explicit, unsigned=False):
``False`` otherwise
"""
installed_from_cache = _try_install_from_binary_cache(
pkg, explicit, unsigned=unsigned)
pkg, explicit, unsigned=unsigned, full_hash_match=full_hash_match)
pkg_id = package_id(pkg)
if not installed_from_cache:
pre = 'No binary for {0} found'.format(pkg_id)
@@ -350,7 +351,7 @@ def _process_external_package(pkg, explicit):
def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
mirrors_for_spec=None):
preferred_mirrors=None):
"""
Process the binary cache tarball.
@@ -360,17 +361,17 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
explicit (bool): the package was explicitly requested by the user
unsigned (bool): ``True`` if binary package signatures to be checked,
otherwise, ``False``
mirrors_for_spec (list): Optional list of concrete specs and mirrors
obtained by calling binary_distribution.get_mirrors_for_spec().
preferred_mirrors (list): Optional list of urls to prefer when
attempting to download the tarball
Return:
bool: ``True`` if the package was extracted from binary cache,
else ``False``
"""
download_result = binary_distribution.download_tarball(
binary_spec, unsigned, mirrors_for_spec=mirrors_for_spec)
tarball = binary_distribution.download_tarball(
binary_spec, preferred_mirrors=preferred_mirrors)
# see #10063 : install from source if tarball doesn't exist
if download_result is None:
if tarball is None:
tty.msg('{0} exists in binary cache but with different hash'
.format(pkg.name))
return False
@@ -380,16 +381,17 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
# don't print long padded paths while extracting/relocating binaries
with spack.util.path.filter_padding():
binary_distribution.extract_tarball(binary_spec, download_result,
allow_root=False, unsigned=unsigned,
force=False)
binary_distribution.extract_tarball(
binary_spec, tarball, allow_root=False, unsigned=unsigned, force=False
)
pkg.installed_from_binary_cache = True
spack.store.db.add(pkg.spec, spack.store.layout, explicit=explicit)
return True
def _try_install_from_binary_cache(pkg, explicit, unsigned=False):
def _try_install_from_binary_cache(pkg, explicit, unsigned=False,
full_hash_match=False):
"""
Try to extract the package from binary cache.
@@ -401,13 +403,18 @@ def _try_install_from_binary_cache(pkg, explicit, unsigned=False):
"""
pkg_id = package_id(pkg)
tty.debug('Searching for binary cache of {0}'.format(pkg_id))
matches = binary_distribution.get_mirrors_for_spec(pkg.spec)
matches = binary_distribution.get_mirrors_for_spec(
pkg.spec, full_hash_match=full_hash_match)
if not matches:
return False
return _process_binary_cache_tarball(pkg, pkg.spec, explicit, unsigned,
mirrors_for_spec=matches)
# In the absence of guidance from user or some other reason to prefer one
# mirror over another, any match will suffice, so just pick the first one.
preferred_mirrors = [match['mirror_url'] for match in matches]
binary_spec = matches[0]['spec']
return _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
preferred_mirrors=preferred_mirrors)
def clear_failures():
@@ -554,10 +561,6 @@ def log(pkg):
# Archive the environment modifications for the build.
fs.install(pkg.env_mods_path, pkg.install_env_path)
# Archive the install-phase test log, if present
if pkg.test_install_log_path and os.path.exists(pkg.test_install_log_path):
fs.install(pkg.test_install_log_path, pkg.install_test_install_log_path)
if os.path.exists(pkg.configure_args_path):
# Archive the args used for the build
fs.install(pkg.configure_args_path, pkg.install_configure_args_path)
@@ -850,7 +853,7 @@ def _check_deps_status(self, request):
raise InstallError(err.format(request.pkg_id, msg))
# Flag external and upstream packages as being installed
if dep_pkg.spec.external or dep_pkg.spec.installed_upstream:
if dep_pkg.spec.external or dep_pkg.installed_upstream:
self._flag_installed(dep_pkg)
continue
@@ -992,7 +995,7 @@ def _ensure_install_ready(self, pkg):
raise ExternalPackageError('{0} {1}'.format(pre, 'is external'))
# Upstream packages cannot be installed locally.
if pkg.spec.installed_upstream:
if pkg.installed_upstream:
raise UpstreamPackageError('{0} {1}'.format(pre, 'is upstream'))
# The package must have a prefix lock at this stage.
@@ -1197,6 +1200,7 @@ def _install_task(self, task):
install_args = task.request.install_args
cache_only = install_args.get('cache_only')
explicit = task.explicit
full_hash_match = install_args.get('full_hash_match')
tests = install_args.get('tests')
unsigned = install_args.get('unsigned')
use_cache = install_args.get('use_cache')
@@ -1209,7 +1213,8 @@ def _install_task(self, task):
# Use the binary cache if requested
if use_cache and \
_install_from_cache(pkg, cache_only, explicit, unsigned):
_install_from_cache(pkg, cache_only, explicit, unsigned,
full_hash_match):
self._update_installed(task)
if task.compiler:
spack.compilers.add_compilers_to_config(
@@ -2013,10 +2018,11 @@ def build_process(pkg, install_args):
class OverwriteInstall(object):
def __init__(self, installer, database, task):
def __init__(self, installer, database, task, tmp_root=None):
self.installer = installer
self.database = database
self.task = task
self.tmp_root = tmp_root
def install(self):
"""
@@ -2026,7 +2032,7 @@ def install(self):
install error if installation fails.
"""
try:
with fs.replace_directory_transaction(self.task.pkg.prefix):
with fs.replace_directory_transaction(self.task.pkg.prefix, self.tmp_root):
self.installer._install_task(self.task)
except fs.CouldNotRestoreDirectoryBackup as e:
self.database.remove(self.task.pkg.spec)
@@ -2297,6 +2303,7 @@ def _add_default_args(self):
('dirty', False),
('fail_fast', False),
('fake', False),
('full_hash_match', False),
('install_deps', True),
('install_package', True),
('install_source', False),

View File

@@ -30,7 +30,7 @@
import llnl.util.tty as tty
import llnl.util.tty.colify
import llnl.util.tty.color as color
from llnl.util.tty.log import log_output
from llnl.util.tty.log import log_output, winlog
import spack
import spack.cmd
@@ -375,6 +375,13 @@ def make_argument_parser(**kwargs):
# stat names in groups of 7, for nice wrapping.
stat_lines = list(zip(*(iter(stat_names),) * 7))
# help message for --show-cores
show_cores_help = 'provide additional information on concretization failures\n'
show_cores_help += 'off (default): show only the violated rule\n'
show_cores_help += 'full: show raw unsat cores from clingo\n'
show_cores_help += 'minimized: show subset-minimal unsat cores '
show_cores_help += '(Warning: this may take hours for some specs)'
parser.add_argument(
'-h', '--help',
dest='help', action='store_const', const='short', default=None,
@@ -398,6 +405,9 @@ def make_argument_parser(**kwargs):
'-d', '--debug', action='count', default=0,
help="write out debug messages "
"(more d's for more verbosity: -d, -dd, -ddd, etc.)")
parser.add_argument(
'--show-cores', choices=["off", "full", "minimized"], default="off",
help=show_cores_help)
parser.add_argument(
'--timestamp', action='store_true',
help="Add a timestamp to tty output")
@@ -480,11 +490,18 @@ def setup_main_options(args):
# errors raised by spack.config.
if args.debug:
spack.error.debug = args.debug
spack.error.debug = True
spack.util.debug.register_interrupt_handler()
spack.config.set('config:debug', True, scope='command_line')
spack.util.environment.tracing_enabled = True
if args.show_cores != "off":
# minimize_cores defaults to true, turn it off if we're showing full core
# but don't want to wait to minimize it.
spack.solver.asp.full_cores = True
if args.show_cores == 'full':
spack.solver.asp.minimize_cores = False
if args.timestamp:
tty.set_timestamp(True)
@@ -588,9 +605,14 @@ def __call__(self, *argv, **kwargs):
out = StringIO()
try:
with log_output(out):
self.returncode = _invoke_command(
self.command, self.parser, args, unknown)
if sys.platform == 'win32':
with winlog(out):
self.returncode = _invoke_command(
self.command, self.parser, args, unknown)
else:
with log_output(out):
self.returncode = _invoke_command(
self.command, self.parser, args, unknown)
except SystemExit as e:
self.returncode = e.code

View File

@@ -184,38 +184,19 @@ def _filter_compiler_wrappers_impl(self):
x = llnl.util.filesystem.FileFilter(*abs_files)
compiler_vars = [
replacements = [
('CC', self.compiler.cc),
('CXX', self.compiler.cxx),
('F77', self.compiler.f77),
('FC', self.compiler.fc)
]
# Some paths to the compiler wrappers might be substrings of the others.
# For example:
# CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper)
# FC=/path/to/spack/lib/spack/env/cce/ftn
# Therefore, we perform the filtering in the reversed sorted order of
# the substituted strings. If, however, the strings are identical (e.g.
# both CC and FC are set using realpath), the filtering is done
# according to the order in compiler_vars. To achieve that, we populate
# the following array with tuples of three elements: path to the
# wrapper, negated index of the variable in compiler_vars, path to the
# real compiler. This way, the reversed sorted order of the resulting
# array is the order of replacements that we need.
replacements = []
for idx, (env_var, compiler_path) in enumerate(compiler_vars):
for env_var, compiler_path in replacements:
if env_var in os.environ:
# filter spack wrapper and links to spack wrapper in case
# build system runs realpath
wrapper = os.environ[env_var]
for wrapper_path in (wrapper, os.path.realpath(wrapper)):
replacements.append((wrapper_path, -idx, compiler_path))
for wrapper_path, _, compiler_path in sorted(replacements,
reverse=True):
x.filter(wrapper_path, compiler_path, **filter_kwargs)
x.filter(wrapper_path, compiler_path, **filter_kwargs)
# Remove this linking flag if present (it turns RPATH into RUNPATH)
x.filter('{0}--enable-new-dtags'.format(self.compiler.linker_arg), '',

View File

@@ -370,7 +370,7 @@ def get_module(
available.
"""
try:
upstream = spec.installed_upstream
upstream = spec.package.installed_upstream
except spack.repo.UnknownPackageError:
upstream, record = spack.store.db.query_by_spec_hash(spec.dag_hash())
if upstream:

View File

@@ -132,7 +132,7 @@ def __init__(self, host=None, prefix="ms1", allow_fail=False, tags=None,
self.tags = tags
self.save_local = save_local
# We key lookup of build_id by dag_hash
# We keey lookup of build_id by full_hash
self.build_ids = {}
self.setup_save()
@@ -412,8 +412,6 @@ def new_configuration(self, specs):
spec.concretize()
# Remove extra level of nesting
# This is the only place in Spack we still use full_hash, as `spack monitor`
# requires specs with full_hash-keyed dependencies.
as_dict = {"spec": spec.to_dict(hash=ht.full_hash)['spec'],
"spack_version": self.spack_version}
@@ -439,7 +437,8 @@ def failed_concretization(self, specs):
meta = spec.to_dict()['spec']
nodes = []
for node in meta.get("nodes", []):
node["full_hash"] = "FAILED_CONCRETIZATION"
for hashtype in ["build_hash", "full_hash"]:
node[hashtype] = "FAILED_CONCRETIZATION"
nodes.append(node)
meta['nodes'] = nodes
@@ -471,13 +470,13 @@ def get_build_id(self, spec, return_response=False, spec_exists=True):
"""
Retrieve a build id, either in the local cache, or query the server.
"""
dag_hash = spec.dag_hash()
if dag_hash in self.build_ids:
return self.build_ids[dag_hash]
full_hash = spec.full_hash()
if full_hash in self.build_ids:
return self.build_ids[full_hash]
# Prepare build environment data (including spack version)
data = self.build_environment.copy()
data['full_hash'] = dag_hash
data['full_hash'] = full_hash
# If the build should be tagged, add it
if self.tags:
@@ -495,10 +494,10 @@ def get_build_id(self, spec, return_response=False, spec_exists=True):
data['spec'] = syaml.load(read_file(spec_file))
if self.save_local:
return self.get_local_build_id(data, dag_hash, return_response)
return self.get_server_build_id(data, dag_hash, return_response)
return self.get_local_build_id(data, full_hash, return_response)
return self.get_server_build_id(data, full_hash, return_response)
def get_local_build_id(self, data, dag_hash, return_response):
def get_local_build_id(self, data, full_hash, return_response):
"""
Generate a local build id based on hashing the expected data
"""
@@ -511,15 +510,15 @@ def get_local_build_id(self, data, dag_hash, return_response):
return response
return bid
def get_server_build_id(self, data, dag_hash, return_response=False):
def get_server_build_id(self, data, full_hash, return_response=False):
"""
Retrieve a build id from the spack monitor server
"""
response = self.do_request("builds/new/", data=sjson.dump(data))
# Add the build id to the lookup
bid = self.build_ids[dag_hash] = response['data']['build']['build_id']
self.build_ids[dag_hash] = bid
bid = self.build_ids[full_hash] = response['data']['build']['build_id']
self.build_ids[full_hash] = bid
# If the function is called directly, the user might want output
if return_response:

View File

@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform as py_platform
import re
from subprocess import check_output
from spack.version import Version
@@ -52,17 +51,6 @@ def __init__(self):
if 'ubuntu' in distname:
version = '.'.join(version[0:2])
# openSUSE Tumbleweed is a rolling release which can change
# more than once in a week, so set version to tumbleweed$GLIBVERS
elif 'opensuse-tumbleweed' in distname or 'opensusetumbleweed' in distname:
distname = 'opensuse'
output = check_output(["ldd", "--version"]).decode()
libcvers = re.findall(r'ldd \(GNU libc\) (.*)', output)
if len(libcvers) == 1:
version = 'tumbleweed' + libcvers[0]
else:
version = 'tumbleweed' + version[0]
else:
version = version[0]

View File

@@ -26,14 +26,13 @@
import time
import traceback
import types
import warnings
from typing import Any, Callable, Dict, List, Optional # novm
import six
import llnl.util.filesystem as fsys
import llnl.util.tty as tty
from llnl.util.lang import memoized, nullcontext
from llnl.util.lang import memoized
from llnl.util.link_tree import LinkTree
import spack.compilers
@@ -53,7 +52,6 @@
import spack.store
import spack.url
import spack.util.environment
import spack.util.path
import spack.util.web
from spack.filesystem_view import YamlFilesystemView
from spack.install_test import TestFailure, TestSuite
@@ -61,6 +59,7 @@
from spack.stage import ResourceStage, Stage, StageComposite, stage_prefix
from spack.util.executable import ProcessError, which
from spack.util.package_hash import package_hash
from spack.util.path import win_exe_ext
from spack.util.prefix import Prefix
from spack.version import Version
@@ -77,9 +76,6 @@
# Filename for the Spack build/install environment modifications file.
_spack_build_envmodsfile = 'spack-build-env-mods.txt'
# Filename for the Spack install phase-time test log.
_spack_install_test_log = 'install-time-test-log.txt'
# Filename of json with total build and phase times (seconds)
_spack_times_log = 'install_times.json'
@@ -200,9 +196,9 @@ def __init__(cls, name, bases, attr_dict):
def platform_executables(self):
def to_windows_exe(exe):
if exe.endswith('$'):
exe = exe.replace('$', '%s$' % spack.util.path.win_exe_ext())
exe = exe.replace('$', '%s$' % win_exe_ext())
else:
exe += spack.util.path.win_exe_ext()
exe += win_exe_ext()
return exe
plat_exe = []
if hasattr(self, 'executables'):
@@ -438,11 +434,6 @@ def name(self):
self._name = self._name[self._name.rindex('.') + 1:]
return self._name
@property
def global_license_dir(self):
"""Returns the directory where license files for all packages are stored."""
return spack.util.path.canonicalize_path(spack.config.get('config:license_dir'))
def run_before(*phases):
"""Registers a method of a package to be run before a given phase"""
@@ -799,6 +790,15 @@ def __init__(self, spec):
super(PackageBase, self).__init__()
@property
def installed_upstream(self):
if not hasattr(self, '_installed_upstream'):
upstream, record = spack.store.db.query_by_spec_hash(
self.spec.dag_hash())
self._installed_upstream = upstream
return self._installed_upstream
@classmethod
def possible_dependencies(
cls, transitive=True, expand_virtuals=True, deptype='all',
@@ -943,8 +943,9 @@ def name(self):
@property
def global_license_dir(self):
"""Returns the directory where global license files are stored."""
return type(self).global_license_dir
"""Returns the directory where global license files for all
packages are stored."""
return os.path.join(spack.paths.prefix, 'etc', 'spack', 'licenses')
@property
def global_license_file(self):
@@ -1251,16 +1252,6 @@ def configure_args_path(self):
"""Return the configure args file path associated with staging."""
return os.path.join(self.stage.path, _spack_configure_argsfile)
@property
def test_install_log_path(self):
"""Return the install phase-time test log file path, if set."""
return getattr(self, 'test_log_file', None)
@property
def install_test_install_log_path(self):
"""Return the install location for the install phase-time test log."""
return fsys.join_path(self.metadata_dir, _spack_install_test_log)
@property
def times_log_path(self):
"""Return the times log json file."""
@@ -1276,20 +1267,6 @@ def install_test_root(self):
"""Return the install test root directory."""
return os.path.join(self.metadata_dir, 'test')
@property
def installed(self):
msg = ('the "PackageBase.installed" property is deprecated and will be '
'removed in Spack v0.19, use "Spec.installed" instead')
warnings.warn(msg)
return self.spec.installed
@property
def installed_upstream(self):
msg = ('the "PackageBase.installed_upstream" property is deprecated and will '
'be removed in Spack v0.19, use "Spec.installed_upstream" instead')
warnings.warn(msg)
return self.spec.installed_upstream
def _make_fetcher(self):
# Construct a composite fetcher that always contains at least
# one element (the root package). In case there are resources
@@ -1403,7 +1380,7 @@ def is_activated(self, view):
if not self.is_extension:
raise ValueError(
"is_activated called on package that is not an extension.")
if self.extendee_spec.installed_upstream:
if self.extendee_spec.package.installed_upstream:
# If this extends an upstream package, it cannot be activated for
# it. This bypasses construction of the extension map, which can
# can fail when run in the context of a downstream Spack instance
@@ -1429,6 +1406,22 @@ def virtuals_provided(self):
return [vspec for vspec, constraints in self.provided.items()
if any(self.spec.satisfies(c) for c in constraints)]
@property
def installed(self):
"""Installation status of a package.
Returns:
True if the package has been installed, False otherwise.
"""
try:
# If the spec is in the DB, check the installed
# attribute of the record
return spack.store.db.get_record(self.spec).installed
except KeyError:
# If the spec is not in the DB, the method
# above raises a Key error
return False
@property
def prefix(self):
"""Get the prefix into which this package should be installed."""
@@ -1677,65 +1670,39 @@ def all_patches(cls):
return patches
def content_hash(self, content=None):
"""Create a hash based on the artifacts and patches used to build this package.
"""Create a hash based on the sources and logic used to build the
package. This includes the contents of all applied patches and the
contents of applicable functions in the package subclass."""
if not self.spec.concrete:
err_msg = ("Cannot invoke content_hash on a package"
" if the associated spec is not concrete")
raise spack.error.SpackError(err_msg)
This includes:
* source artifacts (tarballs, repositories) used to build;
* content hashes (``sha256``'s) of all patches applied by Spack; and
* canonicalized contents the ``package.py`` recipe used to build.
This hash is only included in Spack's DAG hash for concrete specs, but if it
happens to be called on a package with an abstract spec, only applicable (i.e.,
determinable) portions of the hash will be included.
"""
# list of components to make up the hash
hash_content = []
# source artifacts/repositories
# TODO: resources
if self.spec.versions.concrete:
try:
source_id = fs.for_package_version(self, self.version).source_id()
except (fs.ExtrapolationError, fs.InvalidArgsError):
# ExtrapolationError happens if the package has no fetchers defined.
# InvalidArgsError happens when there are version directives with args,
# but none of them identifies an actual fetcher.
source_id = None
if not source_id:
# TODO? in cases where a digest or source_id isn't available,
# should this attempt to download the source and set one? This
# probably only happens for source repositories which are
# referenced by branch name rather than tag or commit ID.
env = spack.environment.active_environment()
from_local_sources = env and env.is_develop(self.spec)
if not self.spec.external and not from_local_sources:
message = 'Missing a source id for {s.name}@{s.version}'
tty.warn(message.format(s=self))
hash_content.append(''.encode('utf-8'))
else:
hash_content.append(source_id.encode('utf-8'))
# patch sha256's
# Only include these if they've been assigned by the concretizer.
# We check spec._patches_assigned instead of spec.concrete because
# we have to call package_hash *before* marking specs concrete
if self.spec._patches_assigned():
hash_content.extend(
':'.join((p.sha256, str(p.level))).encode('utf-8')
for p in self.spec.patches
)
# package.py contents
hash_content = list()
try:
source_id = fs.for_package_version(self, self.version).source_id()
except fs.ExtrapolationError:
source_id = None
if not source_id:
# TODO? in cases where a digest or source_id isn't available,
# should this attempt to download the source and set one? This
# probably only happens for source repositories which are
# referenced by branch name rather than tag or commit ID.
env = spack.environment.active_environment()
from_local_sources = env and env.is_develop(self.spec)
if not self.spec.external and not from_local_sources:
message = 'Missing a source id for {s.name}@{s.version}'
tty.warn(message.format(s=self))
hash_content.append(''.encode('utf-8'))
else:
hash_content.append(source_id.encode('utf-8'))
hash_content.extend(':'.join((p.sha256, str(p.level))).encode('utf-8')
for p in self.spec.patches)
hash_content.append(package_hash(self.spec, source=content).encode('utf-8'))
# put it all together and encode as base32
b32_hash = base64.b32encode(
hashlib.sha256(
bytes().join(sorted(hash_content))
).digest()
).lower()
hashlib.sha256(bytes().join(
sorted(hash_content))).digest()).lower()
# convert from bytes if running python 3
if sys.version_info[0] >= 3:
@@ -1959,33 +1926,6 @@ def cache_extra_test_sources(self, srcs):
fsys.mkdirp(os.path.dirname(dest_path))
fsys.copy(src_path, dest_path)
@contextlib.contextmanager
def _setup_test(self, verbose, externals):
self.test_failures = []
if self.test_suite:
self.test_log_file = self.test_suite.log_file_for_spec(self.spec)
self.tested_file = self.test_suite.tested_file_for_spec(self.spec)
pkg_id = self.test_suite.test_pkg_id(self.spec)
else:
self.test_log_file = fsys.join_path(
self.stage.path, _spack_install_test_log)
pkg_id = self.spec.format('{name}-{version}-{hash:7}')
fsys.touch(self.test_log_file) # Otherwise log_parse complains
with tty.log.log_output(self.test_log_file, verbose) as logger:
with logger.force_echo():
tty.msg('Testing package {0}'.format(pkg_id))
# use debug print levels for log file to record commands
old_debug = tty.is_debug()
tty.set_debug(True)
try:
yield logger
finally:
# reset debug level
tty.set_debug(old_debug)
def do_test(self, dirty=False, externals=False):
if self.test_requires_compiler:
compilers = spack.compilers.compilers_for_spec(
@@ -1997,14 +1937,19 @@ def do_test(self, dirty=False, externals=False):
self.spec.compiler)
return
# Clear test failures
self.test_failures = []
self.test_log_file = self.test_suite.log_file_for_spec(self.spec)
self.tested_file = self.test_suite.tested_file_for_spec(self.spec)
fsys.touch(self.test_log_file) # Otherwise log_parse complains
kwargs = {
'dirty': dirty, 'fake': False, 'context': 'test',
'externals': externals
}
if tty.is_verbose():
kwargs['verbose'] = True
spack.build_environment.start_build_process(
self, test_process, kwargs)
spack.build_environment.start_build_process(self, test_process, kwargs)
def test(self):
# Defer tests to virtual and concrete packages
@@ -2198,21 +2143,21 @@ def build_log_path(self):
to the staging build file until the software is successfully installed,
when it points to the file in the installation directory.
"""
return self.install_log_path if self.spec.installed else self.log_path
return self.install_log_path if self.installed else self.log_path
@classmethod
def inject_flags(cls, name, flags):
"""
flag_handler that injects all flags through the compiler wrapper.
"""
return flags, None, None
return (flags, None, None)
@classmethod
def env_flags(cls, name, flags):
"""
flag_handler that adds all flags to canonical environment variables.
"""
return None, flags, None
return (None, flags, None)
@classmethod
def build_system_flags(cls, name, flags):
@@ -2223,7 +2168,7 @@ def build_system_flags(cls, name, flags):
implements it. Currently, AutotoolsPackage and CMakePackage
implement it.
"""
return None, None, flags
return (None, None, flags)
def setup_build_environment(self, env):
"""Sets up the build environment for a package.
@@ -2378,11 +2323,7 @@ def uninstall_by_spec(spec, force=False, deprecator=None):
if not force:
dependents = spack.store.db.installed_relatives(
spec,
direction='parents',
transitive=True,
deptype=("link", "run"),
)
spec, 'parents', True)
if dependents:
raise PackageStillNeededError(spec, dependents)
@@ -2524,10 +2465,10 @@ def _sanity_check_extension(self):
extendee_package = self.extendee_spec.package
extendee_package._check_extendable()
if not self.extendee_spec.installed:
if not extendee_package.installed:
raise ActivationError(
"Can only (de)activate extensions for installed packages.")
if not self.spec.installed:
if not self.installed:
raise ActivationError("Extensions must first be installed.")
if self.extendee_spec.name not in self.extendees:
raise ActivationError("%s does not extend %s!" %
@@ -2753,54 +2694,45 @@ def rpath_args(self):
"""
return " ".join("-Wl,-rpath,%s" % p for p in self.rpath)
def _run_test_callbacks(self, method_names, callback_type='install'):
"""Tries to call all of the listed methods, returning immediately
if the list is None."""
if method_names is None:
return
fail_fast = spack.config.get('config:fail_fast', False)
with self._setup_test(verbose=False, externals=False) as logger:
# Report running each of the methods in the build log
print_test_message(
logger, 'Running {0}-time tests'.format(callback_type), True)
for name in method_names:
try:
fn = getattr(self, name)
msg = 'RUN-TESTS: {0}-time tests [{1}]' \
.format(callback_type, name),
print_test_message(logger, msg, True)
fn()
except AttributeError as e:
msg = 'RUN-TESTS: method not implemented [{0}]' \
.format(name),
print_test_message(logger, msg, True)
self.test_failures.append((e, msg))
if fail_fast:
break
# Raise any collected failures here
if self.test_failures:
raise TestFailure(self.test_failures)
@on_package_attributes(run_tests=True)
def _run_default_build_time_test_callbacks(self):
"""Tries to call all the methods that are listed in the attribute
``build_time_test_callbacks`` if ``self.run_tests is True``.
If ``build_time_test_callbacks is None`` returns immediately.
"""
self._run_test_callbacks(self.build_time_test_callbacks, 'build')
if self.build_time_test_callbacks is None:
return
for name in self.build_time_test_callbacks:
try:
fn = getattr(self, name)
except AttributeError:
msg = 'RUN-TESTS: method not implemented [{0}]'
tty.warn(msg.format(name))
else:
tty.msg('RUN-TESTS: build-time tests [{0}]'.format(name))
fn()
@on_package_attributes(run_tests=True)
def _run_default_install_time_test_callbacks(self):
"""Tries to call all the methods that are listed in the attribute
``install_time_test_callbacks`` if ``self.run_tests is True``.
If ``install_time_test_callbacks is None`` returns immediately.
"""
self._run_test_callbacks(self.install_time_test_callbacks, 'install')
if self.install_time_test_callbacks is None:
return
for name in self.install_time_test_callbacks:
try:
fn = getattr(self, name)
except AttributeError:
msg = 'RUN-TESTS: method not implemented [{0}]'
tty.warn(msg.format(name))
else:
tty.msg('RUN-TESTS: install-time tests [{0}]'.format(name))
fn()
def has_test_method(pkg):
@@ -2825,21 +2757,27 @@ def has_test_method(pkg):
def print_test_message(logger, msg, verbose):
if verbose:
with logger.force_echo():
tty.msg(msg)
print(msg)
else:
tty.msg(msg)
print(msg)
def test_process(pkg, kwargs):
verbose = kwargs.get('verbose', False)
externals = kwargs.get('externals', False)
with tty.log.log_output(pkg.test_log_file, verbose) as logger:
with logger.force_echo():
tty.msg('Testing package {0}'
.format(pkg.test_suite.test_pkg_id(pkg.spec)))
with pkg._setup_test(verbose, externals) as logger:
if pkg.spec.external and not externals:
print_test_message(
logger, 'Skipped tests for external package', verbose)
print_test_message(logger, 'Skipped external package', verbose)
return
# use debug print levels for log file to record commands
old_debug = tty.is_debug()
tty.set_debug(True)
# run test methods from the package and all virtuals it
# provides virtuals have to be deduped by name
v_names = list(set([vspec.name
@@ -2858,7 +2796,8 @@ def test_process(pkg, kwargs):
ran_actual_test_function = False
try:
with fsys.working_dir(pkg.test_suite.test_dir_for_spec(pkg.spec)):
with fsys.working_dir(
pkg.test_suite.test_dir_for_spec(pkg.spec)):
for spec in test_specs:
pkg.test_suite.current_test_spec = spec
# Fail gracefully if a virtual has no package/tests
@@ -2900,9 +2839,7 @@ def test_process(pkg, kwargs):
# Run the tests
ran_actual_test_function = True
context = logger.force_echo if verbose else nullcontext
with context():
test_fn(pkg)
test_fn(pkg)
# If fail-fast was on, we error out above
# If we collect errors, raise them in batch here
@@ -2910,12 +2847,15 @@ def test_process(pkg, kwargs):
raise TestFailure(pkg.test_failures)
finally:
# reset debug level
tty.set_debug(old_debug)
# flag the package as having been tested (i.e., ran one or more
# non-pass-only methods
if ran_actual_test_function:
fsys.touch(pkg.tested_file)
else:
print_test_message(logger, 'No tests to run', verbose)
print_test_message(logger, 'No tests to run', verbose)
inject_flags = PackageBase.inject_flags

View File

@@ -123,11 +123,11 @@ def accept(self, id):
def next_token_error(self, message):
"""Raise an error about the next token in the stream."""
raise ParseError(message, self.text[0], self.token.end)
raise ParseError(message, self.text, self.token.end)
def last_token_error(self, message):
"""Raise an error about the previous token in the stream."""
raise ParseError(message, self.text[0], self.token.start)
raise ParseError(message, self.text, self.token.start)
def unexpected_token(self):
self.next_token_error("Unexpected token: '%s'" % self.next.value)

View File

@@ -43,12 +43,8 @@
hooks_path = os.path.join(module_path, "hooks")
opt_path = os.path.join(prefix, "opt")
share_path = os.path.join(prefix, "share", "spack")
etc_path = os.path.join(prefix, "etc", "spack")
etc_path = os.path.join(prefix, "etc")
#
# Things in $spack/etc/spack
#
default_license_dir = os.path.join(etc_path, "licenses")
#
# Things in $spack/var/spack

View File

@@ -25,7 +25,6 @@
from spack.build_systems.cuda import CudaPackage
from spack.build_systems.gnu import GNUMirrorPackage
from spack.build_systems.intel import IntelPackage
from spack.build_systems.lua import LuaPackage
from spack.build_systems.makefile import MakefilePackage
from spack.build_systems.maven import MavenPackage
from spack.build_systems.meson import MesonPackage

View File

@@ -873,7 +873,7 @@ def is_relocatable(spec):
Raises:
ValueError: if the spec is not installed
"""
if not spec.install_status():
if not spec.installed():
raise ValueError('spec is not installed [{0}]'.format(str(spec)))
if spec.external or spec.virtual:

View File

@@ -355,17 +355,9 @@ def list_packages(rev):
ref = rev.replace('...', '')
rev = git('merge-base', ref, 'HEAD', output=str).strip()
output = git('ls-tree', '-r', '--name-only', rev, output=str)
# recursively list the packages directory
package_paths = [
line.split(os.sep) for line in output.split("\n") if line.endswith("package.py")
]
# take the directory names with one-level-deep package files
package_names = sorted(set([line[0] for line in package_paths if len(line) == 2]))
return package_names
output = git('ls-tree', '--name-only', rev, output=str)
return sorted(line for line in output.split('\n')
if line and not line.startswith('.'))
def diff_packages(rev1, rev2):
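The recursive variant above derives package names from the full tree listing. A small illustration with made-up sample output shows the filtering: keep paths ending in package.py, split them into components, and take the first component of entries that are exactly two levels deep (<name>/package.py). The hunk splits on os.sep; forward slashes are used here because git ls-tree always prints forward-slash paths.

sample_output = "\n".join([
    "zlib/package.py",
    "zlib/patches/w64.patch",
    "py-numpy/package.py",
    ".gitignore",
])

package_paths = [
    line.split("/") for line in sample_output.split("\n")
    if line.endswith("package.py")
]
package_names = sorted(set(p[0] for p in package_paths if len(p) == 2))
print(package_names)   # ['py-numpy', 'zlib']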

View File

@@ -112,7 +112,8 @@ def __enter__(self):
# Check which specs are already installed and mark them as skipped
# only for install_task
if self.do_fn == '_install_task':
for dep in filter(lambda x: x.installed, input_spec.traverse()):
for dep in filter(lambda x: x.package.installed,
input_spec.traverse()):
package = {
'name': dep.name,
'id': dep.dag_hash(),
@@ -139,7 +140,7 @@ def wrapper(instance, *args, **kwargs):
raise Exception
# We accounted before for what is already installed
installed_already = pkg.spec.installed
installed_already = pkg.installed
package = {
'name': pkg.name,

View File

@@ -38,13 +38,13 @@ def rewire(spliced_spec):
nodes in the DAG of that spec."""
assert spliced_spec.spliced
for spec in spliced_spec.traverse(order='post', root=True):
if not spec.build_spec.installed:
if not spec.build_spec.package.installed:
# TODO: May want to change this at least for the root spec...
# spec.build_spec.package.do_install(force=True)
raise PackageNotInstalledError(spliced_spec,
spec.build_spec,
spec)
if spec.build_spec is not spec and not spec.installed:
if spec.build_spec is not spec and not spec.package.installed:
explicit = spec is spliced_spec
rewire_node(spec, explicit)
@@ -95,8 +95,7 @@ def rewire_node(spec, explicit):
spec.prefix)
relocate.relocate_text_bin(binaries=bins_to_relocate,
prefixes=prefix_to_prefix)
# Copy package into place, except for spec.json (because spec.json
# describes the old spec and not the new spliced spec).
# copy package into place (shutil.copytree)
shutil.copytree(os.path.join(tempdir, spec.dag_hash()), spec.prefix,
ignore=shutil.ignore_patterns('spec.json',
'install_manifest.json'))
@@ -105,10 +104,7 @@ def rewire_node(spec, explicit):
spec.build_spec.prefix,
spec.prefix)
shutil.rmtree(tempdir)
# Above, we did not copy spec.json: instead, here we write the new
# (spliced) spec into spec.json, without this, Database.add would fail on
# the next line (because it checks the spec.json in the prefix against the
# spec being added to look for mismatches)
# handle all metadata changes; don't copy over spec.json file in .spack/
spack.store.layout.write_spec(spec, spack.store.layout.spec_file_path(spec))
# add to database, not sure about explicit
spack.store.db.add(spec, spack.store.layout, explicit=explicit)
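As a toy, self-contained illustration of the copy-then-rewrite-metadata step (temporary directories and file names are made up): the staged tree is copied into the prefix with the stale spec.json filtered out, and a fresh metadata file describing the spliced spec is written afterwards so the database's check of the prefix contents sees the new spec rather than the old build spec.

import os
import shutil
import tempfile

staged = tempfile.mkdtemp()                          # stand-in for tempdir/<hash>
prefix = os.path.join(tempfile.mkdtemp(), 'prefix')  # stand-in for spec.prefix

with open(os.path.join(staged, 'spec.json'), 'w') as f:
    f.write('{"spec": "old build spec"}')
with open(os.path.join(staged, 'libfoo.so'), 'w') as f:
    f.write('')

shutil.copytree(staged, prefix,
                ignore=shutil.ignore_patterns('spec.json',
                                              'install_manifest.json'))

# stand-in for spack.store.layout.write_spec(spec, spec_file_path(spec))
with open(os.path.join(prefix, 'spec.json'), 'w') as f:
    f.write('{"spec": "new spliced spec"}')

print(sorted(os.listdir(prefix)))                    # ['libfoo.so', 'spec.json']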

View File

@@ -3,7 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from io import BufferedReader, IOBase
from io import BufferedReader
import six
import six.moves.urllib.error as urllib_error
@@ -23,15 +23,11 @@
# https://github.com/python/cpython/pull/3249
class WrapStream(BufferedReader):
def __init__(self, raw):
# In botocore >=1.23.47, StreamingBody inherits from IOBase, so we
# only add missing attributes in older versions.
# https://github.com/boto/botocore/commit/a624815eabac50442ed7404f3c4f2664cd0aa784
if not isinstance(raw, IOBase):
raw.readable = lambda: True
raw.writable = lambda: False
raw.seekable = lambda: False
raw.closed = False
raw.flush = lambda: None
raw.readable = lambda: True
raw.writable = lambda: False
raw.seekable = lambda: False
raw.closed = False
raw.flush = lambda: None
super(WrapStream, self).__init__(raw)
def detach(self):
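A minimal, self-contained sketch of the conditional patching shown above (OldStyleBody is a made-up stand-in for a pre-1.23.47 botocore StreamingBody): only objects that are not already io.IOBase subclasses get the file-like attributes that BufferedReader expects.

from io import IOBase

class OldStyleBody(object):          # does not inherit from IOBase
    def read(self, size=-1):
        return b''

def add_missing_file_attrs(raw):
    if not isinstance(raw, IOBase):
        raw.readable = lambda: True
        raw.writable = lambda: False
        raw.seekable = lambda: False
        raw.closed = False
        raw.flush = lambda: None
    return raw

body = add_missing_file_attrs(OldStyleBody())
print(body.readable())   # True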

View File

@@ -4,8 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This module contains jsonschema files for all of Spack's YAML formats."""
import warnings
import six
import llnl.util.lang
@@ -51,12 +49,10 @@ def _deprecated_properties(validator, deprecated, instance, schema):
msg = msg_str_or_func.format(properties=deprecated_properties)
else:
msg = msg_str_or_func(instance, deprecated_properties)
if msg is None:
return
is_error = deprecated['error']
if not is_error:
warnings.warn(msg)
llnl.util.tty.warn(msg)
else:
import jsonschema
yield jsonschema.ValidationError(msg)
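A minimal sketch (simplified, hypothetical helper name) of the warn-or-error decision in the hunk above: a None message means there is nothing to report, a non-error deprecation is surfaced as a warning, and an error-level one is yielded as a jsonschema ValidationError. The helper is written as a generator because jsonschema collects validator errors by iterating.

import warnings
import jsonschema

def report_deprecation(msg, is_error):
    if msg is None:
        return
    if not is_error:
        warnings.warn(msg)
    else:
        yield jsonschema.ValidationError(msg)

# forcing iteration emits the warning; an error-level call would yield one error
list(report_deprecation('option "foo" is deprecated, use "bar" instead', False))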

View File

@@ -9,10 +9,12 @@
'type': 'object',
'properties': {
'name': {'type': 'string'},
'metadata': {'type': 'string'}
'description': {'type': 'string'},
'type': {'type': 'string'},
'info': {'type': 'object'}
},
'additionalProperties': False,
'required': ['name', 'metadata']
'required': ['name', 'description', 'type']
}
properties = {

View File

@@ -37,6 +37,5 @@
'hash': {'type': 'string'},
},
},
'buildcache_layout_version': {'type': 'number'}
},
}

Some files were not shown because too many files have changed in this diff.