Compare commits

Comparing minimal-co... with features/r... (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 771c4e4017 |  |

.github/workflows/bootstrap.yml (134 changes, vendored)
@@ -24,7 +24,6 @@ jobs:
|
||||
fedora-clingo-sources:
|
||||
runs-on: ubuntu-latest
|
||||
container: "fedora:latest"
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
@@ -32,20 +31,14 @@ jobs:
|
||||
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
|
||||
make patch unzip which xz python3 python3-devel tree \
|
||||
cmake bison bison-devel libstdc++-static
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- name: Setup non-root user
|
||||
run: |
|
||||
# See [1] below
|
||||
git config --global --add safe.directory /__w/spack/spack
|
||||
useradd spack-test && mkdir -p ~spack-test
|
||||
chown -R spack-test . ~spack-test
|
||||
- name: Setup repo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
. .github/workflows/setup_git.sh
|
||||
useradd spack-test
|
||||
chown -R spack-test .
|
||||
- name: Bootstrap clingo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
run: |
|
||||
@@ -58,7 +51,6 @@ jobs:
|
||||
ubuntu-clingo-sources:
|
||||
runs-on: ubuntu-latest
|
||||
container: "ubuntu:latest"
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
env:
|
||||
@@ -69,20 +61,22 @@ jobs:
|
||||
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
|
||||
make patch unzip xz-utils python3 python3-dev tree \
|
||||
cmake bison
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- name: Setup non-root user
|
||||
- name: Work around CVE-2022-24765
|
||||
run: |
|
||||
# See [1] below
|
||||
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
|
||||
# a breaking behavior. See:
|
||||
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
|
||||
# - https://github.com/actions/checkout/issues/760
|
||||
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
|
||||
git config --global --add safe.directory /__w/spack/spack
|
||||
useradd spack-test && mkdir -p ~spack-test
|
||||
chown -R spack-test . ~spack-test
|
||||
- name: Setup repo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
. .github/workflows/setup_git.sh
|
||||
useradd -m spack-test
|
||||
chown -R spack-test .
|
||||
- name: Bootstrap clingo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
run: |
|
||||
@@ -95,7 +89,6 @@ jobs:
|
||||
ubuntu-clingo-binaries-and-patchelf:
|
||||
runs-on: ubuntu-latest
|
||||
container: "ubuntu:latest"
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
env:
|
||||
@@ -105,20 +98,22 @@ jobs:
|
||||
apt-get install -y \
|
||||
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
|
||||
make patch unzip xz-utils python3 python3-dev tree
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- name: Setup non-root user
|
||||
- name: Work around CVE-2022-24765
|
||||
run: |
|
||||
# See [1] below
|
||||
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
|
||||
# a breaking behavior. See:
|
||||
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
|
||||
# - https://github.com/actions/checkout/issues/760
|
||||
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
|
||||
git config --global --add safe.directory /__w/spack/spack
|
||||
useradd spack-test && mkdir -p ~spack-test
|
||||
chown -R spack-test . ~spack-test
|
||||
- name: Setup repo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
. .github/workflows/setup_git.sh
|
||||
useradd -m spack-test
|
||||
chown -R spack-test .
|
||||
- name: Bootstrap clingo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
run: |
|
||||
@@ -126,10 +121,10 @@ jobs:
|
||||
spack -d solve zlib
|
||||
tree ~/.spack/bootstrap/store/
|
||||
|
||||
|
||||
opensuse-clingo-sources:
|
||||
runs-on: ubuntu-latest
|
||||
container: "opensuse/leap:latest"
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
@@ -139,12 +134,9 @@ jobs:
|
||||
bzip2 curl file gcc-c++ gcc gcc-fortran tar git gpg2 gzip \
|
||||
make patch unzip which xz python3 python3-devel tree \
|
||||
cmake bison
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- name: Setup repo
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
# See [1] below
|
||||
git config --global --add safe.directory /__w/spack/spack
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
. .github/workflows/setup_git.sh
|
||||
@@ -158,13 +150,11 @@ jobs:
|
||||
|
||||
macos-clingo-sources:
|
||||
runs-on: macos-latest
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
brew install cmake bison@2.7 tree
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- name: Bootstrap clingo
|
||||
run: |
|
||||
source share/spack/setup-env.sh
|
||||
@@ -179,14 +169,12 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
brew install tree
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Bootstrap clingo
|
||||
@@ -201,14 +189,12 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Setup repo
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
@@ -223,7 +209,6 @@ jobs:
|
||||
ubuntu-gnupg-binaries:
|
||||
runs-on: ubuntu-latest
|
||||
container: "ubuntu:latest"
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
env:
|
||||
@@ -233,20 +218,22 @@ jobs:
|
||||
apt-get install -y \
|
||||
bzip2 curl file g++ gcc patchelf gfortran git gzip \
|
||||
make patch unzip xz-utils python3 python3-dev tree
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- name: Setup non-root user
|
||||
- name: Work around CVE-2022-24765
|
||||
run: |
|
||||
# See [1] below
|
||||
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
|
||||
# a breaking behavior. See:
|
||||
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
|
||||
# - https://github.com/actions/checkout/issues/760
|
||||
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
|
||||
git config --global --add safe.directory /__w/spack/spack
|
||||
useradd spack-test && mkdir -p ~spack-test
|
||||
chown -R spack-test . ~spack-test
|
||||
- name: Setup repo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
. .github/workflows/setup_git.sh
|
||||
useradd -m spack-test
|
||||
chown -R spack-test .
|
||||
- name: Bootstrap GnuPG
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
run: |
|
||||
@@ -258,7 +245,6 @@ jobs:
|
||||
ubuntu-gnupg-sources:
|
||||
runs-on: ubuntu-latest
|
||||
container: "ubuntu:latest"
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
env:
|
||||
@@ -269,20 +255,22 @@ jobs:
|
||||
bzip2 curl file g++ gcc patchelf gfortran git gzip \
|
||||
make patch unzip xz-utils python3 python3-dev tree \
|
||||
gawk
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- name: Setup non-root user
|
||||
- name: Work around CVE-2022-24765
|
||||
run: |
|
||||
# See [1] below
|
||||
# Apparently Ubuntu patched git v2.25.1 with a security patch that introduces
|
||||
# a breaking behavior. See:
|
||||
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
|
||||
# - https://github.com/actions/checkout/issues/760
|
||||
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
|
||||
git config --global --add safe.directory /__w/spack/spack
|
||||
useradd spack-test && mkdir -p ~spack-test
|
||||
chown -R spack-test . ~spack-test
|
||||
- name: Setup repo
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
. .github/workflows/setup_git.sh
|
||||
useradd -m spack-test
|
||||
chown -R spack-test .
|
||||
- name: Bootstrap GnuPG
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
run: |
|
||||
@@ -294,15 +282,13 @@ jobs:
|
||||
|
||||
macos-gnupg-binaries:
|
||||
runs-on: macos-latest
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
brew install tree
|
||||
# Remove GnuPG since we want to bootstrap it
|
||||
sudo rm -rf /usr/local/bin/gpg
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
|
||||
- name: Bootstrap GnuPG
|
||||
run: |
|
||||
source share/spack/setup-env.sh
|
||||
@@ -312,15 +298,13 @@ jobs:
|
||||
|
||||
macos-gnupg-sources:
|
||||
runs-on: macos-latest
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
brew install gawk tree
|
||||
# Remove GnuPG since we want to bootstrap it
|
||||
sudo rm -rf /usr/local/bin/gpg
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846
|
||||
- name: Bootstrap GnuPG
|
||||
run: |
|
||||
source share/spack/setup-env.sh
|
||||
@@ -328,11 +312,3 @@ jobs:
|
||||
spack bootstrap untrust github-actions-v0.2
|
||||
spack -d gpg list
|
||||
tree ~/.spack/bootstrap/store/
|
||||
|
||||
|
||||
# [1] Distros that have patched git to resolve CVE-2022-24765 (e.g. Ubuntu patching v2.25.1)
# introduce breaking behavior, so we have to set `safe.directory` in gitconfig ourselves.
# See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog
|
||||
.github/workflows/build-containers.yml (17 changes, vendored)
@@ -43,10 +43,9 @@ jobs:
|
||||
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
|
||||
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04']]
|
||||
name: Build ${{ matrix.dockerfile[0] }}
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
|
||||
- name: Set Container Tag Normal (Nightly)
|
||||
run: |
|
||||
@@ -76,33 +75,33 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Upload Dockerfile
|
||||
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
|
||||
uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535
|
||||
with:
|
||||
name: dockerfiles
|
||||
path: dockerfiles
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # @v1
|
||||
uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # @v1
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # @v1
|
||||
uses: docker/setup-buildx-action@94ab11c41e45d028884a99163086648e898eed25 # @v1
|
||||
|
||||
- name: Log in to GitHub Container Registry
|
||||
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
|
||||
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Log in to DockerHub
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # @v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
|
||||
uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # @v2
|
||||
uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a # @v2
|
||||
with:
|
||||
context: dockerfiles/${{ matrix.dockerfile[0] }}
|
||||
platforms: ${{ matrix.dockerfile[1] }}
.github/workflows/macos_python.yml (9 changes, vendored)
@@ -22,10 +22,9 @@ on:
|
||||
jobs:
|
||||
install_gcc:
|
||||
name: gcc with clang
|
||||
if: github.repository == 'spack/spack'
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
@@ -37,11 +36,10 @@ jobs:
|
||||
|
||||
install_jupyter_clang:
|
||||
name: jupyter
|
||||
if: github.repository == 'spack/spack'
|
||||
runs-on: macos-latest
|
||||
timeout-minutes: 700
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
@@ -52,10 +50,9 @@ jobs:
|
||||
|
||||
install_scipy_clang:
|
||||
name: scipy, mpl, pd
|
||||
if: github.repository == 'spack/spack'
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
with:
|
||||
python-version: 3.9
.github/workflows/unit_tests.yaml (28 changes, vendored)
@@ -15,7 +15,7 @@ jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
with:
|
||||
python-version: '3.10'
|
||||
@@ -31,7 +31,7 @@ jobs:
|
||||
style:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
@@ -57,7 +57,7 @@ jobs:
|
||||
packages: ${{ steps.filter.outputs.packages }}
|
||||
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
with:
|
||||
fetch-depth: 0
|
||||
@@ -106,7 +106,7 @@ jobs:
|
||||
- python-version: 3.9
|
||||
concretizer: original
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
@@ -162,7 +162,7 @@ jobs:
|
||||
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
|
||||
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
flags: unittests,linux,${{ matrix.concretizer }}
|
||||
@@ -171,7 +171,7 @@ jobs:
|
||||
needs: [ validate, style, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
@@ -200,7 +200,7 @@ jobs:
|
||||
COVERAGE: true
|
||||
run: |
|
||||
share/spack/qa/run-shell-tests
|
||||
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
|
||||
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
flags: shelltests,linux
|
||||
@@ -218,7 +218,7 @@ jobs:
|
||||
dnf install -y \
|
||||
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
|
||||
make patch tcl unzip which xz
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
@@ -237,7 +237,7 @@ jobs:
|
||||
needs: [ validate, style, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
@@ -274,7 +274,7 @@ jobs:
|
||||
SPACK_TEST_SOLVER: clingo
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
|
||||
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
flags: unittests,linux,clingo
|
||||
@@ -286,7 +286,7 @@ jobs:
|
||||
matrix:
|
||||
python-version: [3.8]
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
@@ -320,7 +320,7 @@ jobs:
|
||||
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
|
||||
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
|
||||
fi
|
||||
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
|
||||
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
files: ./coverage.xml
|
||||
@@ -331,7 +331,7 @@ jobs:
|
||||
needs: [ validate, style, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
|
||||
- uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # @v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
|
||||
with:
|
||||
python-version: '3.10'
|
||||
@@ -350,7 +350,7 @@ jobs:
|
||||
run: |
|
||||
. share/spack/setup-env.sh
|
||||
$(which spack) audit packages
|
||||
- uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # @v2.1.0
|
||||
- uses: codecov/codecov-action@e3c560433a6cc60aec8812599b7844a7b4fa0d71 # @v2.1.0
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
flags: unittests,linux,audits
|
||||
.github/workflows/windows_python.yml (16 changes, vendored)
@@ -17,7 +17,7 @@ jobs:
|
||||
validate:
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
with:
|
||||
python-version: 3.9
|
||||
@@ -33,7 +33,7 @@ jobs:
|
||||
style:
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
@@ -55,7 +55,7 @@ jobs:
|
||||
needs: [ validate, style ]
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
@@ -75,7 +75,7 @@ jobs:
|
||||
needs: [ validate, style ]
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
@@ -95,7 +95,7 @@ jobs:
|
||||
needs: [ validate, style ]
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
@@ -120,7 +120,7 @@ jobs:
|
||||
git config --global core.symlinks false
|
||||
shell:
|
||||
powershell
|
||||
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
|
||||
@@ -139,11 +139,11 @@ jobs:
|
||||
echo "installer_root=$((pwd).Path)" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
env:
|
||||
ProgressPreference: SilentlyContinue
|
||||
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: Windows Spack Installer Bundle
|
||||
path: ${{ env.installer_root }}\pkg\Spack.exe
|
||||
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: Windows Spack Installer
|
||||
path: ${{ env.installer_root}}\pkg\Spack.msi
|
||||
|
||||
@@ -14,39 +14,4 @@ concretizer:
|
||||
# concretizing specs. If `true`, we'll try to use as many installs/binaries
|
||||
# as possible, rather than building. If `false`, we'll always give you a fresh
|
||||
# concretization.
|
||||
reuse: true
|
||||
|
||||
# If `true`, Spack will consider minimizing builds its *topmost* priority.
|
||||
# Note that this can result in weird package configurations. In particular,
|
||||
# Spack will disable variants and might downgrade versions to avoid building
|
||||
# new packages for an install. By default, Spack respects defaults from
|
||||
# packages and preferences *before* minimizing the number of builds.
|
||||
#
|
||||
# Example for intuition: `cmake` can optionally build without openssl, but
|
||||
# it's enabled by default because many builds use that functionality. Using
|
||||
# `minimal: true` will build `cmake~openssl` unless the user asks for
|
||||
# `cmake+openssl` explicitly.
|
||||
minimal: false
|
||||
|
||||
# Options that tune which targets are considered for concretization. The
|
||||
# concretization process is very sensitive to the number targets, and the time
|
||||
# needed to reach a solution increases noticeably with the number of targets
|
||||
# considered.
|
||||
targets:
|
||||
|
||||
# Determine whether we want to target specific or generic microarchitectures.
|
||||
# An example of the first kind might be for instance "skylake" or "bulldozer",
|
||||
# while generic microarchitectures are for instance "aarch64" or "x86_64_v4".
|
||||
granularity: microarchitectures
|
||||
|
||||
# If "false" allow targets that are incompatible with the current host (for
|
||||
# instance concretize with target "icelake" while running on "haswell").
|
||||
# If "true" only allow targets that are compatible with the host.
|
||||
host_compatible: true
|
||||
|
||||
# When "true" concretize root specs of environments together, so that each unique
|
||||
# package in an environment corresponds to one concrete spec. This ensures
|
||||
# environments can always be activated. When "false" perform concretization separately
|
||||
# on each root spec, allowing different versions and variants of the same package in
|
||||
# an environment.
|
||||
unify: false
|
||||
reuse: false
|
||||
|
||||
@@ -33,9 +33,6 @@ config:
|
||||
template_dirs:
|
||||
- $spack/share/spack/templates
|
||||
|
||||
# Directory where licenses should be located
|
||||
license_dir: $spack/etc/spack/licenses
|
||||
|
||||
# Temporary locations Spack can try to use for builds.
|
||||
#
|
||||
# Recommended options are given below.
|
||||
|
||||
@@ -35,8 +35,7 @@ packages:
|
||||
jpeg: [libjpeg-turbo, libjpeg]
|
||||
lapack: [openblas, amdlibflame]
|
||||
libllvm: [llvm, llvm-amdgpu]
|
||||
lua-lang: [lua, lua-luajit-openresty, lua-luajit]
|
||||
luajit: [lua-luajit-openresty, lua-luajit]
|
||||
lua-lang: [lua, lua-luajit]
|
||||
mariadb-client: [mariadb-c-client, mariadb]
|
||||
mkl: [intel-mkl]
|
||||
mpe: [mpe2]
|
||||
|
||||
@@ -192,32 +192,32 @@ you can use them to customize an installation in :ref:`sec-specs`.
|
||||
Reusing installed dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
By default, when you run ``spack install``, Spack tries hard to reuse existing installations
|
||||
as dependencies, either from a local store or from remote buildcaches if configured.
|
||||
This minimizes unwanted rebuilds of common dependencies, in particular if
|
||||
you update Spack frequently.
|
||||
.. warning::
|
||||
|
||||
In case you want the latest versions and configurations to be installed instead,
|
||||
you can add the ``--fresh`` option:
|
||||
The ``--reuse`` option described here will become the default installation
|
||||
method in the next Spack version, and you will be able to get the current
|
||||
behavior by using ``spack install --fresh``.
|
||||
|
||||
By default, when you run ``spack install``, Spack tries to build a new
|
||||
version of the package you asked for, along with updated versions of
|
||||
its dependencies. This gets you the latest versions and configurations,
|
||||
but it can result in unwanted rebuilds if you update Spack frequently.
|
||||
|
||||
If you want Spack to try hard to reuse existing installations as dependencies,
|
||||
you can add the ``--reuse`` option:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --fresh mpich
|
||||
$ spack install --reuse mpich
|
||||
|
||||
Reusing installations in this mode is "accidental", and happening only if
|
||||
there's a match between existing installations and what Spack would have installed
|
||||
anyhow.
|
||||
|
||||
You can use the ``spack spec -I mpich`` command to see what
|
||||
This will not do anything if ``mpich`` is already installed. If ``mpich``
|
||||
is not installed, but dependencies like ``hwloc`` and ``libfabric`` are,
|
||||
the ``mpich`` will be built with the installed versions, if possible.
|
||||
You can use the :ref:`spack spec -I <cmd-spack-spec>` command to see what
|
||||
will be reused and what will be built before you install.
|
||||
|
||||
You can configure Spack to use the ``--fresh`` behavior by default in
|
||||
``concretizer.yaml``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
concretizer:
|
||||
reuse: false
|
||||
You can configure Spack to use the ``--reuse`` behavior by default in
|
||||
``concretizer.yaml``.
|
||||
|
||||
.. _cmd-spack-uninstall:
|
||||
|
||||
|
||||
@@ -219,65 +219,33 @@ Concretizer options
|
||||
but you can also use ``concretizer.yaml`` to customize aspects of the
|
||||
algorithm it uses to select the dependencies you install:
|
||||
|
||||
.. literalinclude:: _spack_root/etc/spack/defaults/concretizer.yaml
|
||||
:language: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Reuse already installed packages
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
concretizer:
|
||||
# Whether to consider installed packages or packages from buildcaches when
|
||||
# concretizing specs. If `true`, we'll try to use as many installs/binaries
|
||||
# as possible, rather than building. If `false`, we'll always give you a fresh
|
||||
# concretization.
|
||||
reuse: false
|
||||
|
||||
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
|
||||
^^^^^^^^^^^^^^^^
|
||||
``reuse``
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
This controls whether Spack will prefer to use installed packages (``true``), or
|
||||
whether it will do a "fresh" installation and prefer the latest settings from
|
||||
``package.py`` files and ``packages.yaml`` (``false``).
|
||||
You can use:
|
||||
``package.py`` files and ``packages.yaml`` (``false``).
|
||||
|
||||
.. code-block:: console
|
||||
You can use ``spack install --reuse`` to enable reuse for a single installation,
|
||||
and you can use ``spack install --fresh`` to do a fresh install if ``reuse`` is
|
||||
enabled by default.
|
||||
|
||||
% spack install --reuse <spec>
|
||||
.. note::
|
||||
|
||||
to enable reuse for a single installation, and you can use:
|
||||
``reuse: false`` is the current default, but ``reuse: true`` will be the default
|
||||
in the next Spack release. You will still be able to use ``spack install --fresh``
|
||||
to get the old behavior.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
spack install --fresh <spec>
|
||||
|
||||
to do a fresh install if ``reuse`` is enabled by default.
|
||||
``reuse: true`` is the default.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Selection of the target microarchitectures
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The options under the ``targets`` attribute control which targets are considered during a solve.
|
||||
Currently the options in this section are only configurable from the ``concretization.yaml`` file
|
||||
and there are no corresponding command line arguments to enable them for a single solve.
|
||||
|
||||
The ``granularity`` option can take two possible values: ``microarchitectures`` and ``generic``.
|
||||
If set to:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
concretizer:
|
||||
targets:
|
||||
granularity: microarchitectures
|
||||
|
||||
Spack will consider all the microarchitectures known to ``archspec`` to label nodes for
|
||||
compatibility. If instead the option is set to:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
concretizer:
|
||||
targets:
|
||||
granularity: generic
|
||||
|
||||
Spack will consider only generic microarchitectures. For instance, when running on a
|
||||
Haswell node, Spack will consider ``haswell`` as the best target in the former case and
|
||||
``x86_64_v3`` as the best target in the latter case.
|
||||
|
||||
The ``host_compatible`` option is a Boolean option that determines whether or not the
|
||||
microarchitectures considered during the solve are constrained to be compatible with the
|
||||
host Spack is currently running on. For instance, if this option is set to ``true``, a
|
||||
user cannot concretize for ``target=icelake`` while running on a Haswell node.
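
For illustration, a minimal sketch (not taken from the shipped defaults) of relaxing that
constraint so that cross-target concretization is allowed again:

.. code-block:: yaml

   concretizer:
     targets:
       # allow targets that are incompatible with the current host, e.g.
       # concretizing for "icelake" while running on "haswell"
       host_compatible: false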
|
||||
|
||||
.. _package-preferences:
|
||||
|
||||
|
||||
@@ -39,7 +39,6 @@ on these ideas for each distinct build system that Spack supports:
|
||||
|
||||
build_systems/autotoolspackage
|
||||
build_systems/cmakepackage
|
||||
build_systems/cachedcmakepackage
|
||||
build_systems/mesonpackage
|
||||
build_systems/qmakepackage
|
||||
build_systems/sippackage
|
||||
@@ -48,7 +47,6 @@ on these ideas for each distinct build system that Spack supports:
|
||||
:maxdepth: 1
|
||||
:caption: Language-specific
|
||||
|
||||
build_systems/luapackage
|
||||
build_systems/octavepackage
|
||||
build_systems/perlpackage
|
||||
build_systems/pythonpackage
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _cachedcmakepackage:
|
||||
|
||||
------------------
|
||||
CachedCMakePackage
|
||||
------------------
|
||||
|
||||
The CachedCMakePackage base class is used for CMake-based workflows
|
||||
that create a CMake cache file prior to running ``cmake``. This is
|
||||
useful for packages with arguments longer than the system limit, and
|
||||
for reproducibility.
|
||||
|
||||
The documentation for this class assumes that the user is familiar with
|
||||
the ``CMakePackage`` class from which it inherits. See the documentation
|
||||
for :ref:`CMakePackage <cmakepackage>`.
|
||||
|
||||
^^^^^^
|
||||
Phases
|
||||
^^^^^^
|
||||
|
||||
The ``CachedCMakePackage`` base class comes with the following phases:
|
||||
|
||||
#. ``initconfig`` - generate the CMake cache file
|
||||
#. ``cmake`` - generate the Makefile
|
||||
#. ``build`` - build the package
|
||||
#. ``install`` - install the package
|
||||
|
||||
By default, these phases run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mkdir spack-build
|
||||
$ cd spack-build
|
||||
$ cat << EOF > name-arch-compiler@version.cmake
|
||||
# Write information on compilers and dependencies
|
||||
# includes information on mpi and cuda if applicable
|
||||
$ cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/installation/prefix -C name-arch-compiler@version.cmake
|
||||
$ make
|
||||
$ make test # optional
|
||||
$ make install
|
||||
|
||||
The ``CachedCMakePackage`` class inherits from the ``CMakePackage``
|
||||
class, and accepts all of the same options and adds all of the same
|
||||
flags to the ``cmake`` command. Similar to the ``CMakePackage`` class,
|
||||
you may need to add a few arguments yourself, and the
|
||||
``CachedCMakePackage`` provides the same interface to add those
|
||||
flags.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Adding entries to the CMake cache
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
In addition to adding flags to the ``cmake`` command, you may need to
|
||||
add entries to the CMake cache in the ``initconfig`` phase. This can
|
||||
be done by overriding one of four methods:
|
||||
|
||||
#. ``CachedCMakePackage.initconfig_compiler_entries``
|
||||
#. ``CachedCMakePackage.initconfig_mpi_entries``
|
||||
#. ``CachedCMakePackage.initconfig_hardware_entries``
|
||||
#. ``CachedCMakePackage.initconfig_package_entries``
|
||||
|
||||
Each of these methods returns a list of CMake cache strings. The
|
||||
distinction between these methods is merely to provide a
|
||||
well-structured and legible cmake cache file -- otherwise, entries
|
||||
from each of these methods are handled identically.
|
||||
|
||||
Spack also provides convenience methods for generating CMake cache
|
||||
entries. These methods are available at module scope in every Spack
|
||||
package. Because CMake parses boolean options, strings, and paths
|
||||
differently, there are three such methods:
|
||||
|
||||
#. ``cmake_cache_option``
|
||||
#. ``cmake_cache_string``
|
||||
#. ``cmake_cache_path``
|
||||
|
||||
These methods each accept three parameters -- the name of the CMake
|
||||
variable associated with the entry, the value of the entry, and an
|
||||
optional comment -- and return strings in the appropriate format to be
|
||||
returned from any of the ``initconfig*`` methods. Additionally, these
|
||||
methods may return comments beginning with the ``#`` character.
|
||||
|
||||
A typical usage of these methods may look something like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def initconfig_mpi_entries(self):
|
||||
# Get existing MPI configurations
|
||||
entries = super(Foo, self).initconfig_mpi_entries()
|
||||
|
||||
# The existing MPI configurations key on whether ``mpi`` is in the spec
|
||||
# This spec has an MPI variant, and we need to enable MPI when it is on.
|
||||
# This hypothetical package controls MPI with the ``FOO_MPI`` option to
|
||||
# cmake.
|
||||
if '+mpi' in self.spec:
|
||||
entries.append(cmake_cache_option('FOO_MPI', True, "enable mpi"))
|
||||
else:
|
||||
entries.append(cmake_cache_option('FOO_MPI', False, "disable mpi"))
|
||||
|
||||
def initconfig_package_entries(self):
|
||||
# Package specific options
|
||||
entries = []
|
||||
|
||||
entries.append('#Entries for build options')
|
||||
|
||||
bar_on = '+bar' in self.spec
|
||||
entries.append(cmake_cache_option('FOO_BAR', bar_on, 'toggle bar'))
|
||||
|
||||
entries.append('#Entries for dependencies')
|
||||
|
||||
if self.spec['blas'].name == 'baz': # baz is our blas provider
|
||||
entries.append(cmake_cache_string('FOO_BLAS', 'baz', 'Use baz'))
|
||||
entries.append(cmake_cache_path('BAZ_PREFIX', self.spec['baz'].prefix))
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For more information on CMake cache files, see:
|
||||
https://cmake.org/cmake/help/latest/manual/cmake.1.html
|
||||
@@ -1,105 +0,0 @@
|
||||
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _luapackage:
|
||||
|
||||
------------
|
||||
LuaPackage
|
||||
------------
|
||||
|
||||
LuaPackage is a helper for the common case of Lua packages that provide
|
||||
a rockspec file. This is not meant to take a rock archive, but to build
|
||||
a source archive or repository that provides a rockspec, which should cover
|
||||
most Lua packages. If a Lua package builds with Make rather than
LuaRocks, prefer MakefilePackage.
|
||||
|
||||
^^^^^^
|
||||
Phases
|
||||
^^^^^^
|
||||
|
||||
The ``LuaPackage`` base class comes with the following phases:
|
||||
|
||||
#. ``unpack`` - if using a rock, unpacks the rock and moves into the source directory
|
||||
#. ``preprocess`` - adjust sources or rockspec to fix build
|
||||
#. ``install`` - install the project
|
||||
|
||||
By default, these phases run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# If the archive is a source rock
|
||||
$ luarocks unpack <archive>.src.rock
|
||||
$ # preprocess is a noop by default
|
||||
$ luarocks make <name>.rockspec
|
||||
|
||||
|
||||
Any of these phases can be overridden in your package as necessary.
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
Important files
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Packages that use the Lua/LuaRocks build system can be identified by the
|
||||
presence of a ``*.rockspec`` file in their sourcetree, or can be fetched as
|
||||
a source rock archive (``.src.rock``). This file declares things like build
|
||||
instructions and dependencies; the ``.src.rock`` also contains all code.
|
||||
|
||||
It is common for the rockspec file to list the lua version required in
|
||||
a dependency. The LuaPackage class adds appropriate dependencies on a Lua
|
||||
implementation, but it is a good idea to specify the version required with
|
||||
a ``depends_on`` statement. The block normally will be a table definition like
|
||||
this:
|
||||
|
||||
.. code-block:: lua
|
||||
|
||||
dependencies = {
|
||||
"lua >= 5.1",
|
||||
}
|
||||
|
||||
The LuaPackage class supports source repositories and archives containing
|
||||
a rockspec and directly downloading source rock files. It *does not* support
|
||||
downloading dependencies listed inside a rockspec, and thus does not support
|
||||
directly downloading a rockspec as an archive.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Build system dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
All base dependencies are added by the build system, but LuaRocks is run to
|
||||
avoid downloading extra Lua dependencies during build. If the package needs
|
||||
Lua libraries outside the standard set, they should be added as dependencies.
|
||||
|
||||
To specify a Lua version constraint but allow all lua implementations, prefer
|
||||
to use ``depends_on("lua-lang@5.1:5.1.99")`` to express any 5.1 compatible
|
||||
version. If the package requires LuaJit rather than Lua,
|
||||
a ``depends_on("luajit")`` should be used to ensure a LuaJit distribution is
|
||||
used instead of the Lua interpreter. Alternately, if only interpreted Lua will
|
||||
work ``depends_on("lua")`` will express that.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Passing arguments to luarocks make
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you need to pass any arguments to the ``luarocks make`` call, you can
|
||||
override the ``luarocks_args`` method like so:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def luarocks_args(self):
|
||||
return ['flag1', 'flag2']
|
||||
|
||||
One common use of this is to override warnings or flags for newer compilers, as in:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def luarocks_args(self):
|
||||
return ["CFLAGS='-Wno-error=implicit-function-declaration'"]
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For more information on the LuaRocks build system, see:
|
||||
https://luarocks.org/
|
||||
@@ -95,7 +95,7 @@ class of your package. For example, you can add it to your
|
||||
# Set up the hip macros needed by the build
|
||||
args.extend([
|
||||
'-DENABLE_HIP=ON',
|
||||
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix)])
|
||||
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix])
|
||||
rocm_archs = spec.variants['amdgpu_target'].value
|
||||
if 'none' not in rocm_archs:
|
||||
args.append('-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
|
||||
|
||||
@@ -23,10 +23,7 @@
|
||||
import sys
|
||||
from glob import glob
|
||||
|
||||
from docutils.statemachine import StringList
|
||||
from sphinx.domains.python import PythonDomain
|
||||
from sphinx.ext.apidoc import main as sphinx_apidoc
|
||||
from sphinx.parsers import RSTParser
|
||||
|
||||
# -- Spack customizations -----------------------------------------------------
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
@@ -85,6 +82,9 @@
|
||||
#
|
||||
# Disable duplicate cross-reference warnings.
|
||||
#
|
||||
from sphinx.domains.python import PythonDomain
|
||||
|
||||
|
||||
class PatchedPythonDomain(PythonDomain):
|
||||
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
|
||||
if 'refspecific' in node:
|
||||
@@ -92,20 +92,8 @@ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
|
||||
return super(PatchedPythonDomain, self).resolve_xref(
|
||||
env, fromdocname, builder, typ, target, node, contnode)
|
||||
|
||||
#
|
||||
# Disable tabs to space expansion in code blocks
|
||||
# since Makefiles require tabs.
|
||||
#
|
||||
class NoTabExpansionRSTParser(RSTParser):
|
||||
def parse(self, inputstring, document):
|
||||
if isinstance(inputstring, str):
|
||||
lines = inputstring.splitlines()
|
||||
inputstring = StringList(lines, document.current_source)
|
||||
super().parse(inputstring, document)
|
||||
|
||||
def setup(sphinx):
|
||||
sphinx.add_domain(PatchedPythonDomain, override=True)
|
||||
sphinx.add_source_parser(NoTabExpansionRSTParser, override=True)
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
|
||||
@@ -59,8 +59,7 @@ other techniques to minimize the size of the final image:
|
||||
&& echo " specs:" \
|
||||
&& echo " - gromacs+mpi" \
|
||||
&& echo " - mpich" \
|
||||
&& echo " concretizer: together" \
|
||||
&& echo " unify: true" \
|
||||
&& echo " concretization: together" \
|
||||
&& echo " config:" \
|
||||
&& echo " install_tree: /opt/software" \
|
||||
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
|
||||
@@ -246,8 +245,7 @@ software is respectively built and installed:
|
||||
&& echo " specs:" \
|
||||
&& echo " - gromacs+mpi" \
|
||||
&& echo " - mpich" \
|
||||
&& echo " concretizer:" \
|
||||
&& echo " unify: true" \
|
||||
&& echo " concretization: together" \
|
||||
&& echo " config:" \
|
||||
&& echo " install_tree: /opt/software" \
|
||||
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
|
||||
@@ -368,8 +366,7 @@ produces, for instance, the following ``Dockerfile``:
|
||||
&& echo " externals:" \
|
||||
&& echo " - spec: cuda%gcc" \
|
||||
&& echo " prefix: /usr/local/cuda" \
|
||||
&& echo " concretizer:" \
|
||||
&& echo " unify: true" \
|
||||
&& echo " concretization: together" \
|
||||
&& echo " config:" \
|
||||
&& echo " install_tree: /opt/software" \
|
||||
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
|
||||
|
||||
@@ -281,8 +281,8 @@ need to be installed alongside each other. Central installations done
|
||||
at HPC centers by system administrators or user support groups
|
||||
are a common case that fits in this behavior.
|
||||
Environments *can also be configured to concretize all
|
||||
the root specs in a unified way* to ensure that
|
||||
each package in the environment corresponds to a single concrete spec. This
|
||||
the root specs in a self-consistent way* to ensure that
|
||||
each package in the environment comes with a single configuration. This
|
||||
mode of operation is usually what is required by software developers that
|
||||
want to deploy their development environment.
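
As a minimal sketch (the spec list here is purely illustrative), an environment requests this
unified mode of operation in its ``spack.yaml`` with the ``concretizer:unify`` option:

.. code-block:: yaml

   spack:
     specs:
     - hdf5+mpi
     - zlib
     concretizer:
       # concretize all root specs together, so each package in the
       # environment corresponds to a single concrete spec
       unify: true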
|
||||
|
||||
@@ -349,24 +349,6 @@ If the Environment has been concretized, Spack will install the
|
||||
concretized specs. Otherwise, ``spack install`` will first concretize
|
||||
the Environment and then install the concretized specs.
|
||||
|
||||
.. note::
|
||||
|
||||
Every ``spack install`` process builds one package at a time with multiple build
|
||||
jobs, controlled by the ``-j`` flag and the ``config:build_jobs`` option
|
||||
(see :ref:`build-jobs`). To speed up environment builds further, independent
|
||||
packages can be installed in parallel by launching more Spack instances. For
|
||||
example, the following will build at most four packages in parallel using
|
||||
three background jobs:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
[myenv]$ spack install & spack install & spack install & spack install
|
||||
|
||||
Another option is to generate a ``Makefile`` and run ``make -j<N>`` to control
|
||||
the number of parallel install processes. See :ref:`env-generate-depfile`
|
||||
for details.
|
||||
|
||||
|
||||
As it installs, ``spack install`` creates symbolic links in the
|
||||
``logs/`` directory in the Environment, allowing for easy inspection
|
||||
of build logs related to that environment. The ``spack install``
|
||||
@@ -499,7 +481,7 @@ Spec concretization
|
||||
|
||||
Specs can be concretized separately or together, as already
|
||||
explained in :ref:`environments_concretization`. The behavior active
|
||||
under any environment is determined by the ``concretizer:unify`` property:
|
||||
under any environment is determined by the ``concretization`` property:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@@ -509,15 +491,10 @@ under any environment is determined by the ``concretizer:unify`` property:
|
||||
- netcdf
|
||||
- nco
|
||||
- py-sphinx
|
||||
concretizer:
|
||||
unify: true
|
||||
concretization: together
|
||||
|
||||
.. note::
|
||||
|
||||
The ``concretizer:unify`` config option was introduced in Spack 0.18 to
replace the ``concretization`` property. For reference,
``concretization: together`` is replaced by ``concretizer:unify:true``,
and ``concretization: separately`` is replaced by ``concretizer:unify:false``.
|
||||
which can currently take either one of the two allowed values ``together`` or ``separately``
|
||||
(the default).
|
||||
|
||||
.. admonition:: Re-concretization of user specs
|
||||
|
||||
@@ -933,93 +910,3 @@ environment.
|
||||
|
||||
The ``spack env deactivate`` command will remove the default view of
|
||||
the environment from the user's path.
|
||||
|
||||
|
||||
.. _env-generate-depfile:
|
||||
|
||||
|
||||
------------------------------------------
|
||||
Generating Depfiles from Environments
|
||||
------------------------------------------
|
||||
|
||||
Spack can generate ``Makefile``\s to make it easier to build multiple
|
||||
packages in an environment in parallel. Generated ``Makefile``\s expose
|
||||
targets that can be included in existing ``Makefile``\s, to allow
|
||||
other targets to depend on the environment installation.
|
||||
|
||||
A typical workflow is as follows:
|
||||
|
||||
.. code:: console
|
||||
|
||||
spack env create -d .
|
||||
spack -e . add perl
|
||||
spack -e . concretize
|
||||
spack -e . env depfile > Makefile
|
||||
make -j64
|
||||
|
||||
This generates a ``Makefile`` from a concretized environment in the
|
||||
current working directory, and ``make -j64`` installs the environment,
|
||||
exploiting parallelism across packages as much as possible. Spack
|
||||
respects the Make jobserver and forwards it to the build environment
|
||||
of packages, meaning that a single ``-j`` flag is enough to control the
|
||||
load, even when packages are built in parallel.
|
||||
|
||||
By default the following phony convenience targets are available:
|
||||
|
||||
- ``make all``: installs the environment (default target);
|
||||
- ``make fetch-all``: only fetch sources of all packages;
|
||||
- ``make clean``: cleans files used by make, but does not uninstall packages.
|
||||
|
||||
.. tip::
|
||||
|
||||
GNU Make version 4.3 and above have great support for output synchronization
|
||||
through the ``-O`` and ``--output-sync`` flags, which ensure that output is
|
||||
printed orderly per package install. To get synchronized output with colors,
|
||||
use ``make -j<N> SPACK_COLOR=always --output-sync=recurse``.
|
||||
|
||||
The following advanced example shows how generated targets can be used in a
|
||||
``Makefile``:
|
||||
|
||||
.. code:: Makefile
|
||||
|
||||
SPACK ?= spack
|
||||
|
||||
.PHONY: all clean fetch env
|
||||
|
||||
all: env
|
||||
|
||||
spack.lock: spack.yaml
|
||||
$(SPACK) -e . concretize -f
|
||||
|
||||
env.mk: spack.lock
|
||||
$(SPACK) -e . env depfile -o $@ --make-target-prefix spack
|
||||
|
||||
fetch: spack/fetch
|
||||
$(info Environment fetched!)
|
||||
|
||||
env: spack/env
|
||||
$(info Environment installed!)
|
||||
|
||||
clean:
|
||||
rm -rf spack.lock env.mk spack/
|
||||
|
||||
ifeq (,$(filter clean,$(MAKECMDGOALS)))
|
||||
include env.mk
|
||||
endif
|
||||
|
||||
When ``make`` is invoked, it first "remakes" the missing include ``env.mk``
|
||||
from its rule, which triggers concretization. When done, the generated targets
|
||||
``spack/fetch`` and ``spack/env`` are available. In the above
|
||||
example, the ``env`` target uses the latter as a prerequisite, meaning
|
||||
that it can make use of the installed packages in its commands.
|
||||
|
||||
As it is typically undesirable to remake ``env.mk`` as part of ``make clean``,
|
||||
the include is conditional.
|
||||
|
||||
.. note::
|
||||
|
||||
When including generated ``Makefile``\s, it is important to use
|
||||
the ``--make-target-prefix`` flag and use the non-phony targets
|
||||
``<target-prefix>/env`` and ``<target-prefix>/fetch`` as
|
||||
prerequisites, instead of the phony targets ``<target-prefix>/all``
|
||||
and ``<target-prefix>/fetch-all`` respectively.
|
||||
@@ -115,8 +115,7 @@ And here's the spack environment built by the pipeline represented as a
|
||||
|
||||
spack:
|
||||
view: false
|
||||
concretizer:
|
||||
unify: false
|
||||
concretization: separately
|
||||
|
||||
definitions:
|
||||
- pkgs:
|
||||
|
||||
@@ -61,7 +61,7 @@ You can see the packages we added earlier in the ``specs:`` section. If you
|
||||
ever want to add more packages, you can either use ``spack add`` or manually
|
||||
edit this file.
|
||||
|
||||
We also need to change the ``concretizer:unify`` option. By default, Spack
|
||||
We also need to change the ``concretization:`` option. By default, Spack
|
||||
concretizes each spec *separately*, allowing multiple versions of the same
|
||||
package to coexist. Since we want a single consistent environment, we want to
|
||||
concretize all of the specs *together*.
|
||||
@@ -78,8 +78,7 @@ Here is what your ``spack.yaml`` looks like with this new setting:
|
||||
# add package specs to the `specs` list
|
||||
specs: [bash@5, python, py-numpy, py-scipy, py-matplotlib]
|
||||
view: true
|
||||
concretizer:
|
||||
unify: true
|
||||
concretization: together
|
||||
|
||||
^^^^^^^^^^^^^^^^
|
||||
Symlink location
|
||||
|
||||
@@ -25,5 +25,4 @@ spack:
|
||||
- subversion
|
||||
# Plotting
|
||||
- graphviz
|
||||
concretizer:
|
||||
unify: true
|
||||
concretization: together
|
||||
lib/spack/env/cc (32 changes, vendored)
@@ -1,4 +1,4 @@
|
||||
#!/bin/sh -f
|
||||
#!/bin/sh
|
||||
# shellcheck disable=SC2034 # evals in this script fool shellcheck
|
||||
#
|
||||
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
|
||||
@@ -401,8 +401,7 @@ input_command="$*"
|
||||
# command line and recombine them with Spack arguments later. We
|
||||
# parse these out so that we can make sure that system paths come
|
||||
# last, that package arguments come first, and that Spack arguments
|
||||
# are injected properly. Based on configuration, we also strip -Werror
|
||||
# arguments.
|
||||
# are injected properly.
|
||||
#
|
||||
# All other arguments, including -l arguments, are treated as
|
||||
# 'other_args' and left in their original order. This ensures that
|
||||
@@ -441,29 +440,6 @@ while [ $# -ne 0 ]; do
        continue
    fi

    if [ -n "${SPACK_COMPILER_FLAGS_KEEP}" ] ; then
        # NOTE: the eval is required to allow `|` alternatives inside the variable
        eval "\
        case '$1' in
            $SPACK_COMPILER_FLAGS_KEEP)
                append other_args_list "$1"
                shift
                continue
                ;;
        esac
        "
    fi
    if [ -n "${SPACK_COMPILER_FLAGS_REMOVE}" ] ; then
        eval "\
        case '$1' in
            $SPACK_COMPILER_FLAGS_REMOVE)
                shift
                continue
                ;;
        esac
        "
    fi

    case "$1" in
        -isystem*)
            arg="${1#-isystem}"
@@ -792,9 +768,7 @@ if [ "$SPACK_DEBUG" = TRUE ]; then
    input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
    output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
    echo "[$mode] $command $input_command" >> "$input_log"
    IFS="$lsep"
    echo "[$mode] "$full_command_list >> "$output_log"
    unset IFS
    echo "[$mode] ${full_command_list}" >> "$output_log"
fi

# Execute the full command, preserving spaces with IFS set
2 lib/spack/external/__init__.py vendored
@@ -18,7 +18,7 @@

* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.4 (commit 53fc4ac91e9b4c5e4079f15772503a80bece72ad)
* Version: 0.1.2 (commit 85757b6666422fca86aa882a769bf78b0f992f54)

argparse
--------
75 lib/spack/external/archspec/cpu/detect.py vendored
@@ -61,7 +61,7 @@ def proc_cpuinfo():
    ``/proc/cpuinfo``
    """
    info = {}
    with open("/proc/cpuinfo") as file:  # pylint: disable=unspecified-encoding
    with open("/proc/cpuinfo") as file:
        for line in file:
            key, separator, value = line.partition(":")
@@ -80,46 +80,26 @@ def proc_cpuinfo():
|
||||
|
||||
|
||||
def _check_output(args, env):
|
||||
output = subprocess.Popen( # pylint: disable=consider-using-with
|
||||
args, stdout=subprocess.PIPE, env=env
|
||||
).communicate()[0]
|
||||
output = subprocess.Popen(args, stdout=subprocess.PIPE, env=env).communicate()[0]
|
||||
return six.text_type(output.decode("utf-8"))
|
||||
|
||||
|
||||
def _machine():
|
||||
""" "Return the machine architecture we are on"""
|
||||
operating_system = platform.system()
|
||||
|
||||
# If we are not on Darwin, trust what Python tells us
|
||||
if operating_system != "Darwin":
|
||||
return platform.machine()
|
||||
|
||||
# On Darwin it might happen that we are on M1, but using an interpreter
|
||||
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
|
||||
# need to fix that.
|
||||
#
|
||||
# See: https://bugs.python.org/issue42704
|
||||
output = _check_output(
|
||||
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
|
||||
).strip()
|
||||
|
||||
if "Apple" in output:
|
||||
# Note that a native Python interpreter on Apple M1 would return
|
||||
# "arm64" instead of "aarch64". Here we normalize to the latter.
|
||||
return "aarch64"
|
||||
|
||||
return "x86_64"
|
||||
|
||||
|
||||
@info_dict(operating_system="Darwin")
|
||||
def sysctl_info_dict():
|
||||
"""Returns a raw info dictionary parsing the output of sysctl."""
|
||||
child_environment = _ensure_bin_usrbin_in_path()
|
||||
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
|
||||
# usually found there
|
||||
child_environment = dict(os.environ.items())
|
||||
search_paths = child_environment.get("PATH", "").split(os.pathsep)
|
||||
for additional_path in ("/sbin", "/usr/sbin"):
|
||||
if additional_path not in search_paths:
|
||||
search_paths.append(additional_path)
|
||||
child_environment["PATH"] = os.pathsep.join(search_paths)
|
||||
|
||||
def sysctl(*args):
|
||||
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
|
||||
|
||||
if _machine() == "x86_64":
|
||||
if platform.machine() == "x86_64":
|
||||
flags = (
|
||||
sysctl("-n", "machdep.cpu.features").lower()
|
||||
+ " "
|
||||
@@ -145,18 +125,6 @@ def sysctl(*args):
|
||||
return info
|
||||
|
||||
|
||||
def _ensure_bin_usrbin_in_path():
|
||||
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
|
||||
# usually found there
|
||||
child_environment = dict(os.environ.items())
|
||||
search_paths = child_environment.get("PATH", "").split(os.pathsep)
|
||||
for additional_path in ("/sbin", "/usr/sbin"):
|
||||
if additional_path not in search_paths:
|
||||
search_paths.append(additional_path)
|
||||
child_environment["PATH"] = os.pathsep.join(search_paths)
|
||||
return child_environment
|
||||
|
||||
|
||||
def adjust_raw_flags(info):
|
||||
"""Adjust the flags detected on the system to homogenize
|
||||
slightly different representations.
|
||||
@@ -216,7 +184,12 @@ def compatible_microarchitectures(info):
|
||||
Args:
|
||||
info (dict): dictionary containing information on the host cpu
|
||||
"""
|
||||
architecture_family = _machine()
|
||||
architecture_family = platform.machine()
|
||||
# On Apple M1 platform.machine() returns "arm64" instead of "aarch64"
|
||||
# so we should normalize the name here
|
||||
if architecture_family == "arm64":
|
||||
architecture_family = "aarch64"
|
||||
|
||||
# If a tester is not registered, be conservative and assume no known
|
||||
# target is compatible with the host
|
||||
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
|
||||
@@ -271,7 +244,12 @@ def compatibility_check(architecture_family):
|
||||
architecture_family = (architecture_family,)
|
||||
|
||||
def decorator(func):
|
||||
COMPATIBILITY_CHECKS.update({family: func for family in architecture_family})
|
||||
# pylint: disable=fixme
|
||||
# TODO: on removal of Python 2.6 support this can be re-written as
|
||||
# TODO: an update + a dict comprehension
|
||||
for arch_family in architecture_family:
|
||||
COMPATIBILITY_CHECKS[arch_family] = func
|
||||
|
||||
return func
|
||||
|
||||
return decorator
|
||||
@@ -310,7 +288,7 @@ def compatibility_check_for_x86_64(info, target):
|
||||
arch_root = TARGETS[basename]
|
||||
return (
|
||||
(target == arch_root or arch_root in target.ancestors)
|
||||
and target.vendor in (vendor, "generic")
|
||||
and (target.vendor == vendor or target.vendor == "generic")
|
||||
and target.features.issubset(features)
|
||||
)
|
||||
|
||||
@@ -325,9 +303,8 @@ def compatibility_check_for_aarch64(info, target):
|
||||
arch_root = TARGETS[basename]
|
||||
return (
|
||||
(target == arch_root or arch_root in target.ancestors)
|
||||
and target.vendor in (vendor, "generic")
|
||||
# On macOS it seems impossible to get all the CPU features with syctl info
|
||||
and (target.features.issubset(features) or platform.system() == "Darwin")
|
||||
and (target.vendor == vendor or target.vendor == "generic")
|
||||
and target.features.issubset(features)
|
||||
)
|
||||
|
||||
|
||||
|
||||
4 lib/spack/external/archspec/cpu/schema.py vendored
@@ -11,7 +11,7 @@
|
||||
try:
|
||||
from collections.abc import MutableMapping # novm
|
||||
except ImportError:
|
||||
from collections import MutableMapping # pylint: disable=deprecated-class
|
||||
from collections import MutableMapping
|
||||
|
||||
|
||||
class LazyDictionary(MutableMapping):
|
||||
@@ -56,7 +56,7 @@ def _load_json_file(json_file):
|
||||
|
||||
def _factory():
|
||||
filename = os.path.join(json_dir, json_file)
|
||||
with open(filename, "r") as file: # pylint: disable=unspecified-encoding
|
||||
with open(filename, "r") as file:
|
||||
return json.load(file)
|
||||
|
||||
return _factory
|
||||
|
||||
@@ -88,20 +88,6 @@
|
||||
"name": "pentium4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "pentium4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "pentium4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -305,20 +291,6 @@
|
||||
"name": "pentium4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "pentium4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "pentium4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -361,18 +333,6 @@
|
||||
"versions": "16.0:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -424,20 +384,6 @@
|
||||
"name": "corei7",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "corei7",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "corei7",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -486,20 +432,6 @@
|
||||
"name": "corei7",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "corei7",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "corei7",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -558,18 +490,6 @@
|
||||
"versions": "18.0:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -630,18 +550,6 @@
|
||||
"versions": "18.0:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -707,18 +615,6 @@
|
||||
"versions": "18.0:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -776,18 +672,6 @@
|
||||
"versions": "18.0:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -848,18 +732,6 @@
|
||||
"versions": "18.0:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -926,20 +798,6 @@
|
||||
"name": "knl",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "knl",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "knl",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1010,20 +868,6 @@
|
||||
"name": "skylake-avx512",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "skylake-avx512",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "skylake-avx512",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1093,18 +937,6 @@
|
||||
"versions": "18.0:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1172,18 +1004,6 @@
|
||||
"versions": "19.0.1:",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1278,20 +1098,6 @@
|
||||
"name": "icelake-client",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "icelake-client",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"name": "icelake-client",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1336,20 +1142,6 @@
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse2"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse2"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse2"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1400,20 +1192,6 @@
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse3"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse3"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse3"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1468,20 +1246,6 @@
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse3"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse3"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse3"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1537,20 +1301,6 @@
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse4.2"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse4.2"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"flags": "-msse4.2"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1610,22 +1360,6 @@
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1688,22 +1422,6 @@
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1767,22 +1485,6 @@
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1841,30 +1543,6 @@
|
||||
"name": "znver3",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"intel": [
|
||||
{
|
||||
"versions": "16.0:",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"oneapi": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"dpcpp": [
|
||||
{
|
||||
"versions": ":",
|
||||
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
|
||||
"name": "core-avx2",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -2110,6 +1788,7 @@
|
||||
"fp",
|
||||
"asimd",
|
||||
"evtstrm",
|
||||
"aes",
|
||||
"pmull",
|
||||
"sha1",
|
||||
"sha2",
|
||||
@@ -2142,26 +1821,18 @@
|
||||
"flags": "-march=armv8.2-a+crc+crypto+fp16"
|
||||
},
|
||||
{
|
||||
"versions": "8:10.2",
|
||||
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve -msve-vector-bits=512"
|
||||
},
|
||||
{
|
||||
"versions": "10.3:",
|
||||
"flags": "-mcpu=a64fx -msve-vector-bits=512"
|
||||
"versions": "8:",
|
||||
"flags": "-march=armv8.2-a+crc+aes+sha2+fp16+sve -msve-vector-bits=512"
|
||||
}
|
||||
],
|
||||
"clang": [
|
||||
{
|
||||
"versions": "3.9:4.9",
|
||||
"flags": "-march=armv8.2-a+crc+sha2+fp16"
|
||||
"flags": "-march=armv8.2-a+crc+crypto+fp16"
|
||||
},
|
||||
{
|
||||
"versions": "5:10",
|
||||
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve"
|
||||
},
|
||||
{
|
||||
"versions": "11:",
|
||||
"flags": "-mcpu=a64fx"
|
||||
"versions": "5:",
|
||||
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
|
||||
}
|
||||
],
|
||||
"arm": [
|
||||
@@ -2283,40 +1954,7 @@
|
||||
"m1": {
|
||||
"from": ["aarch64"],
|
||||
"vendor": "Apple",
|
||||
"features": [
|
||||
"fp",
|
||||
"asimd",
|
||||
"evtstrm",
|
||||
"aes",
|
||||
"pmull",
|
||||
"sha1",
|
||||
"sha2",
|
||||
"crc32",
|
||||
"atomics",
|
||||
"fphp",
|
||||
"asimdhp",
|
||||
"cpuid",
|
||||
"asimdrdm",
|
||||
"jscvt",
|
||||
"fcma",
|
||||
"lrcpc",
|
||||
"dcpop",
|
||||
"sha3",
|
||||
"asimddp",
|
||||
"sha512",
|
||||
"asimdfhm",
|
||||
"dit",
|
||||
"uscat",
|
||||
"ilrcpc",
|
||||
"flagm",
|
||||
"ssbs",
|
||||
"sb",
|
||||
"paca",
|
||||
"pacg",
|
||||
"dcpodp",
|
||||
"flagm2",
|
||||
"frint"
|
||||
],
|
||||
"features": [],
|
||||
"compilers": {
|
||||
"gcc": [
|
||||
{
|
||||
@@ -2326,22 +1964,14 @@
|
||||
],
|
||||
"clang" : [
|
||||
{
|
||||
"versions": "9.0:12.0",
|
||||
"versions": "9.0:",
|
||||
"flags" : "-march=armv8.4-a"
|
||||
},
|
||||
{
|
||||
"versions": "13.0:",
|
||||
"flags" : "-mcpu=apple-m1"
|
||||
}
|
||||
],
|
||||
"apple-clang": [
|
||||
{
|
||||
"versions": "11.0:12.5",
|
||||
"versions": "11.0:",
|
||||
"flags" : "-march=armv8.4-a"
|
||||
},
|
||||
{
|
||||
"versions": "13.0:",
|
||||
"flags" : "-mcpu=apple-m1"
|
||||
}
|
||||
]
|
||||
}
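The entries above map detected CPU features to per-compiler optimization flags. As a rough sketch of how this vendored database gets consumed (the compiler name and version below are hypothetical, and ``archspec.cpu.host()`` / ``optimization_flags()`` are assumed from the vendored archspec API)::

    # Minimal sketch: detect the host microarchitecture and look up flags
    # for a (hypothetical) compiler/version pair in the JSON tables above.
    import archspec.cpu

    host = archspec.cpu.host()     # Microarchitecture object for this machine
    print(host.name, host.family)  # e.g. "skylake", "x86_64"
    print(host.optimization_flags("gcc", "10.2.0"))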
|
||||
|
||||
@@ -64,7 +64,6 @@
|
||||
'is_exe',
|
||||
'join_path',
|
||||
'last_modification_time_recursive',
|
||||
'library_extensions',
|
||||
'mkdirp',
|
||||
'partition_path',
|
||||
'prefixes',
|
||||
@@ -110,15 +109,12 @@ def path_contains_subdirectory(path, root):
|
||||
return norm_path.startswith(norm_root)
|
||||
|
||||
|
||||
#: This generates the library filenames that may appear on any OS.
|
||||
library_extensions = ['a', 'la', 'so', 'tbd', 'dylib']
|
||||
|
||||
|
||||
def possible_library_filenames(library_names):
|
||||
"""Given a collection of library names like 'libfoo', generate the set of
|
||||
library filenames that may be found on the system (e.g. libfoo.so).
|
||||
library filenames that may be found on the system (e.g. libfoo.so). This
|
||||
generates the library filenames that may appear on any OS.
|
||||
"""
|
||||
lib_extensions = library_extensions
|
||||
lib_extensions = ['a', 'la', 'so', 'tbd', 'dylib']
|
||||
return set(
|
||||
'.'.join((lib, extension)) for lib, extension in
|
||||
itertools.product(library_names, lib_extensions))
|
||||
@@ -367,7 +363,7 @@ def group_ids(uid=None):
|
||||
|
||||
|
||||
@system_path_filter(arg_slice=slice(1))
|
||||
def chgrp(path, group, follow_symlinks=True):
|
||||
def chgrp(path, group):
|
||||
"""Implement the bash chgrp function on a single path"""
|
||||
if is_windows:
|
||||
raise OSError("Function 'chgrp' is not supported on Windows")
|
||||
@@ -376,10 +372,7 @@ def chgrp(path, group, follow_symlinks=True):
|
||||
gid = grp.getgrnam(group).gr_gid
|
||||
else:
|
||||
gid = group
|
||||
if follow_symlinks:
|
||||
os.chown(path, -1, gid)
|
||||
else:
|
||||
os.lchown(path, -1, gid)
|
||||
os.chown(path, -1, gid)
|
||||
|
||||
|
||||
@system_path_filter(arg_slice=slice(1))
|
||||
@@ -771,36 +764,39 @@ def __init__(self, inner_exception, outer_exception):
|
||||
|
||||
@contextmanager
|
||||
@system_path_filter
|
||||
def replace_directory_transaction(directory_name):
|
||||
"""Temporarily renames a directory in the same parent dir. If the operations
|
||||
executed within the context manager don't raise an exception, the renamed directory
|
||||
is deleted. If there is an exception, the move is undone.
|
||||
def replace_directory_transaction(directory_name, tmp_root=None):
|
||||
"""Moves a directory to a temporary space. If the operations executed
|
||||
within the context manager don't raise an exception, the directory is
|
||||
deleted. If there is an exception, the move is undone.
|
||||
|
||||
Args:
|
||||
directory_name (path): absolute path of the directory name
|
||||
tmp_root (path): absolute path of the parent directory where to create
|
||||
the temporary
|
||||
|
||||
Returns:
|
||||
temporary directory where ``directory_name`` has been moved
|
||||
"""
|
||||
# Check the input is indeed a directory with absolute path.
|
||||
# Raise before anything is done to avoid moving the wrong directory
|
||||
directory_name = os.path.abspath(directory_name)
|
||||
assert os.path.isdir(directory_name), 'Not a directory: ' + directory_name
|
||||
assert os.path.isdir(directory_name), \
|
||||
'Invalid directory: ' + directory_name
|
||||
assert os.path.isabs(directory_name), \
|
||||
'"directory_name" must contain an absolute path: ' + directory_name
|
||||
|
||||
# Note: directory_name is normalized here, meaning the trailing slash is dropped,
|
||||
# so dirname is the directory's parent not the directory itself.
|
||||
tmpdir = tempfile.mkdtemp(
|
||||
dir=os.path.dirname(directory_name),
|
||||
prefix='.backup')
|
||||
directory_basename = os.path.basename(directory_name)
|
||||
|
||||
# We have to jump through hoops to support Windows, since
|
||||
# os.rename(directory_name, tmpdir) errors there.
|
||||
backup_dir = os.path.join(tmpdir, 'backup')
|
||||
os.rename(directory_name, backup_dir)
|
||||
tty.debug('Directory moved [src={0}, dest={1}]'.format(directory_name, backup_dir))
|
||||
if tmp_root is not None:
|
||||
assert os.path.isabs(tmp_root)
|
||||
|
||||
tmp_dir = tempfile.mkdtemp(dir=tmp_root)
|
||||
tty.debug('Temporary directory created [{0}]'.format(tmp_dir))
|
||||
|
||||
shutil.move(src=directory_name, dst=tmp_dir)
|
||||
tty.debug('Directory moved [src={0}, dest={1}]'.format(directory_name, tmp_dir))
|
||||
|
||||
try:
|
||||
yield backup_dir
|
||||
yield tmp_dir
|
||||
except (Exception, KeyboardInterrupt, SystemExit) as inner_exception:
|
||||
# Try to recover the original directory, if this fails, raise a
|
||||
# composite exception.
|
||||
@@ -808,7 +804,10 @@ def replace_directory_transaction(directory_name):
|
||||
# Delete what was there, before copying back the original content
|
||||
if os.path.exists(directory_name):
|
||||
shutil.rmtree(directory_name)
|
||||
os.rename(backup_dir, directory_name)
|
||||
shutil.move(
|
||||
src=os.path.join(tmp_dir, directory_basename),
|
||||
dst=os.path.dirname(directory_name)
|
||||
)
|
||||
except Exception as outer_exception:
|
||||
raise CouldNotRestoreDirectoryBackup(inner_exception, outer_exception)
|
||||
|
||||
@@ -816,8 +815,8 @@ def replace_directory_transaction(directory_name):
|
||||
raise
|
||||
else:
|
||||
# Otherwise delete the temporary directory
|
||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
||||
tty.debug('Temporary directory deleted [{0}]'.format(tmpdir))
|
||||
shutil.rmtree(tmp_dir, ignore_errors=True)
|
||||
tty.debug('Temporary directory deleted [{0}]'.format(tmp_dir))
|
||||
|
||||
|
||||
@system_path_filter
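For context, a minimal usage sketch of the context manager being reworked above (the prefix path is hypothetical and must already exist as a directory)::

    import os

    import llnl.util.filesystem as fs

    prefix = "/tmp/example-prefix"  # hypothetical directory to replace
    with fs.replace_directory_transaction(prefix):
        # The original contents are parked in the temporary backup; recreate
        # and repopulate the directory here. Any exception undoes the move.
        os.mkdir(prefix)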
|
||||
@@ -1098,32 +1097,7 @@ def visit_directory_tree(root, visitor, rel_path='', depth=0):
|
||||
for f in dir_entries:
|
||||
if sys.version_info >= (3, 5, 0):
|
||||
rel_child = os.path.join(rel_path, f.name)
|
||||
islink = f.is_symlink()
|
||||
# On Windows, symlinks to directories are distinct from
|
||||
# symlinks to files, and it is possible to create a
|
||||
# broken symlink to a directory (e.g. using os.symlink
|
||||
# without `target_is_directory=True`), invoking `isdir`
|
||||
# on a symlink on Windows that is broken in this manner
|
||||
# will result in an error. In this case we can work around
|
||||
# the issue by reading the target and resolving the
|
||||
# directory ourselves
|
||||
try:
|
||||
isdir = f.is_dir()
|
||||
except OSError as e:
|
||||
if is_windows and hasattr(e, 'winerror')\
|
||||
and e.winerror == 5 and islink:
|
||||
# if path is a symlink, determine destination and
|
||||
# evaluate file vs directory
|
||||
link_target = resolve_link_target_relative_to_the_link(f)
|
||||
# link_target might be relative but
|
||||
# resolve_link_target_relative_to_the_link
|
||||
# will ensure that if so, that it is relative
|
||||
# to the CWD and therefore
|
||||
# makes sense
|
||||
isdir = os.path.isdir(link_target)
|
||||
else:
|
||||
raise e
|
||||
|
||||
islink, isdir = f.is_symlink(), f.is_dir()
|
||||
else:
|
||||
rel_child = os.path.join(rel_path, f)
|
||||
lexists, islink, isdir = lexists_islink_isdir(os.path.join(dir, f))
|
||||
@@ -1131,7 +1105,7 @@ def visit_directory_tree(root, visitor, rel_path='', depth=0):
|
||||
continue
|
||||
|
||||
if not isdir:
|
||||
# handle files
|
||||
# Handle files
|
||||
visitor.visit_file(root, rel_child, depth)
|
||||
elif not islink and visitor.before_visit_dir(root, rel_child, depth):
|
||||
# Handle ordinary directories
|
||||
@@ -1206,35 +1180,6 @@ def remove_if_dead_link(path):
|
||||
os.unlink(path)
|
||||
|
||||
|
||||
def readonly_file_handler(ignore_errors=False):
|
||||
# TODO: generate stages etc. with write permissions wherever
|
||||
# so this callback is no-longer required
|
||||
"""
|
||||
Generate callback for shutil.rmtree to handle permissions errors on
|
||||
Windows. Some files may unexpectedly lack write permissions even
|
||||
though they were generated by Spack on behalf of the user (e.g. the
|
||||
stage), so this callback will detect such cases and modify the
|
||||
permissions if that is the issue. For other errors, the fallback
|
||||
is either to raise (if ignore_errors is False) or ignore (if
|
||||
ignore_errors is True). This is only intended for Windows systems
|
||||
and will raise a separate error if it is ever invoked (by accident)
|
||||
on a non-Windows system.
|
||||
"""
|
||||
def error_remove_readonly(func, path, exc):
|
||||
if not is_windows:
|
||||
raise RuntimeError("This method should only be invoked on Windows")
|
||||
excvalue = exc[1]
|
||||
if is_windows and func in (os.rmdir, os.remove, os.unlink) and\
|
||||
excvalue.errno == errno.EACCES:
|
||||
# change the file to be readable,writable,executable: 0777
|
||||
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
|
||||
# retry
|
||||
func(path)
|
||||
elif not ignore_errors:
|
||||
raise
|
||||
return error_remove_readonly
|
||||
|
||||
|
||||
@system_path_filter
|
||||
def remove_linked_tree(path):
|
||||
"""Removes a directory and its contents.
|
||||
@@ -1242,18 +1187,23 @@ def remove_linked_tree(path):
|
||||
If the directory is a symlink, follows the link and removes the real
|
||||
directory before removing the link.
|
||||
|
||||
This method will force-delete files on Windows
|
||||
|
||||
Parameters:
|
||||
path (str): Directory to be removed
|
||||
"""
|
||||
kwargs = {'ignore_errors': True}
|
||||
# On windows, cleaning a Git stage can be an issue
|
||||
# as git leaves readonly files that Python handles
|
||||
# poorly on Windows. Remove readonly status and try again
|
||||
def onerror(func, path, exe_info):
|
||||
os.chmod(path, stat.S_IWUSR)
|
||||
try:
|
||||
func(path)
|
||||
except Exception as e:
|
||||
tty.warn(e)
|
||||
pass
|
||||
|
||||
# Windows readonly files cannot be removed by Python
|
||||
# directly.
|
||||
kwargs = {'ignore_errors': True}
|
||||
if is_windows:
|
||||
kwargs['ignore_errors'] = False
|
||||
kwargs['onerror'] = readonly_file_handler(ignore_errors=True)
|
||||
kwargs = {'onerror': onerror}
|
||||
|
||||
if os.path.exists(path):
|
||||
if os.path.islink(path):
|
||||
|
||||
@@ -11,9 +11,7 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
from datetime import datetime, timedelta
|
||||
from typing import List, Tuple
|
||||
|
||||
import six
|
||||
from six import string_types
|
||||
@@ -1011,64 +1009,3 @@ def __repr__(self):
|
||||
|
||||
def __str__(self):
|
||||
return str(self.data)
|
||||
|
||||
|
||||
class GroupedExceptionHandler(object):
|
||||
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
|
||||
|
||||
def __init__(self):
|
||||
self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]]
|
||||
|
||||
def __bool__(self):
|
||||
"""Whether any exceptions were handled."""
|
||||
return bool(self.exceptions)
|
||||
|
||||
def forward(self, context):
|
||||
# type: (str) -> GroupedExceptionForwarder
|
||||
"""Return a contextmanager which extracts tracebacks and prefixes a message."""
|
||||
return GroupedExceptionForwarder(context, self)
|
||||
|
||||
def _receive_forwarded(self, context, exc, tb):
|
||||
# type: (str, Exception, List[str]) -> None
|
||||
self.exceptions.append((context, exc, tb))
|
||||
|
||||
def grouped_message(self, with_tracebacks=True):
|
||||
# type: (bool) -> str
|
||||
"""Print out an error message coalescing all the forwarded errors."""
|
||||
each_exception_message = [
|
||||
'{0} raised {1}: {2}{3}'.format(
|
||||
context,
|
||||
exc.__class__.__name__,
|
||||
exc,
|
||||
'\n{0}'.format(''.join(tb)) if with_tracebacks else '',
|
||||
)
|
||||
for context, exc, tb in self.exceptions
|
||||
]
|
||||
return 'due to the following failures:\n{0}'.format(
|
||||
'\n'.join(each_exception_message)
|
||||
)
|
||||
|
||||
|
||||
class GroupedExceptionForwarder(object):
|
||||
"""A contextmanager to capture exceptions and forward them to a
|
||||
GroupedExceptionHandler."""
|
||||
|
||||
def __init__(self, context, handler):
|
||||
# type: (str, GroupedExceptionHandler) -> None
|
||||
self._context = context
|
||||
self._handler = handler
|
||||
|
||||
def __enter__(self):
|
||||
return None
|
||||
|
||||
def __exit__(self, exc_type, exc_value, tb):
|
||||
if exc_value is not None:
|
||||
self._handler._receive_forwarded(
|
||||
self._context,
|
||||
exc_value,
|
||||
traceback.format_tb(tb),
|
||||
)
|
||||
|
||||
# Suppress any exception from being re-raised:
|
||||
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
|
||||
return True
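A minimal usage sketch of the handler/forwarder pair defined above (the ``configure`` and ``build`` callables are hypothetical stand-ins for bootstrap steps)::

    from llnl.util.lang import GroupedExceptionHandler

    def configure():
        raise RuntimeError("configure failed")  # hypothetical failing step

    def build():
        pass  # hypothetical step that succeeds

    handler = GroupedExceptionHandler()
    for name, step in (("configure", configure), ("build", build)):
        with handler.forward(name):
            step()  # exceptions are captured and recorded, not raised

    if handler:  # truthy when at least one exception was forwarded
        print("failed " + handler.grouped_message(with_tracebacks=False))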
|
||||
|
||||
@@ -809,23 +809,19 @@ def __enter__(self):
|
||||
def background_reader(reader, echo_writer, _kill):
|
||||
# for each line printed to logfile, read it
|
||||
# if echo: write line to user
|
||||
try:
|
||||
while True:
|
||||
is_killed = _kill.wait(.1)
|
||||
# Flush buffered build output to file
|
||||
# stdout/err fds refer to log file
|
||||
self.stderr.flush()
|
||||
self.stdout.flush()
|
||||
|
||||
while True:
|
||||
is_killed = _kill.wait(.1)
|
||||
self.stderr.flush()
|
||||
self.stdout.flush()
|
||||
line = reader.readline()
|
||||
while line:
|
||||
if self.echo:
|
||||
self.echo_writer.write('{0}'.format(line.decode()))
|
||||
self.echo_writer.flush()
|
||||
line = reader.readline()
|
||||
if self.echo and line:
|
||||
echo_writer.write('{0}'.format(line.decode()))
|
||||
echo_writer.flush()
|
||||
|
||||
if is_killed:
|
||||
break
|
||||
finally:
|
||||
reader.close()
|
||||
if is_killed:
|
||||
break
|
||||
|
||||
self._active = True
|
||||
with replace_environment(self.env):
|
||||
@@ -841,6 +837,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self._ioflag = False
|
||||
else:
|
||||
self.writer.close()
|
||||
self.reader.close()
|
||||
self.echo_writer.flush()
|
||||
self.stdout.flush()
|
||||
self.stderr.flush()
|
||||
@@ -856,7 +853,10 @@ def force_echo(self):
|
||||
if not self._active:
|
||||
raise RuntimeError(
|
||||
"Can't call force_echo() outside log_output region!")
|
||||
yield
|
||||
try:
|
||||
yield self
|
||||
finally:
|
||||
pass
|
||||
|
||||
|
||||
def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
|
||||
|
||||
@@ -276,24 +276,6 @@ def _search_duplicate_specs_in_externals(error_cls):
|
||||
)
|
||||
|
||||
|
||||
@package_directives
|
||||
def _check_build_test_callbacks(pkgs, error_cls):
|
||||
"""Ensure stand-alone test method is not included in build-time callbacks"""
|
||||
errors = []
|
||||
for pkg_name in pkgs:
|
||||
pkg = spack.repo.get(pkg_name)
|
||||
test_callbacks = pkg.build_time_test_callbacks
|
||||
|
||||
if test_callbacks and 'test' in test_callbacks:
|
||||
msg = ('{0} package contains "test" method in '
|
||||
'build_time_test_callbacks')
|
||||
instr = ('Remove "test" from: [{0}]'
|
||||
.format(', '.join(test_callbacks)))
|
||||
errors.append(error_cls(msg.format(pkg.name), [instr]))
|
||||
|
||||
return errors
|
||||
|
||||
|
||||
@package_directives
|
||||
def _check_patch_urls(pkgs, error_cls):
|
||||
"""Ensure that patches fetched from GitHub have stable sha256 hashes."""
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
import spack.config as config
|
||||
import spack.database as spack_db
|
||||
import spack.fetch_strategy as fs
|
||||
import spack.hash_types as ht
|
||||
import spack.hooks
|
||||
import spack.hooks.sbang
|
||||
import spack.mirror
|
||||
@@ -181,6 +182,7 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):
|
||||
|
||||
for indexed_spec in spec_list:
|
||||
dag_hash = indexed_spec.dag_hash()
|
||||
full_hash = indexed_spec._full_hash
|
||||
|
||||
if dag_hash not in self._mirrors_for_spec:
|
||||
self._mirrors_for_spec[dag_hash] = []
|
||||
@@ -188,8 +190,11 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):
|
||||
for entry in self._mirrors_for_spec[dag_hash]:
|
||||
# A binary mirror can only have one spec per DAG hash, so
|
||||
# if we already have an entry under this DAG hash for this
|
||||
# mirror url, we're done.
|
||||
# mirror url, we may need to replace the spec associated
|
||||
# with it (but only if it has a different full_hash).
|
||||
if entry['mirror_url'] == mirror_url:
|
||||
if full_hash and full_hash != entry['spec']._full_hash:
|
||||
entry['spec'] = indexed_spec
|
||||
break
|
||||
else:
|
||||
self._mirrors_for_spec[dag_hash].append({
|
||||
@@ -398,11 +403,6 @@ def _fetch_and_cache_index(self, mirror_url, expect_hash=None):
|
||||
hash_fetch_url = url_util.join(
|
||||
mirror_url, _build_cache_relative_path, 'index.json.hash')
|
||||
|
||||
if not web_util.url_exists(index_fetch_url):
|
||||
# A binary mirror is not required to have an index, so avoid
|
||||
# raising FetchCacheError in that case.
|
||||
return False
|
||||
|
||||
old_cache_key = None
|
||||
fetched_hash = None
|
||||
|
||||
@@ -762,62 +762,6 @@ def sign_tarball(key, force, specfile_path):
|
||||
spack.util.gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
|
||||
|
||||
|
||||
def _fetch_spec_from_mirror(spec_url):
|
||||
s = None
|
||||
tty.debug('fetching {0}'.format(spec_url))
|
||||
_, _, spec_file = web_util.read_from_url(spec_url)
|
||||
spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
|
||||
# Need full spec.json name or this gets confused with index.json.
|
||||
if spec_url.endswith('.json'):
|
||||
s = Spec.from_json(spec_file_contents)
|
||||
elif spec_url.endswith('.yaml'):
|
||||
s = Spec.from_yaml(spec_file_contents)
|
||||
return s
|
||||
|
||||
|
||||
def _read_specs_and_push_index(file_list, cache_prefix, db, db_root_dir):
|
||||
for file_path in file_list:
|
||||
try:
|
||||
s = _fetch_spec_from_mirror(url_util.join(cache_prefix, file_path))
|
||||
except (URLError, web_util.SpackWebError) as url_err:
|
||||
tty.error('Error reading specfile: {0}'.format(file_path))
|
||||
tty.error(url_err)
|
||||
|
||||
if s:
|
||||
db.add(s, None)
|
||||
db.mark(s, 'in_buildcache', True)
|
||||
|
||||
# Now generate the index, compute its hash, and push the two files to
|
||||
# the mirror.
|
||||
index_json_path = os.path.join(db_root_dir, 'index.json')
|
||||
with open(index_json_path, 'w') as f:
|
||||
db._write_to_file(f)
|
||||
|
||||
# Read the index back in and compute its hash
|
||||
with open(index_json_path) as f:
|
||||
index_string = f.read()
|
||||
index_hash = compute_hash(index_string)
|
||||
|
||||
# Write the hash out to a local file
|
||||
index_hash_path = os.path.join(db_root_dir, 'index.json.hash')
|
||||
with open(index_hash_path, 'w') as f:
|
||||
f.write(index_hash)
|
||||
|
||||
# Push the index itself
|
||||
web_util.push_to_url(
|
||||
index_json_path,
|
||||
url_util.join(cache_prefix, 'index.json'),
|
||||
keep_original=False,
|
||||
extra_args={'ContentType': 'application/json'})
|
||||
|
||||
# Push the hash
|
||||
web_util.push_to_url(
|
||||
index_hash_path,
|
||||
url_util.join(cache_prefix, 'index.json.hash'),
|
||||
keep_original=False,
|
||||
extra_args={'ContentType': 'text/plain'})
|
||||
|
||||
|
||||
def generate_package_index(cache_prefix):
|
||||
"""Create the build cache index page.
|
||||
|
||||
@@ -846,6 +790,35 @@ def generate_package_index(cache_prefix):
|
||||
tty.debug('Retrieving spec descriptor files from {0} to build index'.format(
|
||||
cache_prefix))
|
||||
|
||||
all_mirror_specs = {}
|
||||
|
||||
for file_path in file_list:
|
||||
try:
|
||||
spec_url = url_util.join(cache_prefix, file_path)
|
||||
tty.debug('fetching {0}'.format(spec_url))
|
||||
_, _, spec_file = web_util.read_from_url(spec_url)
|
||||
spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
|
||||
# Need full spec.json name or this gets confused with index.json.
|
||||
if spec_url.endswith('.json'):
|
||||
spec_dict = sjson.load(spec_file_contents)
|
||||
s = Spec.from_json(spec_file_contents)
|
||||
elif spec_url.endswith('.yaml'):
|
||||
spec_dict = syaml.load(spec_file_contents)
|
||||
s = Spec.from_yaml(spec_file_contents)
|
||||
all_mirror_specs[s.dag_hash()] = {
|
||||
'spec_url': spec_url,
|
||||
'spec': s,
|
||||
'num_deps': len(list(s.traverse(root=False))),
|
||||
'binary_cache_checksum': spec_dict['binary_cache_checksum'],
|
||||
'buildinfo': spec_dict['buildinfo'],
|
||||
}
|
||||
except (URLError, web_util.SpackWebError) as url_err:
|
||||
tty.error('Error reading specfile: {0}'.format(file_path))
|
||||
tty.error(url_err)
|
||||
|
||||
sorted_specs = sorted(all_mirror_specs.keys(),
|
||||
key=lambda k: all_mirror_specs[k]['num_deps'])
|
||||
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
db_root_dir = os.path.join(tmpdir, 'db_root')
|
||||
db = spack_db.Database(None, db_dir=db_root_dir,
|
||||
@@ -853,7 +826,85 @@ def generate_package_index(cache_prefix):
|
||||
record_fields=['spec', 'ref_count', 'in_buildcache'])
|
||||
|
||||
try:
|
||||
_read_specs_and_push_index(file_list, cache_prefix, db, db_root_dir)
|
||||
tty.debug('Specs sorted by number of dependencies:')
|
||||
for dag_hash in sorted_specs:
|
||||
spec_record = all_mirror_specs[dag_hash]
|
||||
s = spec_record['spec']
|
||||
num_deps = spec_record['num_deps']
|
||||
tty.debug(' {0}/{1} -> {2}'.format(
|
||||
s.name, dag_hash[:7], num_deps))
|
||||
if num_deps > 0:
|
||||
# Check each of this spec's dependencies (which we have already
|
||||
# processed), as they are the source of truth for their own
|
||||
# full hash. If the full hash we have for any deps does not
|
||||
# match what those deps have themselves, then we need to splice
|
||||
# this spec with those deps, and push this spliced spec
|
||||
# (spec.json file) back to the mirror, as well as update the
|
||||
# all_mirror_specs dictionary with this spliced spec.
|
||||
to_splice = []
|
||||
for dep in s.dependencies():
|
||||
dep_dag_hash = dep.dag_hash()
|
||||
if dep_dag_hash in all_mirror_specs:
|
||||
true_dep = all_mirror_specs[dep_dag_hash]['spec']
|
||||
if true_dep.full_hash() != dep.full_hash():
|
||||
to_splice.append(true_dep)
|
||||
|
||||
if to_splice:
|
||||
tty.debug(' needs the following deps spliced:')
|
||||
for true_dep in to_splice:
|
||||
tty.debug(' {0}/{1}'.format(
|
||||
true_dep.name, true_dep.dag_hash()[:7]))
|
||||
s = s.splice(true_dep, True)
|
||||
|
||||
# Push this spliced spec back to the mirror
|
||||
spliced_spec_dict = s.to_dict(hash=ht.full_hash)
|
||||
for key in ['binary_cache_checksum', 'buildinfo']:
|
||||
spliced_spec_dict[key] = spec_record[key]
|
||||
|
||||
temp_json_path = os.path.join(tmpdir, 'spliced.spec.json')
|
||||
with open(temp_json_path, 'w') as fd:
|
||||
fd.write(sjson.dump(spliced_spec_dict))
|
||||
|
||||
spliced_spec_url = spec_record['spec_url']
|
||||
web_util.push_to_url(
|
||||
temp_json_path, spliced_spec_url, keep_original=False)
|
||||
tty.debug(' spliced and wrote {0}'.format(
|
||||
spliced_spec_url))
|
||||
spec_record['spec'] = s
|
||||
|
||||
db.add(s, None)
|
||||
db.mark(s, 'in_buildcache', True)
|
||||
|
||||
# Now that we have fixed any old specfiles that might have had the wrong
|
||||
# full hash for their dependencies, we can generate the index, compute
|
||||
# the hash, and push those files to the mirror.
|
||||
index_json_path = os.path.join(db_root_dir, 'index.json')
|
||||
with open(index_json_path, 'w') as f:
|
||||
db._write_to_file(f)
|
||||
|
||||
# Read the index back in and compute its hash
|
||||
with open(index_json_path) as f:
|
||||
index_string = f.read()
|
||||
index_hash = compute_hash(index_string)
|
||||
|
||||
# Write the hash out to a local file
|
||||
index_hash_path = os.path.join(db_root_dir, 'index.json.hash')
|
||||
with open(index_hash_path, 'w') as f:
|
||||
f.write(index_hash)
|
||||
|
||||
# Push the index itself
|
||||
web_util.push_to_url(
|
||||
index_json_path,
|
||||
url_util.join(cache_prefix, 'index.json'),
|
||||
keep_original=False,
|
||||
extra_args={'ContentType': 'application/json'})
|
||||
|
||||
# Push the hash
|
||||
web_util.push_to_url(
|
||||
index_hash_path,
|
||||
url_util.join(cache_prefix, 'index.json.hash'),
|
||||
keep_original=False,
|
||||
extra_args={'ContentType': 'text/plain'})
|
||||
except Exception as err:
|
||||
msg = 'Encountered problem pushing package index to {0}: {1}'.format(
|
||||
cache_prefix, err)
|
||||
@@ -1517,11 +1568,12 @@ def install_root_node(spec, allow_root, unsigned=False, force=False, sha256=None
|
||||
sha256 (str): optional sha256 of the binary package, to be checked
|
||||
before installation
|
||||
"""
|
||||
package = spack.repo.get(spec)
|
||||
# Early termination
|
||||
if spec.external or spec.virtual:
|
||||
warnings.warn("Skipping external or virtual package {0}".format(spec.format()))
|
||||
return
|
||||
elif spec.concrete and spec.installed and not force:
|
||||
elif spec.concrete and package.installed and not force:
|
||||
warnings.warn("Package for spec {0} already installed.".format(spec.format()))
|
||||
return
|
||||
|
||||
@@ -1559,14 +1611,16 @@ def install_single_spec(spec, allow_root=False, unsigned=False, force=False):
|
||||
install_root_node(node, allow_root=allow_root, unsigned=unsigned, force=force)
|
||||
|
||||
|
||||
def try_direct_fetch(spec, mirrors=None):
|
||||
def try_direct_fetch(spec, full_hash_match=False, mirrors=None):
|
||||
"""
|
||||
Try to find the spec directly on the configured mirrors
|
||||
"""
|
||||
deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
|
||||
specfile_name = tarball_name(spec, '.spec.json')
|
||||
specfile_is_json = True
|
||||
lenient = not full_hash_match
|
||||
found_specs = []
|
||||
spec_full_hash = spec.full_hash()
|
||||
|
||||
for mirror in spack.mirror.MirrorCollection(mirrors=mirrors).values():
|
||||
buildcache_fetch_url_yaml = url_util.join(
|
||||
@@ -1596,21 +1650,29 @@ def try_direct_fetch(spec, mirrors=None):
|
||||
fetched_spec = Spec.from_yaml(specfile_contents)
|
||||
fetched_spec._mark_concrete()
|
||||
|
||||
found_specs.append({
|
||||
'mirror_url': mirror.fetch_url,
|
||||
'spec': fetched_spec,
|
||||
})
|
||||
# Do not recompute the full hash for the fetched spec, instead just
|
||||
# read the property.
|
||||
if lenient or fetched_spec._full_hash == spec_full_hash:
|
||||
found_specs.append({
|
||||
'mirror_url': mirror.fetch_url,
|
||||
'spec': fetched_spec,
|
||||
})
|
||||
|
||||
return found_specs
|
||||
|
||||
|
||||
def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False):
|
||||
def get_mirrors_for_spec(spec=None, full_hash_match=False,
|
||||
mirrors_to_check=None, index_only=False):
|
||||
"""
|
||||
Check if concrete spec exists on mirrors and return a list
|
||||
indicating the mirrors on which it can be found
|
||||
|
||||
Args:
|
||||
spec (spack.spec.Spec): The spec to look for in binary mirrors
|
||||
full_hash_match (bool): If True, only includes mirrors where the spec
|
||||
full hash matches the locally computed full hash of the ``spec``
|
||||
argument. If False, any mirror which has a matching DAG hash
|
||||
is included in the results.
|
||||
mirrors_to_check (dict): Optionally override the configured mirrors
|
||||
with the mirrors in this dictionary.
|
||||
index_only (bool): Do not attempt direct fetching of ``spec.json``
|
||||
@@ -1627,14 +1689,29 @@ def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False):
|
||||
tty.debug("No Spack mirrors are currently configured")
|
||||
return {}
|
||||
|
||||
results = binary_index.find_built_spec(spec)
|
||||
results = []
|
||||
lenient = not full_hash_match
|
||||
spec_full_hash = spec.full_hash()
|
||||
|
||||
def filter_candidates(candidate_list):
|
||||
filtered_candidates = []
|
||||
for candidate in candidate_list:
|
||||
candidate_full_hash = candidate['spec']._full_hash
|
||||
if lenient or spec_full_hash == candidate_full_hash:
|
||||
filtered_candidates.append(candidate)
|
||||
return filtered_candidates
|
||||
|
||||
candidates = binary_index.find_built_spec(spec)
|
||||
if candidates:
|
||||
results = filter_candidates(candidates)
|
||||
|
||||
# Maybe we just didn't have the latest information from the mirror, so
|
||||
# try to fetch directly, unless we are only considering the indices.
|
||||
if not results and not index_only:
|
||||
results = try_direct_fetch(spec, mirrors=mirrors_to_check)
|
||||
# We found a spec by the direct fetch approach, we might as well
|
||||
# add it to our mapping.
|
||||
results = try_direct_fetch(spec,
|
||||
full_hash_match=full_hash_match,
|
||||
mirrors=mirrors_to_check)
|
||||
|
||||
if results:
|
||||
binary_index.update_spec(spec, results)
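A sketch of how a caller opts into the stricter matching (the module path ``spack.binary_distribution`` and the ``zlib`` spec are illustrative assumptions)::

    import spack.binary_distribution as bindist
    import spack.spec

    spec = spack.spec.Spec("zlib").concretized()  # hypothetical concrete spec

    # Only report mirrors whose cached spec carries the same full hash that
    # we compute locally; rely on cached indices and skip direct fetches.
    matches = bindist.get_mirrors_for_spec(spec, full_hash_match=True, index_only=True)
    for entry in matches or []:
        print(entry["mirror_url"], entry["spec"].dag_hash())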
|
||||
|
||||
@@ -1784,35 +1861,124 @@ def push_keys(*mirrors, **kwargs):
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
|
||||
def needs_rebuild(spec, mirror_url):
|
||||
def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
|
||||
if not spec.concrete:
|
||||
raise ValueError('spec must be concrete to check against mirror')
|
||||
|
||||
pkg_name = spec.name
|
||||
pkg_version = spec.version
|
||||
pkg_hash = spec.dag_hash()
|
||||
|
||||
tty.debug('Checking {0}-{1}, dag_hash = {2}'.format(
|
||||
pkg_name, pkg_version, pkg_hash))
|
||||
pkg_hash = spec.dag_hash()
|
||||
pkg_full_hash = spec.full_hash()
|
||||
|
||||
tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format(
|
||||
pkg_name, pkg_version, pkg_hash, pkg_full_hash))
|
||||
tty.debug(spec.tree())
|
||||
|
||||
# Try to retrieve the specfile directly, based on the known
|
||||
# format of the name, in order to determine if the package
|
||||
# needs to be rebuilt.
|
||||
cache_prefix = build_cache_prefix(mirror_url)
|
||||
specfile_is_json = True
|
||||
specfile_name = tarball_name(spec, '.spec.json')
|
||||
deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
|
||||
specfile_path = os.path.join(cache_prefix, specfile_name)
|
||||
deprecated_specfile_path = os.path.join(cache_prefix,
|
||||
deprecated_specfile_name)
|
||||
|
||||
# Only check for the presence of the json version of the spec. If the
|
||||
# mirror only has the yaml version, or doesn't have the spec at all, we
|
||||
# need to rebuild.
|
||||
return not web_util.url_exists(specfile_path)
|
||||
result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
|
||||
spec.short_spec, '' if rebuild_on_errors else 'not ')
|
||||
|
||||
try:
|
||||
_, _, spec_file = web_util.read_from_url(specfile_path)
|
||||
except (URLError, web_util.SpackWebError) as url_err:
|
||||
try:
|
||||
_, _, spec_file = web_util.read_from_url(deprecated_specfile_path)
|
||||
specfile_is_json = False
|
||||
except (URLError, web_util.SpackWebError) as url_err_y:
|
||||
err_msg = [
|
||||
'Unable to determine whether {0} needs rebuilding,',
|
||||
' caught exception attempting to read from {1} or {2}.',
|
||||
]
|
||||
tty.error(''.join(err_msg).format(
|
||||
spec.short_spec,
|
||||
specfile_path,
|
||||
deprecated_specfile_path))
|
||||
tty.debug(url_err)
|
||||
tty.debug(url_err_y)
|
||||
tty.warn(result_of_error)
|
||||
return rebuild_on_errors
|
||||
|
||||
spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
|
||||
if not spec_file_contents:
|
||||
tty.error('Reading {0} returned nothing'.format(
|
||||
specfile_path if specfile_is_json else deprecated_specfile_path))
|
||||
tty.warn(result_of_error)
|
||||
return rebuild_on_errors
|
||||
|
||||
spec_dict = (sjson.load(spec_file_contents)
|
||||
if specfile_is_json else syaml.load(spec_file_contents))
|
||||
|
||||
try:
|
||||
nodes = spec_dict['spec']['nodes']
|
||||
except KeyError:
|
||||
# Prior node dict format omitted 'nodes' key
|
||||
nodes = spec_dict['spec']
|
||||
name = spec.name
|
||||
|
||||
# In the old format:
|
||||
# The "spec" key represents a list of objects, each with a single
|
||||
# key that is the package name. While the list usually just contains
|
||||
# a single object, we iterate over the list looking for the object
|
||||
# with the name of this concrete spec as a key, out of an abundance
|
||||
# of caution.
|
||||
# In format version 2:
|
||||
# ['spec']['nodes'] is still a list of objects, but with a
|
||||
# multitude of keys. The list will commonly contain many objects, and in the
|
||||
# case of build specs, it is highly likely that the same name will occur
|
||||
# once as the actual package, and then again as the build provenance of that
|
||||
# same package. Hence format version 2 matches on the dag hash, not name.
|
||||
if nodes and 'name' not in nodes[0]:
|
||||
# old style
|
||||
cached_pkg_specs = [item[name] for item in nodes if name in item]
|
||||
elif nodes and spec_dict['spec']['_meta']['version'] == 2:
|
||||
cached_pkg_specs = [item for item in nodes
|
||||
if item[ht.dag_hash.name] == spec.dag_hash()]
|
||||
cached_target = cached_pkg_specs[0] if cached_pkg_specs else None
|
||||
|
||||
# If either the full_hash didn't exist in the specfile, or it
|
||||
# did, but didn't match the one we computed locally, then we should
|
||||
# just rebuild. This can be simplified once the dag_hash and the
|
||||
# full_hash become the same thing.
|
||||
rebuild = False
|
||||
|
||||
if not cached_target:
|
||||
reason = 'did not find spec in specfile contents'
|
||||
rebuild = True
|
||||
elif ht.full_hash.name not in cached_target:
|
||||
reason = 'full_hash was missing from remote specfile'
|
||||
rebuild = True
|
||||
else:
|
||||
full_hash = cached_target[ht.full_hash.name]
|
||||
if full_hash != pkg_full_hash:
|
||||
reason = 'hash mismatch, remote = {0}, local = {1}'.format(
|
||||
full_hash, pkg_full_hash)
|
||||
rebuild = True
|
||||
|
||||
if rebuild:
|
||||
tty.msg('Rebuilding {0}, reason: {1}'.format(
|
||||
spec.short_spec, reason))
|
||||
tty.msg(spec.tree())
|
||||
|
||||
return rebuild
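For reference, the two specfile layouts that ``needs_rebuild`` distinguishes look roughly like this (hypothetical, heavily trimmed examples)::

    # Old format: "spec" is a list of single-key objects keyed by package name.
    old_specfile = {
        "spec": [
            {"zlib": {"version": "1.2.11", "full_hash": "abc123"}}
        ]
    }

    # Format version 2: "spec"/"nodes" is a flat list of node objects; the
    # matching node is found by DAG hash rather than by name.
    new_specfile = {
        "spec": {
            "_meta": {"version": 2},
            "nodes": [
                {"name": "zlib", "hash": "dagh4sh", "full_hash": "abc123"}
            ]
        }
    }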
|
||||
|
||||
|
||||
def check_specs_against_mirrors(mirrors, specs, output_file=None):
def check_specs_against_mirrors(mirrors, specs, output_file=None,
rebuild_on_errors=False):
"""Check all the given specs against buildcaches on the given mirrors and
determine if any of the specs need to be rebuilt. Specs need to be rebuilt
when their hash doesn't exist in the mirror.
determine if any of the specs need to be rebuilt. Reasons for needing to
rebuild include binary cache for spec isn't present on a mirror, or it is
present but the full_hash has changed since last time spec was built.

Arguments:
mirrors (dict): Mirrors to check against
@@ -1820,6 +1986,8 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None):
output_file (str): Path to output file to be written. If provided,
mirrors with missing or out-of-date specs will be formatted as a
JSON object and written to this file.
rebuild_on_errors (bool): Treat any errors encountered while
checking specs as a signal to rebuild package.

Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.

@@ -1831,7 +1999,7 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None):
rebuild_list = []

for spec in specs:
if needs_rebuild(spec, mirror.fetch_url):
if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors):
rebuild_list.append({
'short_spec': spec.short_spec,
'hash': spec.dag_hash()
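A minimal usage sketch of the extended signature above; the mirror name, URL, and output path are placeholders, and the shape of the mirrors argument is assumed from the docstring rather than taken from real configuration.

```python
# Hypothetical call site for the signature shown above; mirror name, URL and
# output path are placeholders.
import spack.binary_distribution as bindist
import spack.cmd

mirrors = {'example-mirror': 'https://mirror.example.com/buildcache'}
specs = spack.cmd.parse_specs(['zlib'], concretize=True)

# Returns 1 if any spec is missing or out of date on any mirror, 0 otherwise;
# with rebuild_on_errors=True, errors while checking also count as "rebuild".
exit_code = bindist.check_specs_against_mirrors(
    mirrors, specs, output_file='rebuilds.json', rebuild_on_errors=True)
```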
@@ -21,7 +21,6 @@

import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.lang import GroupedExceptionHandler

import spack.binary_distribution
import spack.config
@@ -418,10 +417,11 @@ def _make_bootstrapper(conf):
return _bootstrap_methods[btype](conf)


def _validate_source_is_trusted(conf):
def _source_is_trusted(conf):
trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
if name not in trusted:
raise ValueError('source is not trusted')
return False
return trusted[name]


def spec_for_current_python():
@@ -488,25 +488,34 @@ def ensure_module_importable_or_raise(module, abstract_spec=None):
abstract_spec = abstract_spec or module
source_configs = spack.config.get('bootstrap:sources', [])

h = GroupedExceptionHandler()
errors = {}

for current_config in source_configs:
with h.forward(current_config['name']):
_validate_source_is_trusted(current_config)
if not _source_is_trusted(current_config):
msg = ('[BOOTSTRAP MODULE {0}] Skipping source "{1}" since it is '
'not trusted').format(module, current_config['name'])
tty.debug(msg)
continue

b = _make_bootstrapper(current_config)
b = _make_bootstrapper(current_config)
try:
if b.try_import(module, abstract_spec):
return
except Exception as e:
msg = '[BOOTSTRAP MODULE {0}] Unexpected error "{1}"'
tty.debug(msg.format(module, str(e)))
errors[current_config['name']] = e

assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(module)  # noqa: E501
msg = 'cannot bootstrap the "{0}" Python module '.format(module)
# We couldn't import in any way, so raise an import error
msg = 'cannot bootstrap the "{0}" Python module'.format(module)
if abstract_spec:
msg += 'from spec "{0}" '.format(abstract_spec)
if tty.is_debug():
msg += h.grouped_message(with_tracebacks=True)
else:
msg += h.grouped_message(with_tracebacks=False)
msg += '\nRun `spack --debug ...` for more detailed errors'
msg += ' from spec "{0}"'.format(abstract_spec)
msg += ' due to the following failures:\n'
for method in errors:
err = errors[method]
msg += " '{0}' raised {1}: {2}\n".format(
method, err.__class__.__name__, str(err))
msg += ' Please run `spack -d spec zlib` for more verbose error messages'
raise ImportError(msg)
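The hunk above swaps the hand-rolled errors dict for GroupedExceptionHandler. Below is a stripped-down sketch of that pattern, with invented source names and a deliberately failing body, assuming (as the loop above implies) that forward() records the exception and lets the loop continue.

```python
# Toy sketch of the GroupedExceptionHandler pattern; source names and the
# failing call are stand-ins, not real bootstrap configurations.
from llnl.util.lang import GroupedExceptionHandler

h = GroupedExceptionHandler()
for name in ('github-actions', 'spack-install'):
    with h.forward(name):
        raise RuntimeError('could not bootstrap from {0}'.format(name))

# When nothing succeeded, the handler is truthy and can report every failure
# at once, with tracebacks only in debug mode.
if h:
    print(h.grouped_message(with_tracebacks=False))
```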
@@ -530,14 +539,15 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
|
||||
|
||||
executables_str = ', '.join(executables)
|
||||
source_configs = spack.config.get('bootstrap:sources', [])
|
||||
|
||||
h = GroupedExceptionHandler()
|
||||
|
||||
for current_config in source_configs:
|
||||
with h.forward(current_config['name']):
|
||||
_validate_source_is_trusted(current_config)
|
||||
if not _source_is_trusted(current_config):
|
||||
msg = ('[BOOTSTRAP EXECUTABLES {0}] Skipping source "{1}" since it is '
|
||||
'not trusted').format(executables_str, current_config['name'])
|
||||
tty.debug(msg)
|
||||
continue
|
||||
|
||||
b = _make_bootstrapper(current_config)
|
||||
b = _make_bootstrapper(current_config)
|
||||
try:
|
||||
if b.try_search_path(executables, abstract_spec):
|
||||
# Additional environment variables needed
|
||||
concrete_spec, cmd = b.last_search['spec'], b.last_search['command']
|
||||
@@ -552,16 +562,14 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
|
||||
)
|
||||
cmd.add_default_envmod(env_mods)
|
||||
return cmd
|
||||
except Exception as e:
|
||||
msg = '[BOOTSTRAP EXECUTABLES {0}] Unexpected error "{1}"'
|
||||
tty.debug(msg.format(executables_str, str(e)))
|
||||
|
||||
assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(executables_str) # noqa: E501
|
||||
msg = 'cannot bootstrap any of the {0} executables '.format(executables_str)
|
||||
# We couldn't import in any way, so raise an import error
|
||||
msg = 'cannot bootstrap any of the {0} executables'.format(executables_str)
|
||||
if abstract_spec:
|
||||
msg += 'from spec "{0}" '.format(abstract_spec)
|
||||
if tty.is_debug():
|
||||
msg += h.grouped_message(with_tracebacks=True)
|
||||
else:
|
||||
msg += h.grouped_message(with_tracebacks=False)
|
||||
msg += '\nRun `spack --debug ...` for more detailed errors'
|
||||
msg += ' from spec "{0}"'.format(abstract_spec)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
|
||||
|
||||
@@ -111,20 +111,6 @@
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'


def should_set_parallel_jobs(jobserver_support=False):
"""Returns true in general, except when:
- The env variable SPACK_NO_PARALLEL_MAKE=1 is set
- jobserver_support is enabled, and a jobserver was found.
"""
if (
jobserver_support and
'MAKEFLAGS' in os.environ and
'--jobserver' in os.environ['MAKEFLAGS']
):
return False
return not env_flag(SPACK_NO_PARALLEL_MAKE)
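A quick illustration of the two ways the helper above comes back False, using hypothetical environment values and assuming the function is importable from spack.build_environment as the hunk suggests.

```python
# Illustration only: hypothetical environment values for should_set_parallel_jobs().
import os

from spack.build_environment import should_set_parallel_jobs  # assumed location

os.environ['MAKEFLAGS'] = '-j8 --jobserver-auth=3,4'   # a make jobserver is active
assert should_set_parallel_jobs(jobserver_support=True) is False

del os.environ['MAKEFLAGS']
os.environ['SPACK_NO_PARALLEL_MAKE'] = '1'             # user disabled parallel make
assert should_set_parallel_jobs() is False
```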
class MakeExecutable(Executable):
|
||||
"""Special callable executable object for make so the user can specify
|
||||
parallelism options on a per-invocation basis. Specifying
|
||||
@@ -134,6 +120,9 @@ class MakeExecutable(Executable):
|
||||
call will name an environment variable which will be set to the
|
||||
parallelism level (without affecting the normal invocation with
|
||||
-j).
|
||||
|
||||
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
|
||||
everything.
|
||||
"""
|
||||
|
||||
def __init__(self, name, jobs):
|
||||
@@ -144,8 +133,9 @@ def __call__(self, *args, **kwargs):
|
||||
"""parallel, and jobs_env from kwargs are swallowed and used here;
|
||||
remaining arguments are passed through to the superclass.
|
||||
"""
|
||||
parallel = should_set_parallel_jobs(jobserver_support=True) and \
|
||||
kwargs.pop('parallel', self.jobs > 1)
|
||||
|
||||
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
|
||||
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
|
||||
|
||||
if parallel:
|
||||
args = ('-j{0}'.format(self.jobs),) + args
|
||||
@@ -191,7 +181,7 @@ def clean_environment():
|
||||
env.unset('PYTHONPATH')
|
||||
|
||||
# Affects GNU make, can e.g. indirectly inhibit enabling parallel build
|
||||
# env.unset('MAKEFLAGS')
|
||||
env.unset('MAKEFLAGS')
|
||||
|
||||
# Avoid that libraries of build dependencies get hijacked.
|
||||
env.unset('LD_PRELOAD')
|
||||
@@ -242,17 +232,6 @@ def clean_environment():
|
||||
# show useful matches.
|
||||
env.set('LC_ALL', build_lang)
|
||||
|
||||
remove_flags = set()
|
||||
keep_flags = set()
|
||||
if spack.config.get('config:flags:keep_werror') == 'all':
|
||||
keep_flags.add('-Werror*')
|
||||
else:
|
||||
if spack.config.get('config:flags:keep_werror') == 'specific':
|
||||
keep_flags.add('-Werror=*')
|
||||
remove_flags.add('-Werror*')
|
||||
env.set('SPACK_COMPILER_FLAGS_KEEP', '|'.join(keep_flags))
|
||||
env.set('SPACK_COMPILER_FLAGS_REMOVE', '|'.join(remove_flags))
|
||||
|
||||
# Remove any macports installs from the PATH. The macports ld can
|
||||
# cause conflicts with the built-in linker on el capitan. Solves
|
||||
# assembler issues, e.g.:
|
||||
@@ -850,7 +829,7 @@ def setup_package(pkg, dirty, context='build'):
|
||||
# PrgEnv modules on cray platform. Module unload does no damage when
|
||||
# unnecessary
|
||||
on_cray, _ = _on_cray()
|
||||
if on_cray and not dirty:
|
||||
if on_cray:
|
||||
for mod in ['cray-mpich', 'cray-libsci']:
|
||||
module('unload', mod)
|
||||
|
||||
@@ -1049,7 +1028,7 @@ def get_cmake_prefix_path(pkg):
|
||||
|
||||
|
||||
def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,
|
||||
input_multiprocess_fd, jsfd1, jsfd2):
|
||||
input_multiprocess_fd):
|
||||
|
||||
context = kwargs.get('context', 'build')
|
||||
|
||||
@@ -1156,8 +1135,6 @@ def child_fun():
|
||||
"""
|
||||
parent_pipe, child_pipe = multiprocessing.Pipe()
|
||||
input_multiprocess_fd = None
|
||||
jobserver_fd1 = None
|
||||
jobserver_fd2 = None
|
||||
|
||||
serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
|
||||
|
||||
@@ -1167,17 +1144,11 @@ def child_fun():
|
||||
'fileno'):
|
||||
input_fd = os.dup(sys.stdin.fileno())
|
||||
input_multiprocess_fd = MultiProcessFd(input_fd)
|
||||
mflags = os.environ.get('MAKEFLAGS', False)
|
||||
if mflags:
|
||||
m = re.search(r'--jobserver-[^=]*=(\d),(\d)', mflags)
|
||||
if m:
|
||||
jobserver_fd1 = MultiProcessFd(int(m.group(1)))
|
||||
jobserver_fd2 = MultiProcessFd(int(m.group(2)))
|
||||
|
||||
p = multiprocessing.Process(
|
||||
target=_setup_pkg_and_run,
|
||||
args=(serialized_pkg, function, kwargs, child_pipe,
|
||||
input_multiprocess_fd, jobserver_fd1, jobserver_fd2))
|
||||
input_multiprocess_fd))
|
||||
|
||||
p.start()
|
||||
|
||||
|
||||
@@ -210,10 +210,6 @@ def std_initconfig_entries(self):
|
||||
"#------------------{0}\n".format("-" * 60),
|
||||
]
|
||||
|
||||
def initconfig_package_entries(self):
|
||||
"""This method is to be overwritten by the package"""
|
||||
return []
|
||||
|
||||
def initconfig(self, spec, prefix):
|
||||
cache_entries = (self.std_initconfig_entries() +
|
||||
self.initconfig_compiler_entries() +
|
||||
|
||||
@@ -176,7 +176,6 @@ def _std_args(pkg):
|
||||
'-G', generator,
|
||||
define('CMAKE_INSTALL_PREFIX', convert_to_posix_path(pkg.prefix)),
|
||||
define('CMAKE_BUILD_TYPE', build_type),
|
||||
define('BUILD_TESTING', pkg.run_tests),
|
||||
]
|
||||
|
||||
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
|
||||
@@ -362,7 +361,6 @@ def cmake_args(self):
|
||||
|
||||
* CMAKE_INSTALL_PREFIX
|
||||
* CMAKE_BUILD_TYPE
|
||||
* BUILD_TESTING
|
||||
|
||||
which will be set automatically.
|
||||
|
||||
|
||||
@@ -107,10 +107,10 @@ def cuda_flags(arch_list):
|
||||
# each release of a new cuda minor version.
|
||||
conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
|
||||
conflicts('%gcc@11:', when='+cuda ^cuda@:11.4.0')
|
||||
conflicts('%gcc@12:', when='+cuda ^cuda@:11.7')
|
||||
conflicts('%gcc@12:', when='+cuda ^cuda@:11.6')
|
||||
conflicts('%clang@12:', when='+cuda ^cuda@:11.4.0')
|
||||
conflicts('%clang@13:', when='+cuda ^cuda@:11.5')
|
||||
conflicts('%clang@14:', when='+cuda ^cuda@:11.7')
|
||||
conflicts('%clang@14:', when='+cuda ^cuda@:11.6')
|
||||
|
||||
# https://gist.github.com/ax3l/9489132#gistcomment-3860114
|
||||
conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')
|
||||
|
||||
@@ -1,102 +0,0 @@
|
||||
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
|
||||
import os
|
||||
|
||||
from llnl.util.filesystem import find
|
||||
|
||||
from spack.directives import depends_on, extends
|
||||
from spack.multimethod import when
|
||||
from spack.package import PackageBase
|
||||
from spack.util.executable import Executable
|
||||
|
||||
|
||||
class LuaPackage(PackageBase):
|
||||
"""Specialized class for lua packages"""
|
||||
|
||||
phases = ['unpack', 'generate_luarocks_config', 'preprocess', 'install']
|
||||
#: This attribute is used in UI queries that need to know the build
|
||||
#: system base class
|
||||
build_system_class = 'LuaPackage'
|
||||
|
||||
list_depth = 1 # LuaRocks requires at least one level of spidering to find versions
|
||||
depends_on('lua-lang')
|
||||
extends('lua', when='^lua')
|
||||
with when('^lua-luajit'):
|
||||
extends('lua-luajit')
|
||||
depends_on('luajit')
|
||||
depends_on('lua-luajit+lualinks')
|
||||
with when('^lua-luajit-openresty'):
|
||||
extends('lua-luajit-openresty')
|
||||
depends_on('luajit')
|
||||
depends_on('lua-luajit-openresty+lualinks')
|
||||
|
||||
def unpack(self, spec, prefix):
|
||||
if os.path.splitext(self.stage.archive_file)[1] == '.rock':
|
||||
directory = self.luarocks('unpack', self.stage.archive_file, output=str)
|
||||
dirlines = directory.split('\n')
|
||||
# TODO: figure out how to scope this better
|
||||
os.chdir(dirlines[2])
|
||||
|
||||
def _generate_tree_line(self, name, prefix):
|
||||
return """{{ name = "{name}", root = "{prefix}" }};""".format(
|
||||
name=name,
|
||||
prefix=prefix,
|
||||
)
|
||||
|
||||
def _luarocks_config_path(self):
|
||||
return os.path.join(self.stage.source_path, 'spack_luarocks.lua')
|
||||
|
||||
def generate_luarocks_config(self, spec, prefix):
|
||||
spec = self.spec
|
||||
table_entries = []
|
||||
for d in spec.traverse(
|
||||
deptypes=("build", "run"), deptype_query="run"
|
||||
):
|
||||
if d.package.extends(self.extendee_spec):
|
||||
table_entries.append(self._generate_tree_line(d.name, d.prefix))
|
||||
|
||||
path = self._luarocks_config_path()
|
||||
with open(path, 'w') as config:
|
||||
config.write(
|
||||
"""
|
||||
deps_mode="all"
|
||||
rocks_trees={{
|
||||
{}
|
||||
}}
|
||||
""".format(
|
||||
"\n".join(table_entries)
|
||||
)
|
||||
)
|
||||
return path
|
||||
|
||||
def setup_build_environment(self, env):
|
||||
env.set('LUAROCKS_CONFIG', self._luarocks_config_path())
|
||||
|
||||
def preprocess(self, spec, prefix):
|
||||
"""Override this to preprocess source before building with luarocks"""
|
||||
pass
|
||||
|
||||
@property
|
||||
def lua(self):
|
||||
return Executable(self.spec['lua-lang'].prefix.bin.lua)
|
||||
|
||||
@property
|
||||
def luarocks(self):
|
||||
lr = Executable(self.spec['lua-lang'].prefix.bin.luarocks)
|
||||
return lr
|
||||
|
||||
def luarocks_args(self):
|
||||
return []
|
||||
|
||||
def install(self, spec, prefix):
|
||||
rock = '.'
|
||||
specs = find('.', '*.rockspec', recursive=False)
|
||||
if specs:
|
||||
rock = specs[0]
|
||||
rocks_args = self.luarocks_args()
|
||||
rocks_args.append(rock)
|
||||
self.luarocks('--tree=' + prefix, 'make', *rocks_args)
|
||||
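For context, a hypothetical package built on the LuaPackage base class above might look like the sketch below; the package name, dependency, and luarocks argument are invented, and the import paths are assumptions.

```python
# Hypothetical LuaRocks-based package using the base class above; the name,
# dependency, and argument are placeholders, and the module path is assumed.
from spack.build_systems.lua import LuaPackage
from spack.directives import depends_on


class LuaExample(LuaPackage):
    """Invented example of a rock installed via `luarocks make`."""

    depends_on('lua-lpeg', type='run')

    def luarocks_args(self):
        # Extra arguments are inserted before the rockspec path in `luarocks make`.
        return ['EXAMPLE_FLAG=1']
```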
@@ -30,15 +30,6 @@ class IntelOneApiPackage(Package):
|
||||
# organization (e.g. University/Company).
|
||||
redistribute_source = False
|
||||
|
||||
@staticmethod
|
||||
def update_description(cls):
|
||||
"""Updates oneapi package descriptions with common text."""
|
||||
|
||||
text = """ LICENSE INFORMATION: By downloading and using this software, you agree to the terms
|
||||
and conditions of the software license agreements at https://intel.ly/393CijO."""
|
||||
cls.__doc__ = cls.__doc__ + text
|
||||
return cls
|
||||
|
||||
@property
|
||||
def component_dir(self):
|
||||
"""Subdirectory for this component in the install prefix."""
|
||||
|
||||
@@ -90,8 +90,8 @@ def _create_buildgroup(opener, headers, url, project, group_name, group_type):
|
||||
return build_group_id
|
||||
|
||||
|
||||
def _populate_buildgroup(job_names, group_name, project, site,
|
||||
credentials, cdash_url):
|
||||
def populate_buildgroup(job_names, group_name, project, site,
|
||||
credentials, cdash_url):
|
||||
url = "{0}/api/v1/buildgroup.php".format(cdash_url)
|
||||
|
||||
headers = {
|
||||
@@ -132,30 +132,16 @@ def _populate_buildgroup(job_names, group_name, project, site,
|
||||
response_code = response.getcode()
|
||||
|
||||
if response_code != 200:
|
||||
msg = 'Error response code ({0}) in _populate_buildgroup'.format(
|
||||
msg = 'Error response code ({0}) in populate_buildgroup'.format(
|
||||
response_code)
|
||||
tty.warn(msg)
|
||||
|
||||
|
||||
def _is_main_phase(phase_name):
|
||||
def is_main_phase(phase_name):
|
||||
return True if phase_name == 'specs' else False
|
||||
|
||||
|
||||
def get_job_name(phase, strip_compiler, spec, osarch, build_group):
|
||||
""" Given the necessary parts, format the gitlab job name
|
||||
|
||||
Arguments:
|
||||
phase (str): Either 'specs' for the main phase, or the name of a
|
||||
bootstrapping phase
|
||||
strip_compiler (bool): Should compiler be stripped from job name
|
||||
spec (spack.spec.Spec): Spec job will build
|
||||
osarch: Architecture TODO: (this is a spack.spec.ArchSpec,
|
||||
but sphinx doesn't recognize the type and fails).
|
||||
build_group (str): Name of build group this job belongs to (a CDash
|
||||
notion)
|
||||
|
||||
Returns: The job name
|
||||
"""
|
||||
item_idx = 0
|
||||
format_str = ''
|
||||
format_args = []
|
||||
@@ -177,7 +163,7 @@ def get_job_name(phase, strip_compiler, spec, osarch, build_group):
|
||||
format_args.append(spec.version)
|
||||
item_idx += 1
|
||||
|
||||
if _is_main_phase(phase) is True or strip_compiler is False:
|
||||
if is_main_phase(phase) is True or strip_compiler is False:
|
||||
format_str += ' {{{0}}}'.format(item_idx)
|
||||
format_args.append(spec.compiler)
|
||||
item_idx += 1
|
||||
@@ -194,12 +180,12 @@ def get_job_name(phase, strip_compiler, spec, osarch, build_group):
|
||||
return format_str.format(*format_args)
|
||||
|
||||
|
||||
def _get_cdash_build_name(spec, build_group):
|
||||
def get_cdash_build_name(spec, build_group):
|
||||
return '{0}@{1}%{2} arch={3} ({4})'.format(
|
||||
spec.name, spec.version, spec.compiler, spec.architecture, build_group)
|
||||
|
||||
|
||||
def _get_spec_string(spec):
|
||||
def get_spec_string(spec):
|
||||
format_elements = [
|
||||
'{name}{@version}',
|
||||
'{%compiler}',
|
||||
@@ -211,15 +197,15 @@ def _get_spec_string(spec):
|
||||
return spec.format(''.join(format_elements))
|
||||
|
||||
|
||||
def _format_root_spec(spec, main_phase, strip_compiler):
|
||||
def format_root_spec(spec, main_phase, strip_compiler):
|
||||
if main_phase is False and strip_compiler is True:
|
||||
return '{0}@{1} arch={2}'.format(
|
||||
spec.name, spec.version, spec.architecture)
|
||||
else:
|
||||
return spec.dag_hash()
|
||||
return spec.build_hash()
|
||||
|
||||
|
||||
def _spec_deps_key(s):
|
||||
def spec_deps_key(s):
|
||||
return '{0}/{1}'.format(s.name, s.dag_hash(7))
|
||||
|
||||
|
||||
@@ -231,8 +217,8 @@ def _add_dependency(spec_label, dep_label, deps):
|
||||
deps[spec_label].add(dep_label)
|
||||
|
||||
|
||||
def _get_spec_dependencies(specs, deps, spec_labels, check_index_only=False):
|
||||
spec_deps_obj = _compute_spec_deps(specs, check_index_only=check_index_only)
|
||||
def get_spec_dependencies(specs, deps, spec_labels, check_index_only=False):
|
||||
spec_deps_obj = compute_spec_deps(specs, check_index_only=check_index_only)
|
||||
|
||||
if spec_deps_obj:
|
||||
dependencies = spec_deps_obj['dependencies']
|
||||
@@ -280,11 +266,11 @@ def stage_spec_jobs(specs, check_index_only=False):
|
||||
|
||||
"""
|
||||
|
||||
# The convenience method below, "_remove_satisfied_deps()", does not modify
|
||||
# The convenience method below, "remove_satisfied_deps()", does not modify
|
||||
# the "deps" parameter. Instead, it returns a new dictionary where only
|
||||
# dependencies which have not yet been satisfied are included in the
|
||||
# return value.
|
||||
def _remove_satisfied_deps(deps, satisfied_list):
|
||||
def remove_satisfied_deps(deps, satisfied_list):
|
||||
new_deps = {}
|
||||
|
||||
for key, value in iteritems(deps):
|
||||
@@ -297,7 +283,7 @@ def _remove_satisfied_deps(deps, satisfied_list):
|
||||
deps = {}
|
||||
spec_labels = {}
|
||||
|
||||
_get_spec_dependencies(
|
||||
get_spec_dependencies(
|
||||
specs, deps, spec_labels, check_index_only=check_index_only)
|
||||
|
||||
# Save the original deps, as we need to return them at the end of the
|
||||
@@ -316,7 +302,7 @@ def _remove_satisfied_deps(deps, satisfied_list):
|
||||
# Note that "dependencies" is a dictionary mapping each dependent
|
||||
# package to the set of not-yet-handled dependencies. The final step
|
||||
# below removes all the dependencies that are handled by this stage.
|
||||
dependencies = _remove_satisfied_deps(dependencies, next_stage)
|
||||
dependencies = remove_satisfied_deps(dependencies, next_stage)
|
||||
|
||||
if unstaged:
|
||||
stages.append(unstaged.copy())
|
||||
@@ -324,12 +310,13 @@ def _remove_satisfied_deps(deps, satisfied_list):
|
||||
return spec_labels, deps, stages
|
||||
|
||||
|
||||
def _print_staging_summary(spec_labels, dependencies, stages):
|
||||
def print_staging_summary(spec_labels, dependencies, stages):
|
||||
if not stages:
|
||||
return
|
||||
|
||||
tty.msg(' Staging summary ([x] means a job needs rebuilding):')
|
||||
for stage_index, stage in enumerate(stages):
|
||||
tty.msg(' Staging summary:')
|
||||
stage_index = 0
|
||||
for stage in stages:
|
||||
tty.msg(' stage {0} ({1} jobs):'.format(stage_index, len(stage)))
|
||||
|
||||
for job in sorted(stage):
|
||||
@@ -337,10 +324,12 @@ def _print_staging_summary(spec_labels, dependencies, stages):
|
||||
tty.msg(' [{1}] {0} -> {2}'.format(
|
||||
job,
|
||||
'x' if spec_labels[job]['needs_rebuild'] else ' ',
|
||||
_get_spec_string(s)))
|
||||
get_spec_string(s)))
|
||||
|
||||
stage_index += 1
|
||||
|
||||
|
||||
def _compute_spec_deps(spec_list, check_index_only=False):
|
||||
def compute_spec_deps(spec_list, check_index_only=False):
|
||||
"""
|
||||
Computes all the dependencies for the spec(s) and generates a JSON
|
||||
object which provides both a list of unique spec names as well as a
|
||||
@@ -413,17 +402,17 @@ def append_dep(s, d):
|
||||
continue
|
||||
|
||||
up_to_date_mirrors = bindist.get_mirrors_for_spec(
|
||||
spec=s, index_only=check_index_only)
|
||||
spec=s, full_hash_match=True, index_only=check_index_only)
|
||||
|
||||
skey = _spec_deps_key(s)
|
||||
skey = spec_deps_key(s)
|
||||
spec_labels[skey] = {
|
||||
'spec': _get_spec_string(s),
|
||||
'spec': get_spec_string(s),
|
||||
'root': root_spec,
|
||||
'needs_rebuild': not up_to_date_mirrors,
|
||||
}
|
||||
|
||||
for d in s.dependencies(deptype=all):
|
||||
dkey = _spec_deps_key(d)
|
||||
dkey = spec_deps_key(d)
|
||||
if d.external:
|
||||
tty.msg('Will not stage external dep: {0}'.format(d))
|
||||
continue
|
||||
@@ -446,11 +435,11 @@ def append_dep(s, d):
|
||||
return deps_json_obj
|
||||
|
||||
|
||||
def _spec_matches(spec, match_string):
|
||||
def spec_matches(spec, match_string):
|
||||
return spec.satisfies(match_string)
|
||||
|
||||
|
||||
def _copy_attributes(attrs_list, src_dict, dest_dict):
|
||||
def copy_attributes(attrs_list, src_dict, dest_dict):
|
||||
for runner_attr in attrs_list:
|
||||
if runner_attr in src_dict:
|
||||
if runner_attr in dest_dict and runner_attr == 'tags':
|
||||
@@ -471,7 +460,7 @@ def _copy_attributes(attrs_list, src_dict, dest_dict):
|
||||
dest_dict[runner_attr] = copy.deepcopy(src_dict[runner_attr])
|
||||
|
||||
|
||||
def _find_matching_config(spec, gitlab_ci):
|
||||
def find_matching_config(spec, gitlab_ci):
|
||||
runner_attributes = {}
|
||||
overridable_attrs = [
|
||||
'image',
|
||||
@@ -482,16 +471,16 @@ def _find_matching_config(spec, gitlab_ci):
|
||||
'after_script',
|
||||
]
|
||||
|
||||
_copy_attributes(overridable_attrs, gitlab_ci, runner_attributes)
|
||||
copy_attributes(overridable_attrs, gitlab_ci, runner_attributes)
|
||||
|
||||
ci_mappings = gitlab_ci['mappings']
|
||||
for ci_mapping in ci_mappings:
|
||||
for match_string in ci_mapping['match']:
|
||||
if _spec_matches(spec, match_string):
|
||||
if spec_matches(spec, match_string):
|
||||
if 'runner-attributes' in ci_mapping:
|
||||
_copy_attributes(overridable_attrs,
|
||||
ci_mapping['runner-attributes'],
|
||||
runner_attributes)
|
||||
copy_attributes(overridable_attrs,
|
||||
ci_mapping['runner-attributes'],
|
||||
runner_attributes)
|
||||
return runner_attributes
|
||||
else:
|
||||
return None
|
||||
@@ -499,16 +488,16 @@ def _find_matching_config(spec, gitlab_ci):
|
||||
return runner_attributes
|
||||
|
||||
|
||||
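The matching walk above expects a gitlab-ci section shaped roughly like the sketch below; the image, tags, and match strings are invented placeholders, and `spec` stands for some concrete spack.spec.Spec.

```python
# Invented illustration of the structure find_matching_config() walks; the
# image, tags, and match strings are placeholders.
gitlab_ci = {
    'image': 'example/builder:latest',
    'tags': ['docker'],
    'mappings': [
        {
            'match': ['%gcc@9:', 'target=x86_64'],
            'runner-attributes': {
                'tags': ['large-x86_64'],
                'image': 'example/builder-gcc:latest',
            },
        },
    ],
}

# A spec satisfying one of the match strings picks up the mapping's
# runner-attributes merged over the top-level defaults.
runner_attribs = find_matching_config(spec, gitlab_ci)
```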
def _pkg_name_from_spec_label(spec_label):
|
||||
def pkg_name_from_spec_label(spec_label):
|
||||
return spec_label[:spec_label.index('/')]
|
||||
|
||||
|
||||
def _format_job_needs(phase_name, strip_compilers, dep_jobs,
|
||||
osname, build_group, prune_dag, stage_spec_dict,
|
||||
enable_artifacts_buildcache):
|
||||
def format_job_needs(phase_name, strip_compilers, dep_jobs,
|
||||
osname, build_group, prune_dag, stage_spec_dict,
|
||||
enable_artifacts_buildcache):
|
||||
needs_list = []
|
||||
for dep_job in dep_jobs:
|
||||
dep_spec_key = _spec_deps_key(dep_job)
|
||||
dep_spec_key = spec_deps_key(dep_job)
|
||||
dep_spec_info = stage_spec_dict[dep_spec_key]
|
||||
|
||||
if not prune_dag or dep_spec_info['needs_rebuild']:
|
||||
@@ -603,33 +592,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
prune_dag=False, check_index_only=False,
|
||||
run_optimizer=False, use_dependencies=False,
|
||||
artifacts_root=None):
|
||||
""" Generate a gitlab yaml file to run a dynamic chile pipeline from
|
||||
the spec matrix in the active environment.
|
||||
|
||||
Arguments:
|
||||
env (spack.environment.Environment): Activated environment object
|
||||
which must contain a gitlab-ci section describing how to map
|
||||
specs to runners
|
||||
print_summary (bool): Should we print a summary of all the jobs in
|
||||
the stages in which they were placed.
|
||||
output_file (str): File path where generated file should be written
|
||||
prune_dag (bool): If True, do not generate jobs for specs already
|
||||
exist built on the mirror.
|
||||
check_index_only (bool): If True, attempt to fetch the mirror index
|
||||
and only use that to determine whether built specs on the mirror
|
||||
need to be rebuilt (this mode results in faster yaml generation time). Otherwise, also
|
||||
check each spec directly by url (useful if there is no index or it
|
||||
might be out of date).
|
||||
run_optimizer (bool): If True, post-process the generated yaml to try
|
||||
to reduce the size (attempts to collect repeated configuration
|
||||
and replace with definitions).
|
||||
use_dependencies (bool): If true, use "dependencies" rather than "needs"
|
||||
("needs" allows DAG scheduling). Useful if gitlab instance cannot
|
||||
be configured to handle more than a few "needs" per job.
|
||||
artifacts_root (str): Path where artifacts like logs, environment
|
||||
files (spack.yaml, spack.lock), etc should be written. GitLab
|
||||
requires this to be within the project directory.
|
||||
"""
|
||||
with spack.concretize.disable_compiler_existence_check():
|
||||
with env.write_transaction():
|
||||
env.concretize()
|
||||
@@ -842,7 +804,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
max_needs_job = ''
|
||||
|
||||
# If this is configured, spack will fail "spack ci generate" if it
|
||||
# generates any hash which exists under the broken specs url.
|
||||
# generates any full hash which exists under the broken specs url.
|
||||
broken_spec_urls = None
|
||||
if broken_specs_url:
|
||||
if broken_specs_url.startswith('http'):
|
||||
@@ -857,7 +819,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
phase_name = phase['name']
|
||||
strip_compilers = phase['strip-compilers']
|
||||
|
||||
main_phase = _is_main_phase(phase_name)
|
||||
main_phase = is_main_phase(phase_name)
|
||||
spec_labels, dependencies, stages = staged_phases[phase_name]
|
||||
|
||||
for stage_jobs in stages:
|
||||
@@ -868,9 +830,11 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
for spec_label in stage_jobs:
|
||||
spec_record = spec_labels[spec_label]
|
||||
root_spec = spec_record['rootSpec']
|
||||
pkg_name = _pkg_name_from_spec_label(spec_label)
|
||||
pkg_name = pkg_name_from_spec_label(spec_label)
|
||||
release_spec = root_spec[pkg_name]
|
||||
release_spec_full_hash = release_spec.full_hash()
|
||||
release_spec_dag_hash = release_spec.dag_hash()
|
||||
release_spec_build_hash = release_spec.build_hash()
|
||||
|
||||
if prune_untouched_packages:
|
||||
if release_spec not in affected_specs:
|
||||
@@ -879,7 +843,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
spec_record['needs_rebuild'] = False
|
||||
continue
|
||||
|
||||
runner_attribs = _find_matching_config(
|
||||
runner_attribs = find_matching_config(
|
||||
release_spec, gitlab_ci)
|
||||
|
||||
if not runner_attribs:
|
||||
@@ -933,13 +897,15 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
compiler_action = 'NONE'
|
||||
if len(phases) > 1:
|
||||
compiler_action = 'FIND_ANY'
|
||||
if _is_main_phase(phase_name):
|
||||
if is_main_phase(phase_name):
|
||||
compiler_action = 'INSTALL_MISSING'
|
||||
|
||||
job_vars = {
|
||||
'SPACK_ROOT_SPEC': _format_root_spec(
|
||||
'SPACK_ROOT_SPEC': format_root_spec(
|
||||
root_spec, main_phase, strip_compilers),
|
||||
'SPACK_JOB_SPEC_DAG_HASH': release_spec_dag_hash,
|
||||
'SPACK_JOB_SPEC_BUILD_HASH': release_spec_build_hash,
|
||||
'SPACK_JOB_SPEC_FULL_HASH': release_spec_full_hash,
|
||||
'SPACK_JOB_SPEC_PKG_NAME': release_spec.name,
|
||||
'SPACK_COMPILER_ACTION': compiler_action
|
||||
}
|
||||
@@ -958,15 +924,15 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
# purposes, so we only get the direct dependencies.
|
||||
dep_jobs = []
|
||||
for dep_label in dependencies[spec_label]:
|
||||
dep_pkg = _pkg_name_from_spec_label(dep_label)
|
||||
dep_pkg = pkg_name_from_spec_label(dep_label)
|
||||
dep_root = spec_labels[dep_label]['rootSpec']
|
||||
dep_jobs.append(dep_root[dep_pkg])
|
||||
|
||||
job_dependencies.extend(
|
||||
_format_job_needs(phase_name, strip_compilers,
|
||||
dep_jobs, osname, build_group,
|
||||
prune_dag, spec_labels,
|
||||
enable_artifacts_buildcache))
|
||||
format_job_needs(phase_name, strip_compilers,
|
||||
dep_jobs, osname, build_group,
|
||||
prune_dag, spec_labels,
|
||||
enable_artifacts_buildcache))
|
||||
|
||||
rebuild_spec = spec_record['needs_rebuild']
|
||||
|
||||
@@ -977,7 +943,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
# compiler we are supposed to use is listed in any of the
|
||||
# bootstrap spec lists, then we will add more dependencies to
|
||||
# the job (that compiler and maybe its dependencies as well).
|
||||
if _is_main_phase(phase_name):
|
||||
if is_main_phase(phase_name):
|
||||
spec_arch_family = (release_spec.architecture
|
||||
.target
|
||||
.microarchitecture
|
||||
@@ -1005,7 +971,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
# be rebuilt if the compiler targeted to build it
|
||||
# needs to be rebuilt.
|
||||
bs_specs, _, _ = staged_phases[bs['phase-name']]
|
||||
c_spec_key = _spec_deps_key(c_spec)
|
||||
c_spec_key = spec_deps_key(c_spec)
|
||||
rbld_comp = bs_specs[c_spec_key]['needs_rebuild']
|
||||
rebuild_spec = rebuild_spec or rbld_comp
|
||||
# Also update record so dependents do not fail to
|
||||
@@ -1019,14 +985,14 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
]
|
||||
|
||||
job_dependencies.extend(
|
||||
_format_job_needs(bs['phase-name'],
|
||||
bs['strip-compilers'],
|
||||
dep_jobs,
|
||||
str(bs_arch),
|
||||
build_group,
|
||||
prune_dag,
|
||||
bs_specs,
|
||||
enable_artifacts_buildcache))
|
||||
format_job_needs(bs['phase-name'],
|
||||
bs['strip-compilers'],
|
||||
dep_jobs,
|
||||
str(bs_arch),
|
||||
build_group,
|
||||
prune_dag,
|
||||
bs_specs,
|
||||
enable_artifacts_buildcache))
|
||||
else:
|
||||
debug_msg = ''.join([
|
||||
'Considered compiler {0} for spec ',
|
||||
@@ -1043,9 +1009,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
continue
|
||||
|
||||
if (broken_spec_urls is not None and
|
||||
release_spec_dag_hash in broken_spec_urls):
|
||||
release_spec_full_hash in broken_spec_urls):
|
||||
known_broken_specs_encountered.append('{0} ({1})'.format(
|
||||
release_spec, release_spec_dag_hash))
|
||||
release_spec, release_spec_full_hash))
|
||||
|
||||
if artifacts_root:
|
||||
job_dependencies.append({
|
||||
@@ -1056,7 +1022,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
job_vars['SPACK_SPEC_NEEDS_REBUILD'] = str(rebuild_spec)
|
||||
|
||||
if enable_cdash_reporting:
|
||||
cdash_build_name = _get_cdash_build_name(
|
||||
cdash_build_name = get_cdash_build_name(
|
||||
release_spec, build_group)
|
||||
all_job_names.append(cdash_build_name)
|
||||
job_vars['SPACK_CDASH_BUILD_NAME'] = cdash_build_name
|
||||
@@ -1121,7 +1087,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
phase_name = phase['name']
|
||||
tty.msg('Stages for phase "{0}"'.format(phase_name))
|
||||
phase_stages = staged_phases[phase_name]
|
||||
_print_staging_summary(*phase_stages)
|
||||
print_staging_summary(*phase_stages)
|
||||
|
||||
tty.debug('{0} build jobs generated in {1} stages'.format(
|
||||
job_id, stage_id))
|
||||
@@ -1133,8 +1099,8 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
# Use "all_job_names" to populate the build group for this set
|
||||
if enable_cdash_reporting and cdash_auth_token:
|
||||
try:
|
||||
_populate_buildgroup(all_job_names, build_group, cdash_project,
|
||||
cdash_site, cdash_auth_token, cdash_url)
|
||||
populate_buildgroup(all_job_names, build_group, cdash_project,
|
||||
cdash_site, cdash_auth_token, cdash_url)
|
||||
except (SpackError, HTTPError, URLError) as err:
|
||||
tty.warn('Problem populating buildgroup: {0}'.format(err))
|
||||
else:
|
||||
@@ -1170,9 +1136,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
cleanup_job = {}
|
||||
|
||||
if service_job_config:
|
||||
_copy_attributes(default_attrs,
|
||||
service_job_config,
|
||||
cleanup_job)
|
||||
copy_attributes(default_attrs,
|
||||
service_job_config,
|
||||
cleanup_job)
|
||||
|
||||
cleanup_job['stage'] = 'cleanup-temp-storage'
|
||||
cleanup_job['script'] = [
|
||||
@@ -1190,9 +1156,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
final_job = {}
|
||||
|
||||
if service_job_config:
|
||||
_copy_attributes(default_attrs,
|
||||
service_job_config,
|
||||
final_job)
|
||||
copy_attributes(default_attrs,
|
||||
service_job_config,
|
||||
final_job)
|
||||
|
||||
index_target_mirror = mirror_urls[0]
|
||||
if is_pr_pipeline:
|
||||
@@ -1263,9 +1229,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
noop_job = {}
|
||||
|
||||
if service_job_config:
|
||||
_copy_attributes(default_attrs,
|
||||
service_job_config,
|
||||
noop_job)
|
||||
copy_attributes(default_attrs,
|
||||
service_job_config,
|
||||
noop_job)
|
||||
|
||||
if 'script' not in noop_job:
|
||||
noop_job['script'] = [
|
||||
@@ -1288,7 +1254,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
|
||||
outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
|
||||
|
||||
|
||||
def _url_encode_string(input_string):
|
||||
def url_encode_string(input_string):
|
||||
encoded_keyval = urlencode({'donotcare': input_string})
|
||||
eq_idx = encoded_keyval.find('=') + 1
|
||||
encoded_value = encoded_keyval[eq_idx:]
|
||||
@@ -1296,17 +1262,6 @@ def _url_encode_string(input_string):
|
||||
|
||||
|
||||
def import_signing_key(base64_signing_key):
|
||||
""" Given Base64-encoded gpg key, decode and import it to use for
|
||||
signing packages.
|
||||
|
||||
Arguments:
|
||||
base64_signing_key (str): A gpg key including the secret key,
|
||||
armor-exported and base64 encoded, so it can be stored in a
|
||||
gitlab CI variable. For an example of how to generate such
|
||||
a key, see:
|
||||
|
||||
https://github.com/spack/spack-infrastructure/blob/main/gitlab-docker/files/gen-key
|
||||
"""
|
||||
if not base64_signing_key:
|
||||
tty.warn('No key found for signing/verifying packages')
|
||||
return
|
||||
@@ -1344,34 +1299,14 @@ def import_signing_key(base64_signing_key):
|
||||
|
||||
|
||||
def can_sign_binaries():
|
||||
""" Utility method to determine if this spack instance is capable of
|
||||
signing binary packages. This is currently only possible if the
|
||||
spack gpg keystore contains exactly one secret key."""
|
||||
return len(gpg_util.signing_keys()) == 1
|
||||
|
||||
|
||||
def can_verify_binaries():
|
||||
""" Utility method to determin if this spack instance is capable (at
|
||||
least in theory) of verifying signed binaries."""
|
||||
return len(gpg_util.public_keys()) >= 1
|
||||
|
||||
|
||||
def configure_compilers(compiler_action, scope=None):
|
||||
""" Depending on the compiler_action parameter, either turn on the
|
||||
install_missing_compilers config option, or find spack compilers,
|
||||
or do nothing. This is used from rebuild jobs in bootstrapping
|
||||
pipelines, where in the bootstrapping phase we would pass
|
||||
FIND_ANY in case of compiler-agnostic bootstrapping, while in the
|
||||
spec building phase we would pass INSTALL_MISSING in order to get
|
||||
spack to use the compiler which was built in the previous phase and
|
||||
is now sitting in the binary mirror.
|
||||
|
||||
Arguments:
|
||||
compiler_action (str): 'FIND_ANY', 'INSTALL_MISSING' have meanings
|
||||
described above. Any other value essentially results in a no-op.
|
||||
scope (spack.config.ConfigScope): Optional. The scope in which to look for
|
||||
compilers, in case 'FIND_ANY' was provided.
|
||||
"""
|
||||
if compiler_action == 'INSTALL_MISSING':
|
||||
tty.debug('Make sure bootstrapped compiler will be installed')
|
||||
config = cfg.get('config')
|
||||
@@ -1395,35 +1330,6 @@ def configure_compilers(compiler_action, scope=None):
|
||||
|
||||
|
||||
def get_concrete_specs(env, root_spec, job_name, compiler_action):
|
||||
""" Build a dictionary of concrete specs relevant to a particular
|
||||
rebuild job. This includes the root spec and the spec to be
|
||||
rebuilt (which could be the same).
|
||||
|
||||
Arguments:
|
||||
|
||||
env (spack.environment.Environment): Activated spack environment
|
||||
used to get concrete root spec by hash in case compiler_action
|
||||
is anything other than FIND_ANY.
|
||||
root_spec (str): If compiler_action is FIND_ANY root_spec is
|
||||
a string representation which can be turned directly into
|
||||
a spec, otherwise, it's a hash used to index the activated
|
||||
spack environment.
|
||||
job_name (str): Name of package to be built, used to index the
|
||||
concrete root spec and produce the concrete spec to be
|
||||
built.
|
||||
compiler_action (str): Determines how to interpret the root_spec
|
||||
parameter, either as a string representation or as a hash.
|
||||
|
||||
Returns:
|
||||
|
||||
.. code-block:: JSON
|
||||
|
||||
{
|
||||
"root": "<spec>",
|
||||
"<job-pkg-name>": "<spec>",
|
||||
}
|
||||
|
||||
"""
|
||||
spec_map = {
|
||||
'root': None,
|
||||
}
|
||||
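A short sketch of how a rebuild job consumes the mapping documented above, mirroring the call made from `spack ci rebuild` further down in this diff; the environment, root spec hash, and package name are placeholders.

```python
# Placeholder usage of get_concrete_specs(); env, the root spec hash, and the
# package name are stand-ins mirroring the `spack ci rebuild` call site.
spec_map = get_concrete_specs(env, 'abc1234def5678', 'zlib', 'INSTALL_MISSING')

root_spec = spec_map['root']    # concrete root spec for this pipeline
job_spec = spec_map['zlib']     # concrete spec this job is expected to rebuild
```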
@@ -1470,19 +1376,6 @@ def _push_mirror_contents(env, specfile_path, sign_binaries, mirror_url):
|
||||
|
||||
|
||||
def push_mirror_contents(env, specfile_path, mirror_url, sign_binaries):
|
||||
""" Push one or more binary packages to the mirror.
|
||||
|
||||
Arguments:
|
||||
|
||||
env (spack.environment.Environment): Optional environment. If
|
||||
provided, it is used to make sure binary package to push
|
||||
exists in the environment.
|
||||
specfile_path (str): Path to spec.json corresponding to built pkg
|
||||
to push.
|
||||
mirror_url (str): Base url of target mirror
|
||||
sign_binaries (bool): If True, spack will attempt to sign binary
|
||||
package before pushing.
|
||||
"""
|
||||
try:
|
||||
_push_mirror_contents(env, specfile_path, sign_binaries, mirror_url)
|
||||
except Exception as inst:
|
||||
@@ -1507,15 +1400,6 @@ def push_mirror_contents(env, specfile_path, mirror_url, sign_binaries):
|
||||
|
||||
|
||||
def copy_stage_logs_to_artifacts(job_spec, job_log_dir):
|
||||
""" Looks for spack-build-out.txt in the stage directory of the given
|
||||
job_spec, and attempts to copy the file into the directory given
|
||||
by job_log_dir.
|
||||
|
||||
Arguments:
|
||||
|
||||
job_spec (spack.spec.Spec): Spec associated with spack install log
|
||||
job_log_dir (str): Path into which build log should be copied
|
||||
"""
|
||||
try:
|
||||
job_pkg = spack.repo.get(job_spec)
|
||||
tty.debug('job package: {0}'.format(job_pkg))
|
||||
@@ -1534,14 +1418,6 @@ def copy_stage_logs_to_artifacts(job_spec, job_log_dir):
|
||||
|
||||
|
||||
def download_and_extract_artifacts(url, work_dir):
|
||||
""" Look for gitlab artifacts.zip at the given url, and attempt to download
|
||||
and extract the contents into the given work_dir
|
||||
|
||||
Arguments:
|
||||
|
||||
url (str): Complete url to artifacts.zip file
|
||||
work_dir (str): Path to destination where artifacts should be extracted
|
||||
"""
|
||||
tty.msg('Fetching artifacts from: {0}\n'.format(url))
|
||||
|
||||
headers = {
|
||||
@@ -1581,8 +1457,6 @@ def download_and_extract_artifacts(url, work_dir):
|
||||
|
||||
|
||||
def get_spack_info():
|
||||
""" If spack is running from a git repo, return the most recent git log
|
||||
entry, otherwise, return a string containing the spack version. """
|
||||
git_path = os.path.join(spack.paths.prefix, ".git")
|
||||
if os.path.exists(git_path):
|
||||
git = exe.which("git")
|
||||
@@ -1598,23 +1472,6 @@ def get_spack_info():
|
||||
|
||||
|
||||
def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
|
||||
""" Look in the local spack clone to find the checkout_commit, and if
|
||||
provided, the merge_commit given as arguments. If those commits can
|
||||
be found locally, then clone spack and attempt to recreate a merge
|
||||
commit with the same parent commits as tested in gitlab. This looks
|
||||
something like 1) git clone repo && cd repo 2) git checkout
|
||||
<checkout_commit> 3) git merge <merge_commit>. If there is no
|
||||
merge_commit provided, then skip step (3).
|
||||
|
||||
Arguments:
|
||||
|
||||
repro_dir (str): Location where spack should be cloned
|
||||
checkout_commit (str): SHA of PR branch commit
|
||||
merge_commit (str): SHA of target branch parent
|
||||
|
||||
Returns: True if git repo state was successfully recreated, or False
|
||||
otherwise.
|
||||
"""
|
||||
# figure out the path to the spack git version being used for the
|
||||
# reproduction
|
||||
print('checkout_commit: {0}'.format(checkout_commit))
|
||||
@@ -1656,7 +1513,7 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
|
||||
fail_on_error=False)
|
||||
|
||||
if git.returncode != 0:
|
||||
tty.error('Unable to clone your local spack repo:')
|
||||
tty.error('Unable to clone your local spac repo:')
|
||||
tty.msg(clone_out)
|
||||
return False
|
||||
|
||||
@@ -1689,18 +1546,6 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
|
||||
|
||||
|
||||
def reproduce_ci_job(url, work_dir):
|
||||
""" Given a url to gitlab artifacts.zip from a failed 'spack ci rebuild' job,
|
||||
attempt to setup an environment in which the failure can be reproduced
|
||||
locally. This entails the following:
|
||||
|
||||
First download and extract artifacts. Then look through those artifacts
|
||||
to glean some information needed for the reproducer (e.g. one of the
|
||||
artifacts contains information about the version of spack tested by
|
||||
gitlab, another is the generated pipeline yaml containing details
|
||||
of the job like the docker image used to run it). The output of this
|
||||
function is a set of printed instructions for running docker and then
|
||||
commands to run to reproduce the build once inside the container.
|
||||
"""
|
||||
download_and_extract_artifacts(url, work_dir)
|
||||
|
||||
lock_file = fs.find(work_dir, 'spack.lock')[0]
|
||||
|
||||
@@ -155,17 +155,31 @@ def parse_specs(args, **kwargs):
|
||||
normalize = kwargs.get('normalize', False)
|
||||
tests = kwargs.get('tests', False)
|
||||
|
||||
sargs = args
|
||||
if not isinstance(args, six.string_types):
|
||||
sargs = ' '.join(spack.util.string.quote(args))
|
||||
specs = spack.spec.parse(sargs)
|
||||
for spec in specs:
|
||||
if concretize:
|
||||
spec.concretize(tests=tests) # implies normalize
|
||||
elif normalize:
|
||||
spec.normalize(tests=tests)
|
||||
try:
|
||||
sargs = args
|
||||
if not isinstance(args, six.string_types):
|
||||
sargs = ' '.join(spack.util.string.quote(args))
|
||||
specs = spack.spec.parse(sargs)
|
||||
for spec in specs:
|
||||
if concretize:
|
||||
spec.concretize(tests=tests) # implies normalize
|
||||
elif normalize:
|
||||
spec.normalize(tests=tests)
|
||||
|
||||
return specs
|
||||
return specs
|
||||
|
||||
except spack.spec.SpecParseError as e:
|
||||
msg = e.message + "\n" + str(e.string) + "\n"
|
||||
msg += (e.pos + 2) * " " + "^"
|
||||
raise spack.error.SpackError(msg)
|
||||
|
||||
except spack.error.SpecError as e:
|
||||
|
||||
msg = e.message
|
||||
if e.long_message:
|
||||
msg += e.long_message
|
||||
|
||||
raise spack.error.SpackError(msg)
|
||||
|
||||
|
||||
def matching_spec_from_env(spec):
|
||||
|
||||
@@ -161,6 +161,11 @@ def setup_parser(subparser):
|
||||
help=('Check single spec from json or yaml file instead of release ' +
|
||||
'specs file'))
|
||||
|
||||
check.add_argument(
|
||||
'--rebuild-on-error', default=False, action='store_true',
|
||||
help="Default to rebuilding packages if errors are encountered " +
|
||||
"during the process of checking whether rebuilding is needed")
|
||||
|
||||
check.set_defaults(func=check_fn)
|
||||
|
||||
# Download tarball and specfile
|
||||
@@ -356,7 +361,7 @@ def list_fn(args):
|
||||
try:
|
||||
specs = bindist.update_cache_and_get_specs()
|
||||
except bindist.FetchCacheError as e:
|
||||
tty.die(e)
|
||||
tty.error(e)
|
||||
|
||||
if not args.allarch:
|
||||
arch = spack.spec.Spec.default_arch()
|
||||
@@ -386,11 +391,17 @@ def preview_fn(args):
|
||||
constraints = spack.cmd.parse_specs(args.specs)
|
||||
specs = spack.store.find(constraints, multiple=True)
|
||||
|
||||
def status_fn(spec):
|
||||
if spack.relocate.is_relocatable(spec):
|
||||
return spec.install_stati.installed
|
||||
else:
|
||||
return spec.install_stati.unknown
|
||||
|
||||
# Cycle over the specs that match
|
||||
for spec in specs:
|
||||
print("Relocatable nodes")
|
||||
print("--------------------------------")
|
||||
print(spec.tree(status_fn=spack.relocate.is_relocatable))
|
||||
print(spec.tree(status_fn=status_fn))
|
||||
|
||||
|
||||
def check_fn(args):
|
||||
@@ -425,7 +436,7 @@ def check_fn(args):
|
||||
sys.exit(0)
|
||||
|
||||
sys.exit(bindist.check_specs_against_mirrors(
|
||||
configured_mirrors, specs, args.output_file))
|
||||
configured_mirrors, specs, args.output_file, args.rebuild_on_error))
|
||||
|
||||
|
||||
def download_fn(args):
|
||||
@@ -478,12 +489,11 @@ def save_specfile_fn(args):
|
||||
if args.root_specfile:
|
||||
with open(args.root_specfile) as fd:
|
||||
root_spec_as_json = fd.read()
|
||||
spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
|
||||
else:
|
||||
root_spec = Spec(args.root_spec)
|
||||
root_spec.concretize()
|
||||
root_spec_as_json = root_spec.to_json(hash=ht.dag_hash)
|
||||
spec_format = 'json'
|
||||
root_spec_as_json = root_spec.to_json(hash=ht.build_hash)
|
||||
spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
|
||||
save_dependency_specfiles(
|
||||
root_spec_as_json, args.specfile_dir, args.specs.split(), spec_format)
|
||||
|
||||
@@ -697,7 +707,7 @@ def update_index(mirror_url, update_keys=False):
|
||||
|
||||
def update_index_fn(args):
|
||||
"""Update a buildcache index."""
|
||||
outdir = 'file://.'
|
||||
outdir = '.'
|
||||
if args.mirror_url:
|
||||
outdir = args.mirror_url
|
||||
|
||||
|
||||
@@ -167,7 +167,8 @@ def ci_reindex(args):
|
||||
|
||||
def ci_rebuild(args):
|
||||
"""Check a single spec against the remote mirror, and rebuild it from
|
||||
source if the mirror does not contain the hash. """
|
||||
source if the mirror does not contain the full hash match of the spec
|
||||
as computed locally. """
|
||||
env = spack.cmd.require_active_env(cmd_name='ci rebuild')
|
||||
|
||||
# Make sure the environment is "gitlab-enabled", or else there's nothing
|
||||
@@ -279,8 +280,8 @@ def ci_rebuild(args):
|
||||
env, root_spec, job_spec_pkg_name, compiler_action)
|
||||
job_spec = spec_map[job_spec_pkg_name]
|
||||
|
||||
job_spec_json_file = '{0}.json'.format(job_spec_pkg_name)
|
||||
job_spec_json_path = os.path.join(repro_dir, job_spec_json_file)
|
||||
job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
|
||||
job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)
|
||||
|
||||
# To provide logs, cdash reports, etc for developer download/perusal,
|
||||
# these things have to be put into artifacts. This means downstream
|
||||
@@ -334,23 +335,23 @@ def ci_rebuild(args):
|
||||
# using a compiler already installed on the target system).
|
||||
spack_ci.configure_compilers(compiler_action)
|
||||
|
||||
# Write this job's spec json into the reproduction directory, and it will
|
||||
# Write this job's spec yaml into the reproduction directory, and it will
|
||||
# also be used in the generated "spack install" command to install the spec
|
||||
tty.debug('job concrete spec path: {0}'.format(job_spec_json_path))
|
||||
with open(job_spec_json_path, 'w') as fd:
|
||||
fd.write(job_spec.to_json(hash=ht.dag_hash))
|
||||
tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
|
||||
with open(job_spec_yaml_path, 'w') as fd:
|
||||
fd.write(job_spec.to_yaml(hash=ht.build_hash))
|
||||
|
||||
# Write the concrete root spec json into the reproduction directory
|
||||
root_spec_json_path = os.path.join(repro_dir, 'root.json')
|
||||
with open(root_spec_json_path, 'w') as fd:
|
||||
fd.write(spec_map['root'].to_json(hash=ht.dag_hash))
|
||||
# Write the concrete root spec yaml into the reproduction directory
|
||||
root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
|
||||
with open(root_spec_yaml_path, 'w') as fd:
|
||||
fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))
|
||||
|
||||
# Write some other details to aid in reproduction into an artifact
|
||||
repro_file = os.path.join(repro_dir, 'repro.json')
|
||||
repro_details = {
|
||||
'job_name': ci_job_name,
|
||||
'job_spec_json': job_spec_json_file,
|
||||
'root_spec_json': 'root.json',
|
||||
'job_spec_yaml': job_spec_yaml_file,
|
||||
'root_spec_yaml': 'root.yaml',
|
||||
'ci_project_dir': ci_project_dir
|
||||
}
|
||||
with open(repro_file, 'w') as fd:
|
||||
@@ -365,24 +366,25 @@ def ci_rebuild(args):
|
||||
fd.write(b'\n')
|
||||
|
||||
# If we decided there should be a temporary storage mechanism, add that
|
||||
# mirror now so it's used when we check for a hash match already
|
||||
# mirror now so it's used when we check for a full hash match already
|
||||
# built for this spec.
|
||||
if pipeline_mirror_url:
|
||||
spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
|
||||
pipeline_mirror_url,
|
||||
cfg.default_modify_scope())
|
||||
|
||||
# Check configured mirrors for a built spec with a matching hash
|
||||
matches = bindist.get_mirrors_for_spec(job_spec, index_only=False)
|
||||
# Check configured mirrors for a built spec with a matching full hash
|
||||
matches = bindist.get_mirrors_for_spec(
|
||||
job_spec, full_hash_match=True, index_only=False)
|
||||
|
||||
if matches:
|
||||
# Got a hash match on at least one configured mirror. All
|
||||
# Got a full hash match on at least one configured mirror. All
|
||||
# matches represent the fully up-to-date spec, so should all be
|
||||
# equivalent. If artifacts mirror is enabled, we just pick one
|
||||
# of the matches and download the buildcache files from there to
|
||||
# the artifacts, so they're available to be used by dependent
|
||||
# jobs in subsequent stages.
|
||||
tty.msg('No need to rebuild {0}, found hash match at: '.format(
|
||||
tty.msg('No need to rebuild {0}, found full hash match at: '.format(
|
||||
job_spec_pkg_name))
|
||||
for match in matches:
|
||||
tty.msg(' {0}'.format(match['mirror_url']))
|
||||
@@ -401,7 +403,7 @@ def ci_rebuild(args):
|
||||
# Now we are done and successful
|
||||
sys.exit(0)
|
||||
|
||||
# No hash match anywhere means we need to rebuild spec
|
||||
# No full hash match anywhere means we need to rebuild spec
|
||||
|
||||
# Start with spack arguments
|
||||
install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]
|
||||
@@ -413,6 +415,7 @@ def ci_rebuild(args):
|
||||
install_args.extend([
|
||||
'install',
|
||||
'--keep-stage',
|
||||
'--require-full-hash-match',
|
||||
])
|
||||
|
||||
can_verify = spack_ci.can_verify_binaries()
|
||||
@@ -440,8 +443,8 @@ def ci_rebuild(args):
|
||||
|
||||
# TODO: once we have the concrete spec registry, use the DAG hash
|
||||
# to identify the spec to install, rather than the concrete spec
|
||||
# json file.
|
||||
install_args.extend(['-f', job_spec_json_path])
|
||||
# yaml file.
|
||||
install_args.extend(['-f', job_spec_yaml_path])
|
||||
|
||||
tty.debug('Installing {0} from source'.format(job_spec.name))
|
||||
tty.debug('spack install arguments: {0}'.format(
|
||||
@@ -474,13 +477,13 @@ def ci_rebuild(args):
|
||||
tty.debug('spack install exited {0}'.format(install_exit_code))
|
||||
|
||||
# If a spec fails to build in a spack develop pipeline, we add it to a
|
||||
# list of known broken hashes. This allows spack PR pipelines to
|
||||
# list of known broken full hashes. This allows spack PR pipelines to
|
||||
# avoid wasting compute cycles attempting to build those hashes.
|
||||
if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
|
||||
tty.debug('Install failed on develop')
|
||||
if 'broken-specs-url' in gitlab_ci:
|
||||
broken_specs_url = gitlab_ci['broken-specs-url']
|
||||
dev_fail_hash = job_spec.dag_hash()
|
||||
dev_fail_hash = job_spec.full_hash()
|
||||
broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
|
||||
tty.msg('Reporting broken develop build as: {0}'.format(
|
||||
broken_spec_path))
|
||||
@@ -491,7 +494,7 @@ def ci_rebuild(args):
|
||||
'broken-spec': {
|
||||
'job-url': get_env_var('CI_JOB_URL'),
|
||||
'pipeline-url': get_env_var('CI_PIPELINE_URL'),
|
||||
'concrete-spec-dict': job_spec.to_dict(hash=ht.dag_hash)
|
||||
'concrete-spec-yaml': job_spec.to_dict(hash=ht.full_hash)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -536,7 +539,7 @@ def ci_rebuild(args):
|
||||
# per-PR mirror, if this is a PR pipeline
|
||||
if buildcache_mirror_url:
|
||||
spack_ci.push_mirror_contents(
|
||||
env, job_spec_json_path, buildcache_mirror_url, sign_binaries
|
||||
env, job_spec_yaml_path, buildcache_mirror_url, sign_binaries
|
||||
)
|
||||
|
||||
# Create another copy of that buildcache in the per-pipeline
|
||||
@@ -545,14 +548,14 @@ def ci_rebuild(args):
|
||||
# prefix is set)
|
||||
if pipeline_mirror_url:
|
||||
spack_ci.push_mirror_contents(
|
||||
env, job_spec_json_path, pipeline_mirror_url, sign_binaries
|
||||
env, job_spec_yaml_path, pipeline_mirror_url, sign_binaries
|
||||
)
|
||||
|
||||
# If this is a develop pipeline, check if the spec that we just built is
|
||||
# on the broken-specs list. If so, remove it.
|
||||
if spack_is_develop_pipeline and 'broken-specs-url' in gitlab_ci:
|
||||
broken_specs_url = gitlab_ci['broken-specs-url']
|
||||
just_built_hash = job_spec.dag_hash()
|
||||
just_built_hash = job_spec.full_hash()
|
||||
broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
|
||||
if web_util.url_exists(broken_spec_path):
|
||||
tty.msg('Removing {0} from the list of broken specs'.format(
|
||||
|
||||
@@ -381,14 +381,14 @@ def add_concretizer_args(subparser):
|
||||
help='do not reuse installed deps; build newest configuration'
|
||||
)
|
||||
subgroup.add_argument(
|
||||
'--minimal', action=ConfigSetAction, dest="concretizer:minimal",
|
||||
const=True, default=None,
|
||||
help='minimize builds (disables default variants, may choose older versions)'
|
||||
'--reuse', action=ConfigSetAction, dest="concretizer:reuse",
|
||||
const="any", default=None,
|
||||
help='reuse installed dependencies/buildcaches when possible'
|
||||
)
|
||||
subgroup.add_argument(
|
||||
'--reuse', action=ConfigSetAction, dest="concretizer:reuse",
|
||||
'--reuse-only', action=ConfigSetAction, dest="concretizer:reuse",
|
||||
const=True, default=None,
|
||||
help='reuse installed dependencies/buildcaches when possible'
|
||||
help='operate as a binary package manager'
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -18,8 +18,6 @@

def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['clean', 'dirty'])
arguments.add_concretizer_args(subparser)

subparser.add_argument(
'--dump', metavar="FILE",
help="dump a source-able environment to FILE"

@@ -22,9 +22,6 @@ def setup_parser(subparser):
help="""Concretize with test dependencies. When 'root' is chosen, test
dependencies are only added for the environment's root specs. When 'all' is
chosen, test dependencies are enabled for all packages in the environment.""")
subparser.add_argument(
'-q', '--quiet', action='store_true',
help="Don't print concretized specs")

spack.cmd.common.arguments.add_concretizer_args(subparser)

@@ -41,6 +38,5 @@ def concretize(parser, args):

with env.write_transaction():
concretized_specs = env.concretize(force=args.force, tests=tests)
if not args.quiet:
ev.display_specs(concretized_specs)
ev.display_specs(concretized_specs)
env.write()
@@ -187,27 +187,6 @@ def cmake_args(self):
return args"""


class LuaPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for LuaRocks-based packages"""

base_class_name = 'LuaPackage'

body_def = """\
def luarocks_args(self):
# FIXME: Add arguments to `luarocks make` other than rockspec path
# FIXME: If not needed delete this function
args = []
return args"""

def __init__(self, name, url, *args, **kwargs):
# If the user provided `--name lua-lpeg`, don't rename it lua-lua-lpeg
if not name.startswith('lua-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to lua-{0}".format(name))
name = 'lua-{0}'.format(name)
super(LuaPackageTemplate, self).__init__(name, url, *args, **kwargs)


class MesonPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for meson-based packages"""

@@ -601,7 +580,6 @@ def __init__(self, name, *args, **kwargs):
'makefile': MakefilePackageTemplate,
'intel': IntelPackageTemplate,
'meson': MesonPackageTemplate,
'lua': LuaPackageTemplate,
'sip': SIPPackageTemplate,
'generic': PackageTemplate,
}
@@ -666,9 +644,6 @@ def __call__(self, stage, url):
if url.endswith('.whl') or '.whl#' in url:
self.build_system = 'python'
return
if url.endswith('.rock'):
self.build_system = 'lua'
return

# A list of clues that give us an idea of the build system a package
# uses. If the regular expression matches a file contained in the
@@ -693,7 +668,6 @@ def __call__(self, stage, url):
(r'/Rakefile$', 'ruby'),
(r'/setup\.rb$', 'ruby'),
(r'/.*\.pro$', 'qmake'),
(r'/.*\.rockspec$', 'lua'),
(r'/(GNU)?[Mm]akefile$', 'makefile'),
(r'/DESCRIPTION$', 'octave'),
(r'/meson\.build$', 'meson'),
@@ -91,8 +91,8 @@ def dev_build(self, args):
spec.concretize()
package = spack.repo.get(spec)

if spec.installed:
tty.error("Already installed in %s" % spec.prefix)
if package.installed:
tty.error("Already installed in %s" % package.prefix)
tty.msg("Uninstall or try adding a version suffix for this dev build.")
sys.exit(1)


@@ -68,14 +68,8 @@ def compare_specs(a, b, to_string=False, color=None):
# Prepare a solver setup to parse differences
setup = asp.SpackSolverSetup()

# get facts for specs, making sure to include build dependencies of concrete
# specs and to descend into dependency hashes so we include all facts.
a_facts = set(t for t in setup.spec_clauses(
a, body=True, expand_hashes=True, concrete_build_deps=True,
))
b_facts = set(t for t in setup.spec_clauses(
b, body=True, expand_hashes=True, concrete_build_deps=True,
))
a_facts = set(t for t in setup.spec_clauses(a, body=True, expand_hashes=True))
b_facts = set(t for t in setup.spec_clauses(b, body=True, expand_hashes=True))

# We want to present them to the user as simple key: values
intersect = sorted(a_facts.intersection(b_facts))
@@ -8,8 +8,6 @@
import sys
import tempfile

import six

import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -43,8 +41,7 @@
'loads',
'view',
'update',
'revert',
'depfile'
'revert'
]
@@ -526,154 +523,6 @@ def env_revert(args):
tty.msg(msg.format(manifest_file))


def env_depfile_setup_parser(subparser):
"""generate a depfile from the concrete environment specs"""
subparser.add_argument(
'--make-target-prefix', default=None, metavar='TARGET',
help='prefix Makefile targets with <TARGET>/<name>. By default the absolute '
'path to the directory makedeps under the environment metadata dir is '
'used. Can be set to an empty string --make-target-prefix \'\'.')
subparser.add_argument(
'--make-disable-jobserver', default=True, action='store_false',
dest='jobserver', help='disable POSIX jobserver support.')
subparser.add_argument(
'-o', '--output', default=None, metavar='FILE',
help='write the depfile to FILE rather than to stdout')
subparser.add_argument(
'-G', '--generator', default='make', choices=('make',),
help='specify the depfile type. Currently only make is supported.')


def env_depfile(args):
# Currently only make is supported.
spack.cmd.require_active_env(cmd_name='env depfile')
env = ev.active_environment()

# Maps each hash in the environment to a string of install prereqs
hash_to_prereqs = {}
hash_to_spec = {}

if args.make_target_prefix is None:
target_prefix = os.path.join(env.env_subdir_path, 'makedeps')
else:
target_prefix = args.make_target_prefix

def get_target(name):
# The `all`, `fetch` and `clean` targets are phony. It doesn't make sense to
# have /abs/path/to/env/metadir/{all,clean} targets. But it *does* make
# sense to have a prefix like `env/all`, `env/fetch`, `env/clean` when they are
# supposed to be included
if name in ('all', 'fetch-all', 'clean') and os.path.isabs(target_prefix):
return name
else:
return os.path.join(target_prefix, name)

def get_install_target(name):
return os.path.join(target_prefix, '.install', name)

def get_fetch_target(name):
return os.path.join(target_prefix, '.fetch', name)

for _, spec in env.concretized_specs():
for s in spec.traverse(root=True):
hash_to_spec[s.dag_hash()] = s
hash_to_prereqs[s.dag_hash()] = [
get_install_target(dep.dag_hash()) for dep in s.dependencies()]

root_dags = [s.dag_hash() for _, s in env.concretized_specs()]

# Root specs without deps are the prereqs for the environment target
root_install_targets = [get_install_target(h) for h in root_dags]

# All package install targets, not just roots.
all_install_targets = [get_install_target(h) for h in hash_to_spec.keys()]

# Fetch targets for all packages in the environment, not just roots.
all_fetch_targets = [get_fetch_target(h) for h in hash_to_spec.keys()]

buf = six.StringIO()

buf.write("""SPACK ?= spack

.PHONY: {} {} {}

{}: {}

{}: {}

{}: {}
\t@touch $@

{}: {}
\t@touch $@

{}:
\t@mkdir -p {} {}

{}: | {}
\t$(info Fetching $(SPEC))
\t$(SPACK) -e '{}' fetch $(SPACK_FETCH_FLAGS) /$(notdir $@) && touch $@

{}: {}
\t$(info Installing $(SPEC))
\t{}$(SPACK) -e '{}' install $(SPACK_INSTALL_FLAGS) --only-concrete --only=package \
--no-add /$(notdir $@) && touch $@

""".format(get_target('all'), get_target('fetch-all'), get_target('clean'),
get_target('all'), get_target('env'),
get_target('fetch-all'), get_target('fetch'),
get_target('env'), ' '.join(root_install_targets),
get_target('fetch'), ' '.join(all_fetch_targets),
get_target('dirs'), get_target('.fetch'), get_target('.install'),
get_target('.fetch/%'), get_target('dirs'),
env.path,
get_target('.install/%'), get_target('.fetch/%'),
'+' if args.jobserver else '', env.path))

# Targets are of the form <prefix>/<name>: [<prefix>/<depname>]...,
# The prefix can be an empty string, in that case we don't add the `/`.
# The name is currently the dag hash of the spec. In principle it
# could be the package name in case of `concretization: together` so
# it can be more easily referred to, but for now we don't special case
# this.
fmt = '{name}{@version}{%compiler}{variants}{arch=architecture}'

# Set SPEC for each hash
buf.write('# Set the human-readable spec for each target\n')
for dag_hash in hash_to_prereqs.keys():
formatted_spec = hash_to_spec[dag_hash].format(fmt)
buf.write("{}: SPEC = {}\n".format(get_target('%/' + dag_hash), formatted_spec))
buf.write('\n')

# Set install dependencies
buf.write('# Install dependencies\n')
for parent, children in hash_to_prereqs.items():
if not children:
continue
buf.write('{}: {}\n'.format(get_install_target(parent), ' '.join(children)))
buf.write('\n')

# Clean target: remove target files but not their folders, cause
# --make-target-prefix can be any existing directory we do not control,
# including empty string (which means deleting the containing folder
# would delete the folder with the Makefile)
buf.write("{}:\n\trm -f -- {} {} {} {}\n".format(
get_target('clean'),
get_target('env'),
get_target('fetch'),
' '.join(all_fetch_targets),
' '.join(all_install_targets)))

makefile = buf.getvalue()

# Finally write to stdout/file.
if args.output:
with open(args.output, 'w') as f:
f.write(makefile)
else:
sys.stdout.write(makefile)


#: Dictionary mapping subcommand names and aliases to functions
subcommand_functions = {}
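For readers skimming the depfile generator above, here is a small standalone sketch of what the `get_target` / `get_install_target` helpers produce; the prefix and hash below are invented purely for illustration and are not taken from the diff.

import os

target_prefix = '/path/to/env/.spack-env/makedeps'   # hypothetical absolute prefix

def get_target(name):
    # phony targets keep their bare name when the prefix is an absolute path
    if name in ('all', 'fetch-all', 'clean') and os.path.isabs(target_prefix):
        return name
    return os.path.join(target_prefix, name)

def get_install_target(name):
    return os.path.join(target_prefix, '.install', name)

print(get_target('all'))             # -> 'all'
print(get_target('env'))             # -> '/path/to/env/.spack-env/makedeps/env'
print(get_install_target('abc123'))  # -> '/path/to/env/.spack-env/makedeps/.install/abc123'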
@@ -5,7 +5,6 @@
from __future__ import print_function

import argparse
import os
import sys

import llnl.util.tty as tty
@@ -14,7 +13,6 @@
import spack
import spack.cmd
import spack.cmd.common.arguments
import spack.cray_manifest as cray_manifest
import spack.detection
import spack.error
import spack.util.environment
@@ -37,9 +35,6 @@ def setup_parser(subparser):
find_parser.add_argument(
'--not-buildable', action='store_true', default=False,
help="packages with detected externals won't be built with Spack")
find_parser.add_argument(
'-p', '--path', default=None, action='append',
help="Alternative search paths for finding externals. May be repeated")
find_parser.add_argument(
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope('packages'),
@@ -60,40 +55,8 @@ def setup_parser(subparser):
'list', help='list detectable packages, by repository and name'
)

read_cray_manifest = sp.add_parser(
'read-cray-manifest', help=(
"consume a Spack-compatible description of externally-installed "
"packages, including dependency relationships"
)
)
read_cray_manifest.add_argument(
'--file', default=None,
help="specify a location other than the default")
read_cray_manifest.add_argument(
'--directory', default=None,
help="specify a directory storing a group of manifest files")
read_cray_manifest.add_argument(
'--dry-run', action='store_true', default=False,
help="don't modify DB with files that are read")
read_cray_manifest.add_argument(
'--fail-on-error', action='store_true',
help=("if a manifest file cannot be parsed, fail and report the "
"full stack trace")
)


def external_find(args):
if args.all or not (args.tags or args.packages):
# If the user calls 'spack external find' with no arguments, and
# this system has a description of installed packages, then we should
# consume it automatically.
try:
_collect_and_consume_cray_manifest_files()
except NoManifestFileError:
# It's fine to not find any manifest file if we are doing the
# search implicitly (i.e. as part of 'spack external find')
pass

# If the user didn't specify anything, search for build tools by default
if not args.tags and not args.all and not args.packages:
args.tags = ['core-packages', 'build-tools']
@@ -127,10 +90,8 @@ def external_find(args):
if not args.tags and not packages_to_check:
packages_to_check = spack.repo.path.all_packages()

detected_packages = spack.detection.by_executable(
packages_to_check, path_hints=args.path)
detected_packages.update(spack.detection.by_library(
packages_to_check, path_hints=args.path))
detected_packages = spack.detection.by_executable(packages_to_check)
detected_packages.update(spack.detection.by_library(packages_to_check))

new_entries = spack.detection.update_configuration(
detected_packages, scope=args.scope, buildable=not args.not_buildable
@@ -145,56 +106,6 @@ def external_find(args):
tty.msg('No new external packages detected')


def external_read_cray_manifest(args):
_collect_and_consume_cray_manifest_files(
manifest_file=args.file,
manifest_directory=args.directory,
dry_run=args.dry_run,
fail_on_error=args.fail_on_error
)


def _collect_and_consume_cray_manifest_files(
manifest_file=None, manifest_directory=None, dry_run=False,
fail_on_error=False):

manifest_files = []
if manifest_file:
manifest_files.append(manifest_file)

manifest_dirs = []
if manifest_directory:
manifest_dirs.append(manifest_directory)

if os.path.isdir(cray_manifest.default_path):
tty.debug(
"Cray manifest path {0} exists: collecting all files to read."
.format(cray_manifest.default_path))
manifest_dirs.append(cray_manifest.default_path)
else:
tty.debug("Default Cray manifest directory {0} does not exist."
.format(cray_manifest.default_path))

for directory in manifest_dirs:
for fname in os.listdir(directory):
manifest_files.append(os.path.join(directory, fname))

if not manifest_files:
raise NoManifestFileError(
"--file/--directory not specified, and no manifest found at {0}"
.format(cray_manifest.default_path))

for path in manifest_files:
try:
cray_manifest.read(path, not dry_run)
except (spack.compilers.UnknownCompilerError, spack.error.SpackError) as e:
if fail_on_error:
raise
else:
tty.warn("Failure reading manifest file: {0}"
"\n\t{1}".format(path, str(e)))


def external_list(args):
# Trigger a read of all packages, might take a long time.
list(spack.repo.path.all_packages())
@@ -206,10 +117,5 @@ def external_list(args):


def external(parser, args):
action = {'find': external_find, 'list': external_list,
'read-cray-manifest': external_read_cray_manifest}
action = {'find': external_find, 'list': external_list}
action[args.external_command](args)


class NoManifestFileError(spack.error.SpackError):
pass
@@ -69,10 +69,14 @@ def fetch(parser, args):

for spec in specs:
if args.missing or args.dependencies:
for s in spec.traverse(root=False):
for s in spec.traverse():
package = spack.repo.get(s)

# Skip already-installed packages with --missing
if args.missing and s.installed:
if args.missing and package.installed:
continue

s.package.do_fetch()
spec.package.do_fetch()
package.do_fetch()

package = spack.repo.get(spec)
package.do_fetch()

@@ -184,9 +184,8 @@ def print_detectable(pkg):
color.cprint('')
color.cprint(section_title('Externally Detectable: '))

# If the package has an 'executables' or 'libraries' field, it
# can detect an installation
if hasattr(pkg, 'executables') or hasattr(pkg, 'libraries'):
# If the package has an 'executables' field, it can detect an installation
if hasattr(pkg, 'executables'):
find_attributes = []
if hasattr(pkg, 'determine_version'):
find_attributes.append('version')
@@ -47,6 +47,7 @@ def update_kwargs_from_args(args, kwargs):
'explicit': True, # Always true for install command
'stop_at': args.until,
'unsigned': args.unsigned,
'full_hash_match': args.full_hash_match,
})

kwargs.update({
@@ -116,6 +117,11 @@ def setup_parser(subparser):
'--no-check-signature', action='store_true',
dest='unsigned', default=False,
help="do not check signatures of binary packages")
subparser.add_argument(
'--require-full-hash-match', action='store_true',
dest='full_hash_match', default=False, help="""when installing from
binary mirrors, do not install binary package unless the full hash of the
remote spec matches that of the local spec""")
subparser.add_argument(
'--show-log-on-error', action='store_true',
help="print full build log to stderr if build fails")
@@ -153,6 +159,10 @@ def setup_parser(subparser):
if 'all' is chosen, run package tests during installation for all
packages. If neither are chosen, don't run tests for any packages."""
)
testing.add_argument(
'--run-tests', action='store_true',
help='run package tests during installation (same as --test=all)'
)
subparser.add_argument(
'--log-format',
default=None,
@@ -306,8 +316,11 @@ def install(parser, args, **kwargs):
if args.log_file:
reporter.filename = args.log_file

if args.run_tests:
tty.warn("Deprecated option: --run-tests: use --test=all instead")

def get_tests(specs):
if args.test == 'all':
if args.test == 'all' or args.run_tests:
return True
elif args.test == 'root':
return [spec.name for spec in specs]
@@ -464,7 +477,7 @@ def get_tests(specs):
})

# If we are using the monitor, we send configs. and create build
# The dag_hash is the main package id
# The full_hash is the main package id, the build_hash for others
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))
@@ -273,7 +273,7 @@ def refresh(module_type, specs, args):
return

if not args.upstream_modules:
specs = list(s for s in specs if not s.installed_upstream)
specs = list(s for s in specs if not s.package.installed_upstream)

if not args.yes_to_all:
msg = 'You are about to regenerate {types} module files for:\n'

@@ -136,13 +136,13 @@ def solve(parser, args):
)

fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen
for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1):
for i, (idx, build_idx, name) in enumerate(result.criteria, 1):
color.cprint(
fmt % (
i,
name,
"-" if build_cost is None else installed_cost,
installed_cost if build_cost is None else build_cost,
"-" if build_idx is None else opt[idx],
opt[idx] if build_idx is None else opt[build_idx],
)
)
print()
@@ -151,9 +151,9 @@ def solve(parser, args):
# With -y, just print YAML to output.
if args.format == 'yaml':
# use write because to_yaml already has a newline.
sys.stdout.write(spec.to_yaml(hash=ht.dag_hash))
sys.stdout.write(spec.to_yaml(hash=ht.build_hash))
elif args.format == 'json':
sys.stdout.write(spec.to_json(hash=ht.dag_hash))
sys.stdout.write(spec.to_json(hash=ht.build_hash))
else:
sys.stdout.write(
spec.tree(color=sys.stdout.isatty(), **kwargs))
@@ -34,16 +34,12 @@ def setup_parser(subparser):
arguments.add_common_arguments(
subparser, ['long', 'very_long', 'install_status']
)
format_group = subparser.add_mutually_exclusive_group()
format_group.add_argument(
subparser.add_argument(
'-y', '--yaml', action='store_const', dest='format', default=None,
const='yaml', help='print concrete spec as YAML')
format_group.add_argument(
subparser.add_argument(
'-j', '--json', action='store_const', dest='format', default=None,
const='json', help='print concrete spec as JSON')
format_group.add_argument(
'--format', action='store', default=None,
help='print concrete spec with the specified format string')
subparser.add_argument(
'-c', '--cover', action='store',
default='nodes', choices=['nodes', 'edges', 'paths'],
@@ -51,6 +47,10 @@ def setup_parser(subparser):
subparser.add_argument(
'-N', '--namespaces', action='store_true', default=False,
help='show fully qualified package names')
subparser.add_argument(
'--hash-type', default="build_hash",
choices=['build_hash', 'full_hash', 'dag_hash'],
help='generate spec with a particular hash type.')
subparser.add_argument(
'-t', '--types', action='store_true', default=False,
help='show dependency types')
@@ -92,13 +92,14 @@ def spec(parser, args):
for (input, output) in specs:
# With -y, just print YAML to output.
if args.format:
# The user can specify the hash type to use
hash_type = getattr(ht, args.hash_type)

if args.format == 'yaml':
# use write because to_yaml already has a newline.
sys.stdout.write(output.to_yaml(hash=ht.dag_hash))
elif args.format == 'json':
print(output.to_json(hash=ht.dag_hash))
sys.stdout.write(output.to_yaml(hash=hash_type))
else:
print(output.format(args.format))
print(output.to_json(hash=hash_type))
continue

with tree_context():
@@ -27,6 +27,12 @@ def setup_parser(subparser):


def stage(parser, args):
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
if custom_path:
spack.stage.create_stage_root(custom_path)

if not args.specs:
env = ev.active_environment()
if env:
@@ -48,10 +54,6 @@ def stage(parser, args):

specs = spack.cmd.parse_specs(args.specs, concretize=False)

# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None

# prevent multiple specs from extracting in the same folder
if len(specs) > 1 and custom_path:
tty.die("`--path` requires a single spec, but multiple were provided")

@@ -337,8 +337,6 @@ def _report_suite_results(test_suite, args, constraints):
pkg_id, status = line.split()
results[pkg_id] = status

tty.msg('test specs:')

failed, skipped, untested = 0, 0, 0
for pkg_id in test_specs:
if pkg_id in results:

@@ -24,7 +24,7 @@


# tutorial configuration parameters
tutorial_branch = "releases/v0.17"
tutorial_branch = "releases/v%d.%d" % spack.spack_version_info[:2]
tutorial_mirror = "file:///mirror"
tutorial_key = os.path.join(spack.paths.share_path, "keys", "tutorial.pub")


@@ -62,14 +62,9 @@ def setup_parser(subparser):
'-a', '--all', action='store_true', dest='all',
help="remove ALL installed packages that match each supplied spec"
)
subparser.add_argument(
'--origin', dest='origin',
help="only remove DB records with the specified origin"
)

def find_matching_specs(env, specs, allow_multiple_matches=False, force=False,
origin=None):
def find_matching_specs(env, specs, allow_multiple_matches=False, force=False):
"""Returns a list of specs matching the not necessarily
concretized specs given from cli

@@ -90,8 +85,8 @@ def find_matching_specs(env, specs, allow_multiple_matches=False, force=False,
has_errors = False
for spec in specs:
install_query = [InstallStatuses.INSTALLED, InstallStatuses.DEPRECATED]
matching = spack.store.db.query_local(
spec, hashes=hashes, installed=install_query, origin=origin)
matching = spack.store.db.query_local(spec, hashes=hashes,
installed=install_query)
# For each spec provided, make sure it refers to only one package.
# Fail and ask user to be unambiguous if it doesn't
if not allow_multiple_matches and len(matching) > 1:
@@ -225,25 +220,15 @@ def do_uninstall(env, specs, force):

# A package is ready to be uninstalled when nothing else references it,
# unless we are requested to force uninstall it.
def is_ready(dag_hash):
if force:
return True

_, record = spack.store.db.query_by_spec_hash(dag_hash)
if not record.ref_count:
return True

# If this spec is only used as a build dependency, we can uninstall
return all(
dspec.deptypes == ("build",) or not dspec.parent.installed
for dspec in record.spec.edges_from_dependents()
)
is_ready = lambda x: not spack.store.db.query_by_spec_hash(x)[1].ref_count
if force:
is_ready = lambda x: True

while packages:
ready = [x for x in packages if is_ready(x.spec.dag_hash())]
if not ready:
msg = 'unexpected error [cannot proceed uninstalling specs with' \
' remaining link or run dependents {0}]'
' remaining dependents {0}]'
msg = msg.format(', '.join(x.name for x in packages))
raise spack.error.SpackError(msg)
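The two readiness checks above differ in one visible way: the newer predicate also treats a spec as ready when every remaining dependent edge is build-only or its parent is no longer installed, while the older lambda required a zero ref count. A tiny self-contained comparison (the record/edge objects are stand-ins, not Spack's classes):

from collections import namedtuple

Edge = namedtuple('Edge', ['deptypes', 'parent_installed'])
Record = namedtuple('Record', ['ref_count', 'edges'])

def is_ready_new(record, force=False):
    if force or not record.ref_count:
        return True
    # build-only or uninstalled dependents no longer block uninstallation
    return all(e.deptypes == ('build',) or not e.parent_installed for e in record.edges)

def is_ready_old(record, force=False):
    return force or not record.ref_count

rec = Record(ref_count=1, edges=[Edge(deptypes=('build',), parent_installed=True)])
print(is_ready_new(rec))  # True: only a build-time dependent remains
print(is_ready_old(rec))  # False: nonzero ref count blocks the uninstall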
@@ -255,8 +240,7 @@ def is_ready(dag_hash):
def get_uninstall_list(args, specs, env):
# Gets the list of installed specs that match the ones given via cli
# args.all takes care of the case where '-a' is given in the cli
uninstall_list = find_matching_specs(env, specs, args.all, args.force,
args.origin)
uninstall_list = find_matching_specs(env, specs, args.all, args.force)

# Takes care of '-R'
active_dpts, inactive_dpts = installed_dependents(uninstall_list, env)

@@ -495,8 +495,7 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
@llnl.util.lang.memoized
def class_for_compiler_name(compiler_name):
"""Given a compiler module name, get the corresponding Compiler class."""
if not supported(compiler_name):
raise UnknownCompilerError(compiler_name)
assert supported(compiler_name)

# Hack to be able to call the compiler `apple-clang` while still
# using a valid python name for the module
@@ -766,8 +765,7 @@ def name_matches(name, name_list):
toolchains.add(compiler_cls.__name__)

if len(toolchains) > 1:
if toolchains == set(['Clang', 'AppleClang', 'Aocc']) or \
toolchains == set(['Dpcpp', 'Oneapi']):
if toolchains == set(['Clang', 'AppleClang', 'Aocc']):
return False
tty.debug("[TOOLCHAINS] {0}".format(toolchains))
return True
@@ -790,13 +788,6 @@ def __init__(self):
"Spack could not find any compilers!")


class UnknownCompilerError(spack.error.SpackError):
def __init__(self, compiler_name):
super(UnknownCompilerError, self).__init__(
"Spack doesn't support the requested compiler: {0}"
.format(compiler_name))


class NoCompilerForSpecError(spack.error.SpackError):
def __init__(self, compiler_spec, target):
super(NoCompilerForSpecError, self).__init__(
@@ -78,8 +78,10 @@ def cxx14_flag(self):
self, "the C++14 standard", "cxx14_flag", "< 4.8")
elif self.real_version < ver('4.9'):
return "-std=c++1y"
else:
elif self.real_version < ver('6.0'):
return "-std=c++14"
else:
return ""

@property
def cxx17_flag(self):

@@ -88,7 +88,7 @@

#: Path to the default configuration
configuration_defaults_path = (
'defaults', os.path.join(spack.paths.etc_path, 'defaults')
'defaults', os.path.join(spack.paths.etc_path, 'spack', 'defaults')
)

#: Hard-coded default values for some key configuration options.
@@ -104,10 +104,6 @@
'build_jobs': min(16, cpus_available()),
'build_stage': '$tempdir/spack-stage',
'concretizer': 'clingo',
'license_dir': spack.paths.default_license_dir,
'flags': {
'keep_werror': 'none',
},
}
}

@@ -819,7 +815,7 @@ def _config():
# Site configuration is per spack instance, for sites or projects
# No site-level configs should be checked into spack by default.
configuration_paths.append(
('site', os.path.join(spack.paths.etc_path)),
('site', os.path.join(spack.paths.etc_path, 'spack')),
)

# User configuration can override both spack defaults and site config
@@ -1,193 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import json

import jsonschema
import six

import llnl.util.tty as tty

import spack.cmd
import spack.hash_types as hash_types
from spack.schema.cray_manifest import schema as manifest_schema

#: Cray systems can store a Spack-compatible description of system
#: packages here.
default_path = '/opt/cray/pe/cpe-descriptive-manifest/'

compiler_name_translation = {
'nvidia': 'nvhpc',
}


def translated_compiler_name(manifest_compiler_name):
"""
When creating a Compiler object, Spack expects a name matching
one of the classes in `spack.compilers`. Names in the Cray manifest
may differ; for cases where we know the name refers to a compiler in
Spack, this function translates it automatically.

This function will raise an error if there is no recorded translation
and the name doesn't match a known compiler name.
"""
if manifest_compiler_name in compiler_name_translation:
return compiler_name_translation[manifest_compiler_name]
elif manifest_compiler_name in spack.compilers.supported_compilers():
return manifest_compiler_name
else:
# Try to fail quickly. This can occur in two cases: (1) the compiler
# definition (2) a spec can specify a compiler that doesn't exist; the
# first will be caught when creating compiler definition. The second
# will result in Specs with associated undefined compilers.
raise spack.compilers.UnknownCompilerError(
"Manifest parsing - unknown compiler: {0}"
.format(manifest_compiler_name))


def compiler_from_entry(entry):
compiler_name = translated_compiler_name(entry['name'])
paths = entry['executables']
version = entry['version']
arch = entry['arch']
operating_system = arch['os']
target = arch['target']

compiler_cls = spack.compilers.class_for_compiler_name(compiler_name)
spec = spack.spec.CompilerSpec(compiler_cls.name, version)
paths = [paths.get(x, None) for x in ('cc', 'cxx', 'f77', 'fc')]
return compiler_cls(
spec, operating_system, target, paths
)
def spec_from_entry(entry):
arch_str = ""
if 'arch' in entry:
arch_format = "arch={platform}-{os}-{target}"
arch_str = arch_format.format(
platform=entry['arch']['platform'],
os=entry['arch']['platform_os'],
target=entry['arch']['target']['name']
)

compiler_str = ""
if 'compiler' in entry:
compiler_format = "%{name}@{version}"
compiler_str = compiler_format.format(
name=translated_compiler_name(entry['compiler']['name']),
version=entry['compiler']['version']
)

spec_format = "{name}@{version} {compiler} {arch}"
spec_str = spec_format.format(
name=entry['name'],
version=entry['version'],
compiler=compiler_str,
arch=arch_str
)

package = spack.repo.get(entry['name'])

if 'parameters' in entry:
variant_strs = list()
for name, value in entry['parameters'].items():
# TODO: also ensure that the variant value is valid?
if not (name in package.variants):
tty.debug("Omitting variant {0} for entry {1}/{2}"
.format(name, entry['name'], entry['hash'][:7]))
continue

# Value could be a list (of strings), boolean, or string
if isinstance(value, six.string_types):
variant_strs.append('{0}={1}'.format(name, value))
else:
try:
iter(value)
variant_strs.append(
'{0}={1}'.format(name, ','.join(value)))
continue
except TypeError:
# Not an iterable
pass
# At this point not a string or collection, check for boolean
if value in [True, False]:
bool_symbol = '+' if value else '~'
variant_strs.append('{0}{1}'.format(bool_symbol, name))
else:
raise ValueError(
"Unexpected value for {0} ({1}): {2}".format(
name, str(type(value)), str(value)
)
)
spec_str += ' ' + ' '.join(variant_strs)

spec, = spack.cmd.parse_specs(spec_str.split())

for ht in [hash_types.dag_hash, hash_types.build_hash,
hash_types.full_hash]:
setattr(spec, ht.attr, entry['hash'])

spec._concrete = True
spec._hashes_final = True
spec.external_path = entry['prefix']
spec.origin = 'external-db'
spack.spec.Spec.ensure_valid_variants(spec)

return spec
def entries_to_specs(entries):
spec_dict = {}
for entry in entries:
try:
spec = spec_from_entry(entry)
spec_dict[spec._hash] = spec
except spack.repo.UnknownPackageError:
tty.debug("Omitting package {0}: no corresponding repo package"
.format(entry['name']))
except spack.error.SpackError:
raise
except Exception:
tty.warn("Could not parse entry: " + str(entry))

for entry in filter(lambda x: 'dependencies' in x, entries):
dependencies = entry['dependencies']
for name, properties in dependencies.items():
dep_hash = properties['hash']
deptypes = properties['type']
if dep_hash in spec_dict:
if entry['hash'] not in spec_dict:
continue
parent_spec = spec_dict[entry['hash']]
dep_spec = spec_dict[dep_hash]
parent_spec._add_dependency(dep_spec, deptypes)

return spec_dict


def read(path, apply_updates):
with open(path, 'r') as json_file:
json_data = json.load(json_file)

jsonschema.validate(json_data, manifest_schema)

specs = entries_to_specs(json_data['specs'])
tty.debug("{0}: {1} specs read from manifest".format(
path,
str(len(specs))))
compilers = list()
if 'compilers' in json_data:
compilers.extend(compiler_from_entry(x)
for x in json_data['compilers'])
tty.debug("{0}: {1} compilers read from manifest".format(
path,
str(len(compilers))))
if apply_updates and compilers:
spack.compilers.add_compilers_to_config(
compilers, init_config=False)
if apply_updates:
for spec in specs.values():
spack.store.db.add(spec, directory_layout=None)
@@ -91,8 +91,7 @@
_pkg_lock_timeout = None

# Types of dependencies tracked by the database
# We store by DAG hash, so we track the dependencies that the DAG hash includes.
_tracked_deps = ht.dag_hash.deptype
_tracked_deps = ('link', 'run')

# Default list of fields written for each install record
default_install_record_fields = [
@@ -188,7 +187,6 @@ def __init__(
installation_time=None,
deprecated_for=None,
in_buildcache=False,
origin=None
):
self.spec = spec
self.path = str(path) if path else None
@@ -198,7 +196,6 @@ def __init__(
self.installation_time = installation_time or _now()
self.deprecated_for = deprecated_for
self.in_buildcache = in_buildcache
self.origin = origin

def install_type_matches(self, installed):
installed = InstallStatuses.canonicalize(installed)
@@ -220,9 +217,6 @@ def to_dict(self, include_fields=default_install_record_fields):
else:
rec_dict.update({field_name: getattr(self, field_name)})

if self.origin:
rec_dict['origin'] = self.origin

return rec_dict

@classmethod
@@ -356,10 +350,10 @@ def __init__(self, root, db_dir=None, upstream_dbs=None,
self.prefix_fail_path = os.path.join(self._db_dir, 'prefix_failures')

# Create needed directories and files
if not is_upstream and not os.path.exists(self._db_dir):
if not os.path.exists(self._db_dir):
fs.mkdirp(self._db_dir)

if not is_upstream and not os.path.exists(self._failure_dir):
if not os.path.exists(self._failure_dir) and not is_upstream:
fs.mkdirp(self._failure_dir)

self.is_upstream = is_upstream
@@ -434,7 +428,7 @@ def _failed_spec_path(self, spec):
.format(spec.name))

return os.path.join(self._failure_dir,
'{0}-{1}'.format(spec.name, spec.dag_hash()))
'{0}-{1}'.format(spec.name, spec.full_hash()))

def clear_all_failures(self):
"""Force remove install failure tracking files."""
@@ -646,12 +640,8 @@ def _write_to_file(self, stream):
# TODO: fix this before we support multiple install locations.
database = {
'database': {
# TODO: move this to a top-level _meta section if we ever
# TODO: bump the DB version to 7
'version': str(_db_version),

# dictionary of installation records, keyed by DAG hash
'installs': installs,
'version': str(_db_version)
}
}
@@ -691,13 +681,6 @@ def db_for_spec_hash(self, hash_key):
return db

def query_by_spec_hash(self, hash_key, data=None):
"""Get a spec for hash, and whether it's installed upstream.

Return:
(tuple): (bool, optional InstallRecord): bool tells us whether
the spec is installed upstream. Its InstallRecord is also
returned if it's installed at all; otherwise None.
"""
if data and hash_key in data:
return False, data[hash_key]
if not data:
@@ -1064,7 +1047,9 @@ def _read(self):
self._state_is_inconsistent = False
return
elif self.is_upstream:
tty.warn('upstream not found: {0}'.format(self._index_path))
raise UpstreamDatabaseLockingError(
"No database index file is present, and upstream"
" databases cannot generate an index file")

def _add(
self,
@@ -1102,7 +1087,6 @@ def _add(
"Specs added to DB must be concrete.")

key = spec.dag_hash()
spec_pkg_hash = spec._package_hash
upstream, record = self.query_by_spec_hash(key)
if upstream:
return
@@ -1147,10 +1131,6 @@ def _add(
'explicit': explicit,
'installation_time': installation_time
}
# Commands other than 'spack install' may add specs to the DB,
# we can record the source of an installed Spec with 'origin'
if hasattr(spec, 'origin'):
extra_args['origin'] = spec.origin
self._data[key] = InstallRecord(
new_spec, path, installed, ref_count=0, **extra_args
)
@@ -1164,10 +1144,10 @@ def _add(
record.ref_count += 1

# Mark concrete once everything is built, and preserve
# the original hashes of concrete specs.
# the original hash of concrete specs.
new_spec._mark_concrete()
new_spec._hash = key
new_spec._package_hash = spec_pkg_hash
new_spec._full_hash = spec._full_hash

else:
# It is already in the database
@@ -1482,7 +1462,6 @@ def _query(
end_date=None,
hashes=None,
in_buildcache=any,
origin=None
):
"""Run a query on the database."""

@@ -1511,9 +1490,6 @@ def _query(
if hashes is not None and rec.spec.dag_hash() not in hashes:
continue

if origin and not (origin == rec.origin):
continue

if not rec.install_type_matches(installed):
continue

@@ -1607,12 +1583,11 @@ def unused_specs(self):
needed, visited = set(), set()
with self.read_transaction():
for key, rec in self._data.items():
if not rec.explicit:
continue

# recycle `visited` across calls to avoid redundantly traversing
for spec in rec.spec.traverse(visited=visited, deptype=("link", "run")):
needed.add(spec.dag_hash())
if rec.explicit:
# recycle `visited` across calls to avoid
# redundantly traversing
for spec in rec.spec.traverse(visited=visited):
needed.add(spec.dag_hash())

unused = [rec.spec for key, rec in self._data.items()
if key not in needed and rec.installed]
@@ -74,8 +74,7 @@ def executables_in_path(path_hints=None):


def libraries_in_ld_library_path(path_hints=None):
"""Get the paths of all libraries available from LD_LIBRARY_PATH,
LIBRARY_PATH, DYLD_LIBRARY_PATH, and DYLD_FALLBACK_LIBRARY_PATH.
"""Get the paths of all libraries available from LD_LIBRARY_PATH.

For convenience, this is constructed as a dictionary where the keys are
the library paths and the values are the names of the libraries
@@ -86,15 +85,9 @@ def libraries_in_ld_library_path(path_hints=None):

Args:
path_hints (list): list of paths to be searched. If None the list will be
constructed based on the set of LD_LIBRARY_PATH, LIBRARY_PATH,
DYLD_LIBRARY_PATH, and DYLD_FALLBACK_LIBRARY_PATH environment
variables.
constructed based on the LD_LIBRARY_PATH environment variable.
"""
path_hints = path_hints or \
spack.util.environment.get_path('LIBRARY_PATH') + \
spack.util.environment.get_path('LD_LIBRARY_PATH') + \
spack.util.environment.get_path('DYLD_LIBRARY_PATH') + \
spack.util.environment.get_path('DYLD_FALLBACK_LIBRARY_PATH')
path_hints = path_hints or spack.util.environment.get_path('LD_LIBRARY_PATH')
search_paths = llnl.util.filesystem.search_paths_for_libraries(*path_hints)

path_to_lib = {}
@@ -48,13 +48,13 @@ class OpenMpi(Package):
from spack.resource import Resource
from spack.version import Version, VersionChecksumError

__all__ = ['DirectiveError', 'DirectiveMeta', 'version', 'conflicts', 'depends_on',
'extends', 'provides', 'patch', 'variant', 'resource']
__all__ = ['DirectiveError', 'DirectiveMeta']

#: These are variant names used by Spack internally; packages can't use them
reserved_names = ['patches', 'dev_path']

#: Names of possible directives. This list is populated elsewhere in the file.
#: Names of possible directives. This list is populated elsewhere in the file and then
#: added to `__all__` at the bottom.
directive_names = []

_patch_order_index = 0
@@ -731,3 +731,7 @@ class DependencyPatchError(DirectiveError):

class UnsupportedPackageDirective(DirectiveError):
"""Raised when an invalid or unsupported package directive is specified."""


#: add all directive names to __all__
__all__.extend(directive_names)
@@ -9,7 +9,6 @@
import posixpath
import re
import shutil
import sys
import tempfile
from contextlib import contextmanager

@@ -25,7 +24,6 @@
import spack.util.spack_json as sjson
from spack.error import SpackError

is_windows = sys.platform == 'win32'
# Note: Posixpath is used here as opposed to
# os.path.join due to spack.spec.Spec.format
# requiring forward slash path separators at this stage
@@ -110,9 +108,13 @@ def write_spec(self, spec, path):
"""Write a spec out to a file."""
_check_concrete(spec)
with open(path, 'w') as f:
# The hash of the projection is the DAG hash which contains
# the full provenance, so it's available if we want it later
spec.to_json(f, hash=ht.dag_hash)
# The hash of the projection is the DAG hash but we write out the
# full provenance by full hash so it's available if we want it later
# extension = os.path.splitext(path)[-1].lower()
# if 'json' in extension:
spec.to_json(f, hash=ht.full_hash)
# elif 'yaml' in extension:
# spec.to_yaml(f, hash=ht.full_hash)

def write_host_environment(self, spec):
"""The host environment is a json file with os, kernel, and spack
@@ -238,10 +240,10 @@ def create_install_directory(self, spec):

def ensure_installed(self, spec):
"""
Throws InconsistentInstallDirectoryError if:
Throws DirectoryLayoutError if:
1. spec prefix does not exist
2. spec prefix does not contain a spec file, or
3. We read a spec with the wrong DAG hash out of an existing install directory.
2. spec prefix does not contain a spec file
3. the spec file does not correspond to the spec
"""
_check_concrete(spec)
path = self.path_for_spec(spec)
@@ -257,7 +259,25 @@ def ensure_installed(self, spec):
" " + path)

installed_spec = self.read_spec(spec_file_path)
if installed_spec.dag_hash() != spec.dag_hash():
if installed_spec == spec:
return

# DAG hashes currently do not include build dependencies.
#
# TODO: remove this when we do better concretization and don't
# ignore build-only deps in hashes.
elif (installed_spec.copy(deps=('link', 'run')) ==
spec.copy(deps=('link', 'run'))):
# The directory layout prefix is based on the dag hash, so among
# specs with differing full-hash but matching dag-hash, only one
# may be installed. This means for example that for two instances
# that differ only in CMake version used to build, only one will
# be installed.
return

if spec.dag_hash() == installed_spec.dag_hash():
raise SpecHashCollisionError(spec, installed_spec)
else:
raise InconsistentInstallDirectoryError(
'Spec file in %s does not match hash!' % spec_file_path)

@@ -329,14 +349,6 @@ def remove_install_directory(self, spec, deprecated=False):
path = self.path_for_spec(spec)
assert(path.startswith(self.root))

# Windows readonly files cannot be removed by Python
# directly, change permissions before attempting to remove
if is_windows:
kwargs = {'ignore_errors': False,
'onerror': fs.readonly_file_handler(ignore_errors=False)}
else:
kwargs = {} # the default value for ignore_errors is false

if deprecated:
if os.path.exists(path):
try:
@@ -345,9 +357,10 @@ def remove_install_directory(self, spec, deprecated=False):
os.remove(metapath)
except OSError as e:
raise six.raise_from(RemoveFailedError(spec, path, e), e)

elif os.path.exists(path):
try:
shutil.rmtree(path, **kwargs)
shutil.rmtree(path)
except OSError as e:
raise six.raise_from(RemoveFailedError(spec, path, e), e)

@@ -445,8 +458,8 @@ def add_extension(self, spec, ext_spec):
def check_extension_conflict(self, spec, ext_spec):
exts = self._extension_map(spec)
if ext_spec.name in exts:
installed_spec = exts[ext_spec.name]
if ext_spec.dag_hash() == installed_spec.dag_hash():
installed_spec = exts[ext_spec.name].copy(deps=('link', 'run'))
if ext_spec.copy(deps=('link', 'run')) == installed_spec:
raise ExtensionAlreadyInstalledError(spec, ext_spec)
else:
raise ExtensionConflictError(spec, ext_spec, installed_spec)
@@ -566,6 +579,15 @@ def __init__(self, message, long_msg=None):
super(DirectoryLayoutError, self).__init__(message, long_msg)


class SpecHashCollisionError(DirectoryLayoutError):
"""Raised when there is a hash collision in an install layout."""

def __init__(self, installed_spec, new_spec):
super(SpecHashCollisionError, self).__init__(
'Specs %s and %s have the same SHA-1 prefix!'
% (installed_spec, new_spec))


class RemoveFailedError(DirectoryLayoutError):
"""Raised when a DirectoryLayout cannot remove an install prefix."""

@@ -1,334 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This package implements Spack environments.

.. _lockfile-format:

`spack.lock` format
===================

Spack environments have existed since Spack ``v0.12.0``, and there have been 4 different
``spack.lock`` formats since then. The formats are documented here.

The high-level format of a Spack lockfile hasn't changed much between versions, but the
contents have. Lockfiles are JSON-formatted and their top-level sections are:

1. ``_meta`` (object): this contains details about the file format, including:
* ``file-type``: always ``"spack-lockfile"``
* ``lockfile-version``: an integer representing the lockfile format version
* ``specfile-version``: an integer representing the spec format version (since
``v0.17``)

2. ``roots`` (list): an ordered list of records representing the roots of the Spack
environment. Each has two fields:
* ``hash``: a Spack spec hash uniquely identifying the concrete root spec
* ``spec``: a string representation of the abstract spec that was concretized

3. ``concrete_specs``: a dictionary containing the specs in the environment.

Compatibility
-------------

New versions of Spack can (so far) read all old lockfile formats -- they are
backward-compatible. Old versions cannot read new lockfile formats, and you'll need to
upgrade Spack to use them.

.. list-table:: Lockfile version compatibility across Spack versions
:header-rows: 1

* - Spack version
- ``v1``
- ``v2``
- ``v3``
- ``v4``
* - ``v0.12:0.14``
- ✅
-
-
-
* - ``v0.15:0.16``
- ✅
- ✅
-
-
* - ``v0.17``
- ✅
- ✅
- ✅
-
* - ``v0.18:``
- ✅
- ✅
- ✅
- ✅
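As a quick illustration of the ``_meta`` section described above, a reader could check the
lockfile version before interpreting the rest of the file. This is a minimal sketch under
the stated field names, not Spack's actual reader, and the path is hypothetical.

.. code-block:: python

import json

def lockfile_version(path):
    """Return the integer lockfile-version from a spack.lock _meta section."""
    with open(path) as f:
        data = json.load(f)
    meta = data['_meta']
    assert meta['file-type'] == 'spack-lockfile'
    return meta['lockfile-version']

# e.g. lockfile_version('spack.lock') might return 4 for the newest format described below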
Version 1
---------

When lockfiles were first created, there was only one hash in Spack: the DAG hash. This
DAG hash (we'll call it the old DAG hash) did *not* include build dependencies -- it
only included transitive link and run dependencies.

The spec format at this time was keyed by name. Each spec started with a key for its
name, whose value was a dictionary of other spec attributes. The lockfile put these
name-keyed specs into dictionaries keyed by their DAG hash, and the spec records did not
actually have a "hash" field in the lockfile -- you have to associate the hash from the
key with the spec record after the fact.

Dependencies in original lockfiles were keyed by ``"hash"``, i.e. the old DAG hash.

.. code-block:: json

{
"_meta": {
"file-type": "spack-lockfile",
"lockfile-version": 1
},
"roots": [
{
"hash": "<old_dag_hash 1>",
"spec": "<abstract spec 1>"
},
{
"hash": "<old_dag_hash 2>",
"spec": "<abstract spec 2>"
}
],
"concrete_specs": {
"<old_dag_hash 1>": {
"... <spec dict attributes> ...": { },
"dependencies": {
"depname_1": {
"hash": "<old_dag_hash for depname_1>",
"type": ["build", "link"]
},
"depname_2": {
"hash": "<old_dag_hash for depname_3>",
"type": ["build", "link"]
}
},
"hash": "<old_dag_hash 1>"
},
"<old_dag_hash 2>": {
"... <spec dict attributes> ...": { },
"dependencies": {
"depname_3": {
"hash": "<old_dag_hash for depname_3>",
"type": ["build", "link"]
},
"depname_4": {
"hash": "<old_dag_hash for depname_4>",
"type": ["build", "link"]
}
},
"hash": "<old_dag_hash 2>"
},
}
}

Version 2
|
||||
---------
|
||||
|
||||
Version 2 changes one thing: specs in the lockfile are now keyed by ``build_hash``
|
||||
instead of the old ``dag_hash``. Specs have a ``hash`` attribute with their real DAG
|
||||
hash, so you can't go by the dictionary key anymore to identify a spec -- you have to
|
||||
read it in and look at ``"hash"``. Dependencies are still keyed by old DAG hash.
|
||||
|
||||
Even though we key lockfiles by ``build_hash``, specs in Spack were still deployed with
|
||||
the old, coarser DAG hash. This means that in v2 and v3 lockfiles (which are keyed by
|
||||
build hash), there may be multiple versions of the same spec with different build
|
||||
dependencies, which means they will have different build hashes but the same DAG hash.
|
||||
Spack would only have been able to actually install one of these.
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"_meta": {
|
||||
"file-type": "spack-lockfile",
|
||||
"lockfile-version": 2
|
||||
},
|
||||
"roots": [
|
||||
{
|
||||
"hash": "<build_hash 1>",
|
||||
"spec": "<abstract spec 1>"
|
||||
},
|
||||
{
|
||||
"hash": "<build_hash 2>",
|
||||
"spec": "<abstract spec 2>"
|
||||
}
|
||||
],
|
||||
"concrete_specs": {
|
||||
"<build_hash 1>": {
|
||||
"... <spec dict attributes> ...": { },
|
||||
"dependencies": {
|
||||
"depname_1": {
|
||||
"hash": "<old_dag_hash for depname_1>",
|
||||
"type": ["build", "link"]
|
||||
},
|
||||
"depname_2": {
|
||||
"hash": "<old_dag_hash for depname_3>",
|
||||
"type": ["build", "link"]
|
||||
}
|
||||
},
|
||||
"hash": "<old_dag_hash 1>",
|
||||
},
|
||||
"<build_hash 2>": {
|
||||
"... <spec dict attributes> ...": { },
|
||||
"dependencies": {
|
||||
"depname_3": {
|
||||
"hash": "<old_dag_hash for depname_3>",
|
||||
"type": ["build", "link"]
|
||||
},
|
||||
"depname_4": {
|
||||
"hash": "<old_dag_hash for depname_4>",
|
||||
"type": ["build", "link"]
|
||||
}
|
||||
},
|
||||
"hash": "<old_dag_hash 2>"
|
||||
}
|
||||
}
|
||||
}
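
Because several build-hash keys can collapse onto one old DAG hash, anything reading a
v2 (or v3) lockfile has to pick a single record per DAG hash -- only one of them can
correspond to what was actually installed. A hedged sketch of that bookkeeping, using
the field names from the example above (not the real reader):

.. code-block:: python

   def first_record_per_dag_hash(concrete_specs):
       """Keep the first record seen for each old-style DAG hash."""
       first_seen = {}
       for build_hash, node in concrete_specs.items():
           dag_hash = node["hash"]  # the coarser, old-style DAG hash
           first_seen.setdefault(dag_hash, (build_hash, node))
       return first_seen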


Version 3
---------

Version 3 doesn't change the top-level lockfile format, but this was when we changed the
specfile format. Specs in ``concrete_specs`` are now keyed by the build hash, with no
inner dictionary keyed by their package name. The package name is in a ``name`` field
inside each spec dictionary. The ``dependencies`` field in the specs is a list instead
of a dictionary, and each element of the list is a record with the name, dependency
types, and hash of the dependency. Instead of a key called ``hash``, dependencies are
keyed by ``build_hash``. Each spec still has a ``hash`` attribute.

Version 3 adds the ``specfile-version`` field to ``_meta`` and uses the new JSON spec
format.

.. code-block:: json

   {
       "_meta": {
           "file-type": "spack-lockfile",
           "lockfile-version": 3,
           "specfile-version": 2
       },
       "roots": [
           {
               "hash": "<build_hash 1>",
               "spec": "<abstract spec 1>"
           },
           {
               "hash": "<build_hash 2>",
               "spec": "<abstract spec 2>"
           }
       ],
       "concrete_specs": {
           "<build_hash 1>": {
               "... <spec dict attributes> ...": { },
               "dependencies": [
                   {
                       "name": "depname_1",
                       "build_hash": "<build_hash for depname_1>",
                       "type": ["build", "link"]
                   },
                   {
                       "name": "depname_2",
                       "build_hash": "<build_hash for depname_2>",
                       "type": ["build", "link"]
                   }
               ],
               "hash": "<old_dag_hash 1>"
           },
           "<build_hash 2>": {
               "... <spec dict attributes> ...": { },
               "dependencies": [
                   {
                       "name": "depname_3",
                       "build_hash": "<build_hash for depname_3>",
                       "type": ["build", "link"]
                   },
                   {
                       "name": "depname_4",
                       "build_hash": "<build_hash for depname_4>",
                       "type": ["build", "link"]
                   }
               ],
               "hash": "<old_dag_hash 2>"
           }
       }
   }
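
Since v3 turns ``dependencies`` into a list of records, edge resolution becomes a walk
over that list rather than a dict lookup by name. A small illustrative sketch, assuming
only the fields shown in the example above:

.. code-block:: python

   def dependency_edges(concrete_specs):
       """Yield (parent_build_hash, dep_name, dep_build_hash, deptypes) tuples."""
       for parent_hash, node in concrete_specs.items():
           for dep in node.get("dependencies", []):
               yield parent_hash, dep["name"], dep["build_hash"], dep["type"]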


Version 4
---------

Version 4 removes build hashes and is keyed by the new DAG hash (``hash``). The ``hash``
now includes build dependencies and a canonical hash of the ``package.py`` file.
Dependencies are keyed by ``hash`` (DAG hash) as well. There are no more ``build_hash``
fields in the specs, and there are no more issues with lockfiles being able to store
multiple specs with the same DAG hash (because the DAG hash is now finer-grained).

.. code-block:: json

   {
       "_meta": {
           "file-type": "spack-lockfile",
           "lockfile-version": 4,
           "specfile-version": 2
       },
       "roots": [
           {
               "hash": "<dag_hash 1>",
               "spec": "<abstract spec 1>"
           },
           {
               "hash": "<dag_hash 2>",
               "spec": "<abstract spec 2>"
           }
       ],
       "concrete_specs": {
           "<dag_hash 1>": {
               "... <spec dict attributes> ...": { },
               "dependencies": [
                   {
                       "name": "depname_1",
                       "hash": "<dag_hash for depname_1>",
                       "type": ["build", "link"]
                   },
                   {
                       "name": "depname_2",
                       "hash": "<dag_hash for depname_2>",
                       "type": ["build", "link"]
                   }
               ],
               "hash": "<dag_hash 1>"
           },
           "<dag_hash 2>": {
               "... <spec dict attributes> ...": { },
               "dependencies": [
                   {
                       "name": "depname_3",
                       "hash": "<dag_hash for depname_3>",
                       "type": ["build", "link"]
                   },
                   {
                       "name": "depname_4",
                       "hash": "<dag_hash for depname_4>",
                       "type": ["build", "link"]
                   }
               ],
               "hash": "<dag_hash 2>"
           }
       }
   }
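
With a single, finer-grained hash everywhere, reconstructing the dependency graph from a
v4 lockfile is a straightforward lookup per edge. This is an illustrative sketch under
the format above, not the code Spack ships:

.. code-block:: python

   def link_dependencies(concrete_specs):
       """Return {dag_hash: [(dependency record, deptypes), ...]} for a v4 lockfile."""
       edges = {}
       for dag_hash, node in concrete_specs.items():
           deps = []
           for dep in node.get("dependencies", []):
               # every edge now points at a DAG-hash key in concrete_specs
               deps.append((concrete_specs[dep["hash"]], dep["type"]))
           edges[dag_hash] = deps
       return edges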

"""

from .environment import (
    Environment,
    SpackEnvironmentError,

@@ -79,9 +79,8 @@
env_subdir_name = '.spack-env'


def default_manifest_yaml():
    """default spack.yaml file to put in new environments"""
    return """\
#: default spack.yaml file to put in new environments
default_manifest_yaml = """\
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
@@ -90,16 +89,12 @@ def default_manifest_yaml():
  # add package specs to the `specs` list
  specs: []
  view: true
  concretizer:
    unify: {}
""".format('true' if spack.config.get('concretizer:unify') else 'false')


"""
#: regex for validating enviroment names
valid_environment_name_re = r'^\w[\w-]*$'

#: version of the lockfile format. Must increase monotonically.
lockfile_format_version = 4
lockfile_format_version = 3

# Magic names
# The name of the standalone spec list in the manifest yaml
@@ -307,7 +302,7 @@ def _is_dev_spec_and_has_changed(spec):
        return False

    # Now we can check whether the code changed since the last installation
    if not spec.installed:
    if not spec.package.installed:
        # Not installed -> nothing to compare against
        return False

@@ -320,7 +315,7 @@ def _spec_needs_overwrite(spec, changed_dev_specs):
|
||||
"""Check whether the current spec needs to be overwritten because either it has
|
||||
changed itself or one of its dependencies have changed"""
|
||||
# if it's not installed, we don't need to overwrite it
|
||||
if not spec.installed:
|
||||
if not spec.package.installed:
|
||||
return False
|
||||
|
||||
# If the spec itself has changed this is a trivial decision
|
||||
@@ -335,7 +330,7 @@ def _spec_needs_overwrite(spec, changed_dev_specs):
|
||||
# If any dep needs overwrite, or any dep is missing and is a dev build then
|
||||
# overwrite this package
|
||||
if any(
|
||||
((not dep.installed) and dep.satisfies('dev_path=*')) or
|
||||
((not dep.package.installed) and dep.satisfies('dev_path=*')) or
|
||||
_spec_needs_overwrite(dep, changed_dev_specs)
|
||||
for dep in spec.traverse(root=False)
|
||||
):
|
||||
@@ -444,7 +439,7 @@ def _next_root(self, specs):
|
||||
def content_hash(self, specs):
|
||||
d = syaml.syaml_dict([
|
||||
('descriptor', self.to_dict()),
|
||||
('specs', [(spec.dag_hash(), spec.prefix) for spec in sorted(specs)])
|
||||
('specs', [(spec.full_hash(), spec.prefix) for spec in sorted(specs)])
|
||||
])
|
||||
contents = sjson.dump(d)
|
||||
return spack.util.hash.b32_hash(contents)
|
||||
@@ -523,7 +518,7 @@ def specs_for_view(self, concretized_root_specs):
|
||||
|
||||
# Filter selected, installed specs
|
||||
with spack.store.db.read_transaction():
|
||||
specs = [s for s in specs if s in self and s.installed]
|
||||
specs = [s for s in specs if s in self and s.package.installed]
|
||||
|
||||
return specs
|
||||
|
||||
@@ -637,11 +632,11 @@ def __init__(self, path, init_file=None, with_view=None, keep_relative=False):
|
||||
# the init file.
|
||||
with fs.open_if_filename(init_file) as f:
|
||||
if hasattr(f, 'name') and f.name.endswith('.lock'):
|
||||
self._read_manifest(default_manifest_yaml())
|
||||
self._read_manifest(default_manifest_yaml)
|
||||
self._read_lockfile(f)
|
||||
self._set_user_specs_from_lockfile()
|
||||
else:
|
||||
self._read_manifest(f, raw_yaml=default_manifest_yaml())
|
||||
self._read_manifest(f, raw_yaml=default_manifest_yaml)
|
||||
|
||||
# Rewrite relative develop paths when initializing a new
|
||||
# environment in a different location from the spack.yaml file.
|
||||
@@ -705,7 +700,7 @@ def _read(self):
|
||||
default_manifest = not os.path.exists(self.manifest_path)
|
||||
if default_manifest:
|
||||
# No manifest, use default yaml
|
||||
self._read_manifest(default_manifest_yaml())
|
||||
self._read_manifest(default_manifest_yaml)
|
||||
else:
|
||||
with open(self.manifest_path) as f:
|
||||
self._read_manifest(f)
|
||||
@@ -771,11 +766,8 @@ def _read_manifest(self, f, raw_yaml=None):
|
||||
self.views = {}
|
||||
# Retrieve the current concretization strategy
|
||||
configuration = config_dict(self.yaml)
|
||||
|
||||
# Let `concretization` overrule `concretize:unify` config for now.
|
||||
unify = spack.config.get('concretizer:unify')
|
||||
self.concretization = configuration.get(
|
||||
'concretization', 'together' if unify else 'separately')
|
||||
# default concretization to separately
|
||||
self.concretization = configuration.get('concretization', 'separately')
|
||||
|
||||
# Retrieve dev-build packages:
|
||||
self.dev_specs = configuration.get('develop', {})
|
||||
@@ -1018,9 +1010,14 @@ def remove(self, query_spec, list_name=user_speclist_name, force=False):
|
||||
|
||||
if not matches:
|
||||
# concrete specs match against concrete specs in the env
|
||||
# by dag hash.
|
||||
# by *dag hash*, not build hash.
|
||||
dag_hashes_in_order = [
|
||||
self.specs_by_hash[build_hash].dag_hash()
|
||||
for build_hash in self.concretized_order
|
||||
]
|
||||
|
||||
specs_hashes = zip(
|
||||
self.concretized_user_specs, self.concretized_order
|
||||
self.concretized_user_specs, dag_hashes_in_order
|
||||
)
|
||||
|
||||
matches = [
|
||||
@@ -1277,7 +1274,7 @@ def _concretize_separately(self, tests=False):
|
||||
by_hash = {}
|
||||
for abstract, concrete in zip(root_specs, concretized_root_specs):
|
||||
self._add_concrete_spec(abstract, concrete)
|
||||
by_hash[concrete.dag_hash()] = concrete
|
||||
by_hash[concrete.build_hash()] = concrete
|
||||
|
||||
# Unify the specs objects, so we get correct references to all parents
|
||||
self._read_lockfile_dict(self._to_lockfile_dict())
|
||||
@@ -1334,7 +1331,7 @@ def concretize_and_add(self, user_spec, concrete_spec=None, tests=False):
|
||||
spec = next(
|
||||
s for s in self.user_specs if s.satisfies(user_spec)
|
||||
)
|
||||
concrete = self.specs_by_hash.get(spec.dag_hash())
|
||||
concrete = self.specs_by_hash.get(spec.build_hash())
|
||||
if not concrete:
|
||||
concrete = spec.concretized(tests=tests)
|
||||
self._add_concrete_spec(spec, concrete)
|
||||
@@ -1383,10 +1380,9 @@ def check_views(self):
|
||||
# default view if they are installed.
|
||||
for view_name, view in self.views.items():
|
||||
for _, spec in self.concretized_specs():
|
||||
if spec in view and spec.package and spec.installed:
|
||||
msg = '{0} in view "{1}"'
|
||||
tty.debug(msg.format(spec.name, view_name))
|
||||
|
||||
if spec in view and spec.package.installed:
|
||||
tty.debug(
|
||||
'Spec %s in view %s' % (spec.name, view_name))
|
||||
except (spack.repo.UnknownPackageError,
|
||||
spack.repo.UnknownNamespaceError) as e:
|
||||
tty.warn(e)
|
||||
@@ -1402,8 +1398,7 @@ def _env_modifications_for_default_view(self, reverse=False):
|
||||
|
||||
errors = []
|
||||
for _, root_spec in self.concretized_specs():
|
||||
if (root_spec in self.default_view and
|
||||
root_spec.installed and root_spec.package):
|
||||
if root_spec in self.default_view and root_spec.package.installed:
|
||||
for spec in root_spec.traverse(deptype='run', root=True):
|
||||
if spec.name in visited:
|
||||
# It is expected that only one instance of the package
|
||||
@@ -1502,7 +1497,7 @@ def _add_concrete_spec(self, spec, concrete, new=True):
|
||||
# update internal lists of specs
|
||||
self.concretized_user_specs.append(spec)
|
||||
|
||||
h = concrete.dag_hash()
|
||||
h = concrete.build_hash()
|
||||
self.concretized_order.append(h)
|
||||
self.specs_by_hash[h] = concrete
|
||||
|
||||
@@ -1542,7 +1537,7 @@ def uninstalled_specs(self):
|
||||
with spack.store.db.read_transaction():
|
||||
for concretized_hash in self.concretized_order:
|
||||
spec = self.specs_by_hash[concretized_hash]
|
||||
if not spec.installed or (
|
||||
if not spec.package.installed or (
|
||||
spec.satisfies('dev_path=*') or
|
||||
spec.satisfies('^dev_path=*')
|
||||
):
|
||||
@@ -1577,7 +1572,7 @@ def install_specs(self, specs=None, **install_args):
|
||||
|
||||
# ensure specs already installed are marked explicit
|
||||
all_specs = specs or [cs for _, cs in self.concretized_specs()]
|
||||
specs_installed = [s for s in all_specs if s.installed]
|
||||
specs_installed = [s for s in all_specs if s.package.installed]
|
||||
with spack.store.db.write_transaction(): # do all in one transaction
|
||||
for spec in specs_installed:
|
||||
spack.store.db.update_explicit(spec, True)
|
||||
@@ -1604,7 +1599,7 @@ def install_specs(self, specs=None, **install_args):
|
||||
finally:
|
||||
# Ensure links are set appropriately
|
||||
for spec in specs_to_install:
|
||||
if spec.installed:
|
||||
if spec.package.installed:
|
||||
self.new_installs.append(spec)
|
||||
try:
|
||||
self._install_log_links(spec)
|
||||
@@ -1619,19 +1614,14 @@ def all_specs(self):
|
||||
"""Return all specs, even those a user spec would shadow."""
|
||||
all_specs = set()
|
||||
for h in self.concretized_order:
|
||||
try:
|
||||
spec = self.specs_by_hash[h]
|
||||
except KeyError:
|
||||
tty.warn(
|
||||
'Environment %s appears to be corrupt: missing spec '
|
||||
'"%s"' % (self.name, h))
|
||||
continue
|
||||
all_specs.update(spec.traverse())
|
||||
all_specs.update(self.specs_by_hash[h].traverse())
|
||||
|
||||
return sorted(all_specs)
|
||||
|
||||
def all_hashes(self):
|
||||
"""Return hashes of all specs."""
|
||||
"""Return hashes of all specs.
|
||||
|
||||
Note these hashes exclude build dependencies."""
|
||||
return list(set(s.dag_hash() for s in self.all_specs()))
|
||||
|
||||
def roots(self):
|
||||
@@ -1659,7 +1649,7 @@ def added_specs(self):
|
||||
concrete = concretized.get(spec)
|
||||
if not concrete:
|
||||
yield spec
|
||||
elif not concrete.installed:
|
||||
elif not concrete.package.installed:
|
||||
yield concrete
|
||||
|
||||
def concretized_specs(self):
|
||||
@@ -1667,15 +1657,6 @@ def concretized_specs(self):
|
||||
for s, h in zip(self.concretized_user_specs, self.concretized_order):
|
||||
yield (s, self.specs_by_hash[h])
|
||||
|
||||
def get_by_hash(self, dag_hash):
|
||||
matches = {}
|
||||
for _, root in self.concretized_specs():
|
||||
for spec in root.traverse(root=True):
|
||||
dep_hash = spec.dag_hash()
|
||||
if dep_hash.startswith(dag_hash):
|
||||
matches[dep_hash] = spec
|
||||
return list(matches.values())
|
||||
|
||||
def matching_spec(self, spec):
|
||||
"""
|
||||
Given a spec (likely not concretized), find a matching concretized
|
||||
@@ -1703,7 +1684,13 @@ def matching_spec(self, spec):
|
||||
for user_spec, concretized_user_spec in self.concretized_specs():
|
||||
# Deal with concrete specs differently
|
||||
if spec.concrete:
|
||||
if spec in concretized_user_spec:
|
||||
# Matching a concrete spec is more restrictive
|
||||
# than just matching the dag hash
|
||||
is_match = (
|
||||
spec in concretized_user_spec and
|
||||
concretized_user_spec[spec.name].build_hash() == spec.build_hash()
|
||||
)
|
||||
if is_match:
|
||||
matches[spec] = spec
|
||||
continue
|
||||
|
||||
@@ -1783,12 +1770,12 @@ def _to_lockfile_dict(self):
|
||||
concrete_specs = {}
|
||||
for spec in self.specs_by_hash.values():
|
||||
for s in spec.traverse():
|
||||
dag_hash = s.dag_hash()
|
||||
if dag_hash not in concrete_specs:
|
||||
spec_dict = s.node_dict_with_hashes(hash=ht.dag_hash)
|
||||
build_hash = s.build_hash()
|
||||
if build_hash not in concrete_specs:
|
||||
spec_dict = s.to_node_dict(hash=ht.build_hash)
|
||||
# Assumes no legacy formats, since this was just created.
|
||||
spec_dict[ht.dag_hash.name] = s.dag_hash()
|
||||
concrete_specs[dag_hash] = spec_dict
|
||||
concrete_specs[build_hash] = spec_dict
|
||||
|
||||
hash_spec_list = zip(
|
||||
self.concretized_order, self.concretized_user_specs)
|
||||
@@ -1822,56 +1809,47 @@ def _read_lockfile(self, file_or_json):
|
||||
|
||||
def _read_lockfile_dict(self, d):
|
||||
"""Read a lockfile dictionary into this environment."""
|
||||
self.specs_by_hash = {}
|
||||
|
||||
roots = d['roots']
|
||||
self.concretized_user_specs = [Spec(r['spec']) for r in roots]
|
||||
self.concretized_order = [r['hash'] for r in roots]
|
||||
|
||||
json_specs_by_hash = d['concrete_specs']
|
||||
root_hashes = set(self.concretized_order)
|
||||
|
||||
# Track specs by their lockfile key. Currently spack uses the finest
|
||||
# grained hash as the lockfile key, while older formats used the build
|
||||
# hash or a previous incarnation of the DAG hash (one that did not
|
||||
# include build deps or package hash).
|
||||
specs_by_hash = {}
|
||||
|
||||
# Track specs by their DAG hash, allows handling DAG hash collisions
|
||||
first_seen = {}
|
||||
|
||||
# First pass: Put each spec in the map ignoring dependencies
|
||||
for lockfile_key, node_dict in json_specs_by_hash.items():
|
||||
for build_hash, node_dict in json_specs_by_hash.items():
|
||||
spec = Spec.from_node_dict(node_dict)
|
||||
if not spec._hash:
|
||||
# in v1 lockfiles, the hash only occurs as a key
|
||||
spec._hash = lockfile_key
|
||||
specs_by_hash[lockfile_key] = spec
|
||||
if d['_meta']['lockfile-version'] > 1:
|
||||
# Build hash is stored as a key, but not as part of the node dict
|
||||
# To ensure build hashes are not recomputed, we reattach here
|
||||
setattr(spec, ht.build_hash.attr, build_hash)
|
||||
specs_by_hash[build_hash] = spec
|
||||
|
||||
# Second pass: For each spec, get its dependencies from the node dict
|
||||
# and add them to the spec
|
||||
for lockfile_key, node_dict in json_specs_by_hash.items():
|
||||
for build_hash, node_dict in json_specs_by_hash.items():
|
||||
for _, dep_hash, deptypes, _ in (
|
||||
Spec.dependencies_from_node_dict(node_dict)):
|
||||
specs_by_hash[lockfile_key]._add_dependency(
|
||||
specs_by_hash[build_hash]._add_dependency(
|
||||
specs_by_hash[dep_hash], deptypes)
|
||||
|
||||
# Traverse the root specs one at a time in the order they appear.
|
||||
# The first time we see each DAG hash, that's the one we want to
|
||||
# keep. This is only required as long as we support older lockfile
|
||||
# formats where the mapping from DAG hash to lockfile key is possibly
|
||||
# one-to-many.
|
||||
for lockfile_key in self.concretized_order:
|
||||
for s in specs_by_hash[lockfile_key].traverse():
|
||||
if s.dag_hash() not in first_seen:
|
||||
first_seen[s.dag_hash()] = s
|
||||
# If we are reading an older lockfile format (which uses dag hashes
|
||||
# that exclude build deps), we use this to convert the old
|
||||
# concretized_order to the full hashes (preserving the order)
|
||||
old_hash_to_new = {}
|
||||
self.specs_by_hash = {}
|
||||
for _, spec in specs_by_hash.items():
|
||||
dag_hash = spec.dag_hash()
|
||||
build_hash = spec.build_hash()
|
||||
if dag_hash in root_hashes:
|
||||
old_hash_to_new[dag_hash] = build_hash
|
||||
|
||||
# Now make sure concretized_order and our internal specs dict
|
||||
# contains the keys used by modern spack (i.e. the dag_hash
|
||||
# that includes build deps and package hash).
|
||||
self.concretized_order = [specs_by_hash[h_key].dag_hash()
|
||||
for h_key in self.concretized_order]
|
||||
if (dag_hash in root_hashes or build_hash in root_hashes):
|
||||
self.specs_by_hash[build_hash] = spec
|
||||
|
||||
for spec_dag_hash in self.concretized_order:
|
||||
self.specs_by_hash[spec_dag_hash] = first_seen[spec_dag_hash]
|
||||
if old_hash_to_new:
|
||||
# Replace any older hashes in concretized_order with hashes
|
||||
# that include build deps
|
||||
self.concretized_order = [
|
||||
old_hash_to_new.get(h, h) for h in self.concretized_order]
|
||||
|
||||
def write(self, regenerate=True):
|
||||
"""Writes an in-memory environment to its location on disk.
|
||||
@@ -1884,15 +1862,17 @@ def write(self, regenerate=True):
|
||||
regenerate (bool): regenerate views and run post-write hooks as
|
||||
well as writing if True.
|
||||
"""
|
||||
# Warn that environments are not in the latest format.
|
||||
if not is_latest_format(self.manifest_path):
|
||||
ver = '.'.join(str(s) for s in spack.spack_version_info[:2])
|
||||
msg = ('The environment "{}" is written to disk in a deprecated format. '
|
||||
'Please update it using:\n\n'
|
||||
'\tspack env update {}\n\n'
|
||||
'Note that versions of Spack older than {} may not be able to '
|
||||
# Intercept environment not using the latest schema format and prevent
|
||||
# them from being modified
|
||||
manifest_exists = os.path.exists(self.manifest_path)
|
||||
if manifest_exists and not is_latest_format(self.manifest_path):
|
||||
msg = ('The environment "{0}" needs to be written to disk, but '
|
||||
'is currently using a deprecated format. Please update it '
|
||||
'using:\n\n'
|
||||
'\tspack env update {0}\n\n'
|
||||
'Note that previous versions of Spack will not be able to '
|
||||
'use the updated configuration.')
|
||||
tty.warn(msg.format(self.name, self.name, ver))
|
||||
raise RuntimeError(msg.format(self.name))
|
||||
|
||||
# ensure path in var/spack/environments
|
||||
fs.mkdirp(self.path)
|
||||
@@ -2244,16 +2224,14 @@ def _top_level_key(data):
|
||||
|
||||
|
||||
def is_latest_format(manifest):
|
||||
"""Return False if the manifest file exists and is not in the latest schema format.
|
||||
"""Return True if the manifest file is at the latest schema format,
|
||||
False otherwise.
|
||||
|
||||
Args:
|
||||
manifest (str): manifest file to be analyzed
|
||||
"""
|
||||
try:
|
||||
with open(manifest) as f:
|
||||
data = syaml.load(f)
|
||||
except (OSError, IOError):
|
||||
return True
|
||||
with open(manifest) as f:
|
||||
data = syaml.load(f)
|
||||
top_level_key = _top_level_key(data)
|
||||
changed = spack.schema.env.update(data[top_level_key])
|
||||
return not changed
|
||||
|
||||
@@ -10,9 +10,9 @@
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
#: at what level we should write stack traces or short error messages
|
||||
#: whether we should write stack traces or short error messages
|
||||
#: this is module-scoped because it needs to be set very early
|
||||
debug = 0
|
||||
debug = False
|
||||
|
||||
|
||||
class SpackError(Exception):
|
||||
|
||||
@@ -406,12 +406,12 @@ def write(self, spec, color=None, out=None):
|
||||
# Colors associated with each node in the DAG.
|
||||
# Edges are colored by the node they point to.
|
||||
self._name_to_color = {
|
||||
spec.dag_hash(): self.colors[i % len(self.colors)]
|
||||
spec.full_hash(): self.colors[i % len(self.colors)]
|
||||
for i, spec in enumerate(nodes_in_topological_order)
|
||||
}
|
||||
|
||||
# Frontier tracks open edges of the graph as it's written out.
|
||||
self._frontier = [[spec.dag_hash()]]
|
||||
self._frontier = [[spec.full_hash()]]
|
||||
while self._frontier:
|
||||
# Find an unexpanded part of frontier
|
||||
i = find(self._frontier, lambda f: len(f) > 1)
|
||||
@@ -488,16 +488,14 @@ def write(self, spec, color=None, out=None):
|
||||
node = nodes_in_topological_order.pop()
|
||||
|
||||
# Find the named node in the frontier and draw it.
|
||||
i = find(self._frontier, lambda f: node.dag_hash() in f)
|
||||
i = find(self._frontier, lambda f: node.full_hash() in f)
|
||||
self._node_line(i, node)
|
||||
|
||||
# Replace node with its dependencies
|
||||
self._frontier.pop(i)
|
||||
edges = sorted(
|
||||
node.edges_to_dependencies(deptype=self.deptype), reverse=True
|
||||
)
|
||||
if edges:
|
||||
deps = [e.spec.dag_hash() for e in edges]
|
||||
deps = node.dependencies(deptype=self.deptype)
|
||||
if deps:
|
||||
deps = sorted((d.full_hash() for d in deps), reverse=True)
|
||||
self._connect_deps(i, deps, "new-deps") # anywhere.
|
||||
|
||||
elif self._frontier:
|
||||
|
||||
@@ -33,14 +33,15 @@ def attr(self):
|
||||
"""Private attribute stored on spec"""
|
||||
return '_' + self.name
|
||||
|
||||
def __call__(self, spec):
|
||||
"""Run this hash on the provided spec."""
|
||||
return spec.spec_hash(self)
|
||||
|
||||
|
||||
#: Spack's deployment hash. Includes all inputs that can affect how a package is built.
|
||||
#: Default Hash descriptor, used by Spec.dag_hash() and stored in the DB.
|
||||
dag_hash = SpecHashDescriptor(
|
||||
deptype=('build', 'link', 'run'), package_hash=True, name='hash')
|
||||
deptype=('link', 'run'), package_hash=False, name='hash')
|
||||
|
||||
|
||||
#: Hash descriptor that includes build dependencies.
|
||||
build_hash = SpecHashDescriptor(
|
||||
deptype=('build', 'link', 'run'), package_hash=False, name='build_hash')
|
||||
|
||||
|
||||
#: Hash descriptor used only to transfer a DAG, as is, across processes
|
||||
@@ -50,19 +51,12 @@ def __call__(self, spec):
|
||||
name='process_hash'
|
||||
)
|
||||
|
||||
|
||||
#: Package hash used as part of dag hash
|
||||
package_hash = SpecHashDescriptor(
|
||||
deptype=(), package_hash=True, name='package_hash',
|
||||
override=lambda s: s.package.content_hash())
|
||||
|
||||
|
||||
# Deprecated hash types, no longer used, but needed to understand old serialized
|
||||
# spec formats
|
||||
|
||||
#: Full hash used in build pipelines to determine when to rebuild packages.
|
||||
full_hash = SpecHashDescriptor(
|
||||
deptype=('build', 'link', 'run'), package_hash=True, name='full_hash')
|
||||
|
||||
|
||||
build_hash = SpecHashDescriptor(
|
||||
deptype=('build', 'link', 'run'), package_hash=False, name='build_hash')
|
||||
#: Package hash used as part of full hash
|
||||
package_hash = SpecHashDescriptor(
|
||||
deptype=(), package_hash=True, name='package_hash',
|
||||
override=lambda s: s.package.content_hash())
|
||||
|
||||
@@ -140,7 +140,7 @@ def _handle_external_and_upstream(pkg, explicit):
|
||||
.format(pkg.prefix, package_id(pkg)))
|
||||
return True
|
||||
|
||||
if pkg.spec.installed_upstream:
|
||||
if pkg.installed_upstream:
|
||||
tty.verbose('{0} is installed in an upstream Spack instance at {1}'
|
||||
.format(package_id(pkg), pkg.spec.prefix))
|
||||
_print_installed_pkg(pkg.prefix)
|
||||
@@ -260,7 +260,8 @@ def _hms(seconds):
|
||||
return ' '.join(parts)
|
||||
|
||||
|
||||
def _install_from_cache(pkg, cache_only, explicit, unsigned=False):
|
||||
def _install_from_cache(pkg, cache_only, explicit, unsigned=False,
|
||||
full_hash_match=False):
|
||||
"""
|
||||
Extract the package from binary cache
|
||||
|
||||
@@ -277,7 +278,7 @@ def _install_from_cache(pkg, cache_only, explicit, unsigned=False):
|
||||
``False`` otherwise
|
||||
"""
|
||||
installed_from_cache = _try_install_from_binary_cache(
|
||||
pkg, explicit, unsigned=unsigned)
|
||||
pkg, explicit, unsigned=unsigned, full_hash_match=full_hash_match)
|
||||
pkg_id = package_id(pkg)
|
||||
if not installed_from_cache:
|
||||
pre = 'No binary for {0} found'.format(pkg_id)
|
||||
@@ -389,7 +390,8 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
|
||||
return True
|
||||
|
||||
|
||||
def _try_install_from_binary_cache(pkg, explicit, unsigned=False):
|
||||
def _try_install_from_binary_cache(pkg, explicit, unsigned=False,
|
||||
full_hash_match=False):
|
||||
"""
|
||||
Try to extract the package from binary cache.
|
||||
|
||||
@@ -401,7 +403,8 @@ def _try_install_from_binary_cache(pkg, explicit, unsigned=False):
|
||||
"""
|
||||
pkg_id = package_id(pkg)
|
||||
tty.debug('Searching for binary cache of {0}'.format(pkg_id))
|
||||
matches = binary_distribution.get_mirrors_for_spec(pkg.spec)
|
||||
matches = binary_distribution.get_mirrors_for_spec(
|
||||
pkg.spec, full_hash_match=full_hash_match)
|
||||
|
||||
if not matches:
|
||||
return False
|
||||
@@ -558,10 +561,6 @@ def log(pkg):
|
||||
# Archive the environment modifications for the build.
|
||||
fs.install(pkg.env_mods_path, pkg.install_env_path)
|
||||
|
||||
# Archive the install-phase test log, if present
|
||||
if pkg.test_install_log_path and os.path.exists(pkg.test_install_log_path):
|
||||
fs.install(pkg.test_install_log_path, pkg.install_test_install_log_path)
|
||||
|
||||
if os.path.exists(pkg.configure_args_path):
|
||||
# Archive the args used for the build
|
||||
fs.install(pkg.configure_args_path, pkg.install_configure_args_path)
|
||||
@@ -854,7 +853,7 @@ def _check_deps_status(self, request):
|
||||
raise InstallError(err.format(request.pkg_id, msg))
|
||||
|
||||
# Flag external and upstream packages as being installed
|
||||
if dep_pkg.spec.external or dep_pkg.spec.installed_upstream:
|
||||
if dep_pkg.spec.external or dep_pkg.installed_upstream:
|
||||
self._flag_installed(dep_pkg)
|
||||
continue
|
||||
|
||||
@@ -996,7 +995,7 @@ def _ensure_install_ready(self, pkg):
|
||||
raise ExternalPackageError('{0} {1}'.format(pre, 'is external'))
|
||||
|
||||
# Upstream packages cannot be installed locally.
|
||||
if pkg.spec.installed_upstream:
|
||||
if pkg.installed_upstream:
|
||||
raise UpstreamPackageError('{0} {1}'.format(pre, 'is upstream'))
|
||||
|
||||
# The package must have a prefix lock at this stage.
|
||||
@@ -1201,6 +1200,7 @@ def _install_task(self, task):
|
||||
install_args = task.request.install_args
|
||||
cache_only = install_args.get('cache_only')
|
||||
explicit = task.explicit
|
||||
full_hash_match = install_args.get('full_hash_match')
|
||||
tests = install_args.get('tests')
|
||||
unsigned = install_args.get('unsigned')
|
||||
use_cache = install_args.get('use_cache')
|
||||
@@ -1213,7 +1213,8 @@ def _install_task(self, task):
|
||||
|
||||
# Use the binary cache if requested
|
||||
if use_cache and \
|
||||
_install_from_cache(pkg, cache_only, explicit, unsigned):
|
||||
_install_from_cache(pkg, cache_only, explicit, unsigned,
|
||||
full_hash_match):
|
||||
self._update_installed(task)
|
||||
if task.compiler:
|
||||
spack.compilers.add_compilers_to_config(
|
||||
@@ -2017,10 +2018,11 @@ def build_process(pkg, install_args):
|
||||
|
||||
|
||||
class OverwriteInstall(object):
|
||||
def __init__(self, installer, database, task):
|
||||
def __init__(self, installer, database, task, tmp_root=None):
|
||||
self.installer = installer
|
||||
self.database = database
|
||||
self.task = task
|
||||
self.tmp_root = tmp_root
|
||||
|
||||
def install(self):
|
||||
"""
|
||||
@@ -2030,7 +2032,7 @@ def install(self):
|
||||
install error if installation fails.
|
||||
"""
|
||||
try:
|
||||
with fs.replace_directory_transaction(self.task.pkg.prefix):
|
||||
with fs.replace_directory_transaction(self.task.pkg.prefix, self.tmp_root):
|
||||
self.installer._install_task(self.task)
|
||||
except fs.CouldNotRestoreDirectoryBackup as e:
|
||||
self.database.remove(self.task.pkg.spec)
|
||||
@@ -2301,6 +2303,7 @@ def _add_default_args(self):
|
||||
('dirty', False),
|
||||
('fail_fast', False),
|
||||
('fake', False),
|
||||
('full_hash_match', False),
|
||||
('install_deps', True),
|
||||
('install_package', True),
|
||||
('install_source', False),
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
import llnl.util.tty as tty
|
||||
import llnl.util.tty.colify
|
||||
import llnl.util.tty.color as color
|
||||
from llnl.util.tty.log import log_output
|
||||
from llnl.util.tty.log import log_output, winlog
|
||||
|
||||
import spack
|
||||
import spack.cmd
|
||||
@@ -375,6 +375,13 @@ def make_argument_parser(**kwargs):
|
||||
# stat names in groups of 7, for nice wrapping.
|
||||
stat_lines = list(zip(*(iter(stat_names),) * 7))
|
||||
|
||||
# help message for --show-cores
|
||||
show_cores_help = 'provide additional information on concretization failures\n'
|
||||
show_cores_help += 'off (default): show only the violated rule\n'
|
||||
show_cores_help += 'full: show raw unsat cores from clingo\n'
|
||||
show_cores_help += 'minimized: show subset-minimal unsat cores '
|
||||
show_cores_help += '(Warning: this may take hours for some specs)'
|
||||
|
||||
parser.add_argument(
|
||||
'-h', '--help',
|
||||
dest='help', action='store_const', const='short', default=None,
|
||||
@@ -398,6 +405,9 @@ def make_argument_parser(**kwargs):
|
||||
'-d', '--debug', action='count', default=0,
|
||||
help="write out debug messages "
|
||||
"(more d's for more verbosity: -d, -dd, -ddd, etc.)")
|
||||
parser.add_argument(
|
||||
'--show-cores', choices=["off", "full", "minimized"], default="off",
|
||||
help=show_cores_help)
|
||||
parser.add_argument(
|
||||
'--timestamp', action='store_true',
|
||||
help="Add a timestamp to tty output")
|
||||
@@ -480,11 +490,18 @@ def setup_main_options(args):
|
||||
# errors raised by spack.config.
|
||||
|
||||
if args.debug:
|
||||
spack.error.debug = args.debug
|
||||
spack.error.debug = True
|
||||
spack.util.debug.register_interrupt_handler()
|
||||
spack.config.set('config:debug', True, scope='command_line')
|
||||
spack.util.environment.tracing_enabled = True
|
||||
|
||||
if args.show_cores != "off":
|
||||
# minimize_cores defaults to true, turn it off if we're showing full core
|
||||
# but don't want to wait to minimize it.
|
||||
spack.solver.asp.full_cores = True
|
||||
if args.show_cores == 'full':
|
||||
spack.solver.asp.minimize_cores = False
|
||||
|
||||
if args.timestamp:
|
||||
tty.set_timestamp(True)
|
||||
|
||||
@@ -588,9 +605,14 @@ def __call__(self, *argv, **kwargs):
|
||||
|
||||
out = StringIO()
|
||||
try:
|
||||
with log_output(out):
|
||||
self.returncode = _invoke_command(
|
||||
self.command, self.parser, args, unknown)
|
||||
if sys.platform == 'win32':
|
||||
with winlog(out):
|
||||
self.returncode = _invoke_command(
|
||||
self.command, self.parser, args, unknown)
|
||||
else:
|
||||
with log_output(out):
|
||||
self.returncode = _invoke_command(
|
||||
self.command, self.parser, args, unknown)
|
||||
|
||||
except SystemExit as e:
|
||||
self.returncode = e.code
|
||||
|
||||
@@ -184,38 +184,19 @@ def _filter_compiler_wrappers_impl(self):
|
||||
|
||||
x = llnl.util.filesystem.FileFilter(*abs_files)
|
||||
|
||||
compiler_vars = [
|
||||
replacements = [
|
||||
('CC', self.compiler.cc),
|
||||
('CXX', self.compiler.cxx),
|
||||
('F77', self.compiler.f77),
|
||||
('FC', self.compiler.fc)
|
||||
]
|
||||
|
||||
# Some paths to the compiler wrappers might be substrings of the others.
|
||||
# For example:
|
||||
# CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper)
|
||||
# FC=/path/to/spack/lib/spack/env/cce/ftn
|
||||
# Therefore, we perform the filtering in the reversed sorted order of
|
||||
# the substituted strings. If, however, the strings are identical (e.g.
|
||||
# both CC and FC are set using realpath), the filtering is done
|
||||
# according to the order in compiler_vars. To achieve that, we populate
|
||||
# the following array with tuples of three elements: path to the
|
||||
# wrapper, negated index of the variable in compiler_vars, path to the
|
||||
# real compiler. This way, the reversed sorted order of the resulting
|
||||
# array is the order of replacements that we need.
|
||||
replacements = []
|
||||
|
||||
for idx, (env_var, compiler_path) in enumerate(compiler_vars):
|
||||
for env_var, compiler_path in replacements:
|
||||
if env_var in os.environ:
|
||||
# filter spack wrapper and links to spack wrapper in case
|
||||
# build system runs realpath
|
||||
wrapper = os.environ[env_var]
|
||||
for wrapper_path in (wrapper, os.path.realpath(wrapper)):
|
||||
replacements.append((wrapper_path, -idx, compiler_path))
|
||||
|
||||
for wrapper_path, _, compiler_path in sorted(replacements,
|
||||
reverse=True):
|
||||
x.filter(wrapper_path, compiler_path, **filter_kwargs)
|
||||
x.filter(wrapper_path, compiler_path, **filter_kwargs)
|
||||
|
||||
# Remove this linking flag if present (it turns RPATH into RUNPATH)
|
||||
x.filter('{0}--enable-new-dtags'.format(self.compiler.linker_arg), '',
|
||||
|
||||
@@ -370,7 +370,7 @@ def get_module(
|
||||
available.
|
||||
"""
|
||||
try:
|
||||
upstream = spec.installed_upstream
|
||||
upstream = spec.package.installed_upstream
|
||||
except spack.repo.UnknownPackageError:
|
||||
upstream, record = spack.store.db.query_by_spec_hash(spec.dag_hash())
|
||||
if upstream:
|
||||
|
||||
@@ -132,7 +132,7 @@ def __init__(self, host=None, prefix="ms1", allow_fail=False, tags=None,
|
||||
self.tags = tags
|
||||
self.save_local = save_local
|
||||
|
||||
# We key lookup of build_id by dag_hash
|
||||
# We keey lookup of build_id by full_hash
|
||||
self.build_ids = {}
|
||||
self.setup_save()
|
||||
|
||||
@@ -412,8 +412,6 @@ def new_configuration(self, specs):
|
||||
spec.concretize()
|
||||
|
||||
# Remove extra level of nesting
|
||||
# This is the only place in Spack we still use full_hash, as `spack monitor`
|
||||
# requires specs with full_hash-keyed dependencies.
|
||||
as_dict = {"spec": spec.to_dict(hash=ht.full_hash)['spec'],
|
||||
"spack_version": self.spack_version}
|
||||
|
||||
@@ -439,7 +437,8 @@ def failed_concretization(self, specs):
|
||||
meta = spec.to_dict()['spec']
|
||||
nodes = []
|
||||
for node in meta.get("nodes", []):
|
||||
node["full_hash"] = "FAILED_CONCRETIZATION"
|
||||
for hashtype in ["build_hash", "full_hash"]:
|
||||
node[hashtype] = "FAILED_CONCRETIZATION"
|
||||
nodes.append(node)
|
||||
meta['nodes'] = nodes
|
||||
|
||||
@@ -471,13 +470,13 @@ def get_build_id(self, spec, return_response=False, spec_exists=True):
|
||||
"""
|
||||
Retrieve a build id, either in the local cache, or query the server.
|
||||
"""
|
||||
dag_hash = spec.dag_hash()
|
||||
if dag_hash in self.build_ids:
|
||||
return self.build_ids[dag_hash]
|
||||
full_hash = spec.full_hash()
|
||||
if full_hash in self.build_ids:
|
||||
return self.build_ids[full_hash]
|
||||
|
||||
# Prepare build environment data (including spack version)
|
||||
data = self.build_environment.copy()
|
||||
data['full_hash'] = dag_hash
|
||||
data['full_hash'] = full_hash
|
||||
|
||||
# If the build should be tagged, add it
|
||||
if self.tags:
|
||||
@@ -495,10 +494,10 @@ def get_build_id(self, spec, return_response=False, spec_exists=True):
|
||||
data['spec'] = syaml.load(read_file(spec_file))
|
||||
|
||||
if self.save_local:
|
||||
return self.get_local_build_id(data, dag_hash, return_response)
|
||||
return self.get_server_build_id(data, dag_hash, return_response)
|
||||
return self.get_local_build_id(data, full_hash, return_response)
|
||||
return self.get_server_build_id(data, full_hash, return_response)
|
||||
|
||||
def get_local_build_id(self, data, dag_hash, return_response):
|
||||
def get_local_build_id(self, data, full_hash, return_response):
|
||||
"""
|
||||
Generate a local build id based on hashing the expected data
|
||||
"""
|
||||
@@ -511,15 +510,15 @@ def get_local_build_id(self, data, dag_hash, return_response):
|
||||
return response
|
||||
return bid
|
||||
|
||||
def get_server_build_id(self, data, dag_hash, return_response=False):
|
||||
def get_server_build_id(self, data, full_hash, return_response=False):
|
||||
"""
|
||||
Retrieve a build id from the spack monitor server
|
||||
"""
|
||||
response = self.do_request("builds/new/", data=sjson.dump(data))
|
||||
|
||||
# Add the build id to the lookup
|
||||
bid = self.build_ids[dag_hash] = response['data']['build']['build_id']
|
||||
self.build_ids[dag_hash] = bid
|
||||
bid = self.build_ids[full_hash] = response['data']['build']['build_id']
|
||||
self.build_ids[full_hash] = bid
|
||||
|
||||
# If the function is called directly, the user might want output
|
||||
if return_response:
|
||||
|
||||
@@ -26,14 +26,13 @@
|
||||
import time
|
||||
import traceback
|
||||
import types
|
||||
import warnings
|
||||
from typing import Any, Callable, Dict, List, Optional # novm
|
||||
|
||||
import six
|
||||
|
||||
import llnl.util.filesystem as fsys
|
||||
import llnl.util.tty as tty
|
||||
from llnl.util.lang import memoized, nullcontext
|
||||
from llnl.util.lang import memoized
|
||||
from llnl.util.link_tree import LinkTree
|
||||
|
||||
import spack.compilers
|
||||
@@ -53,7 +52,6 @@
|
||||
import spack.store
|
||||
import spack.url
|
||||
import spack.util.environment
|
||||
import spack.util.path
|
||||
import spack.util.web
|
||||
from spack.filesystem_view import YamlFilesystemView
|
||||
from spack.install_test import TestFailure, TestSuite
|
||||
@@ -61,6 +59,7 @@
|
||||
from spack.stage import ResourceStage, Stage, StageComposite, stage_prefix
|
||||
from spack.util.executable import ProcessError, which
|
||||
from spack.util.package_hash import package_hash
|
||||
from spack.util.path import win_exe_ext
|
||||
from spack.util.prefix import Prefix
|
||||
from spack.version import Version
|
||||
|
||||
@@ -77,9 +76,6 @@
|
||||
# Filename for the Spack build/install environment modifications file.
|
||||
_spack_build_envmodsfile = 'spack-build-env-mods.txt'
|
||||
|
||||
# Filename for the Spack install phase-time test log.
|
||||
_spack_install_test_log = 'install-time-test-log.txt'
|
||||
|
||||
# Filename of json with total build and phase times (seconds)
|
||||
_spack_times_log = 'install_times.json'
|
||||
|
||||
@@ -200,9 +196,9 @@ def __init__(cls, name, bases, attr_dict):
|
||||
def platform_executables(self):
|
||||
def to_windows_exe(exe):
|
||||
if exe.endswith('$'):
|
||||
exe = exe.replace('$', '%s$' % spack.util.path.win_exe_ext())
|
||||
exe = exe.replace('$', '%s$' % win_exe_ext())
|
||||
else:
|
||||
exe += spack.util.path.win_exe_ext()
|
||||
exe += win_exe_ext()
|
||||
return exe
|
||||
plat_exe = []
|
||||
if hasattr(self, 'executables'):
|
||||
@@ -438,11 +434,6 @@ def name(self):
|
||||
self._name = self._name[self._name.rindex('.') + 1:]
|
||||
return self._name
|
||||
|
||||
@property
|
||||
def global_license_dir(self):
|
||||
"""Returns the directory where license files for all packages are stored."""
|
||||
return spack.util.path.canonicalize_path(spack.config.get('config:license_dir'))
|
||||
|
||||
|
||||
def run_before(*phases):
|
||||
"""Registers a method of a package to be run before a given phase"""
|
||||
@@ -799,6 +790,15 @@ def __init__(self, spec):
|
||||
|
||||
super(PackageBase, self).__init__()
|
||||
|
||||
@property
|
||||
def installed_upstream(self):
|
||||
if not hasattr(self, '_installed_upstream'):
|
||||
upstream, record = spack.store.db.query_by_spec_hash(
|
||||
self.spec.dag_hash())
|
||||
self._installed_upstream = upstream
|
||||
|
||||
return self._installed_upstream
|
||||
|
||||
@classmethod
|
||||
def possible_dependencies(
|
||||
cls, transitive=True, expand_virtuals=True, deptype='all',
|
||||
@@ -943,8 +943,9 @@ def name(self):
|
||||
|
||||
@property
|
||||
def global_license_dir(self):
|
||||
"""Returns the directory where global license files are stored."""
|
||||
return type(self).global_license_dir
|
||||
"""Returns the directory where global license files for all
|
||||
packages are stored."""
|
||||
return os.path.join(spack.paths.prefix, 'etc', 'spack', 'licenses')
|
||||
|
||||
@property
|
||||
def global_license_file(self):
|
||||
@@ -1251,16 +1252,6 @@ def configure_args_path(self):
|
||||
"""Return the configure args file path associated with staging."""
|
||||
return os.path.join(self.stage.path, _spack_configure_argsfile)
|
||||
|
||||
@property
|
||||
def test_install_log_path(self):
|
||||
"""Return the install phase-time test log file path, if set."""
|
||||
return getattr(self, 'test_log_file', None)
|
||||
|
||||
@property
|
||||
def install_test_install_log_path(self):
|
||||
"""Return the install location for the install phase-time test log."""
|
||||
return fsys.join_path(self.metadata_dir, _spack_install_test_log)
|
||||
|
||||
@property
|
||||
def times_log_path(self):
|
||||
"""Return the times log json file."""
|
||||
@@ -1276,20 +1267,6 @@ def install_test_root(self):
|
||||
"""Return the install test root directory."""
|
||||
return os.path.join(self.metadata_dir, 'test')
|
||||
|
||||
@property
|
||||
def installed(self):
|
||||
msg = ('the "PackageBase.installed" property is deprecated and will be '
|
||||
'removed in Spack v0.19, use "Spec.installed" instead')
|
||||
warnings.warn(msg)
|
||||
return self.spec.installed
|
||||
|
||||
@property
|
||||
def installed_upstream(self):
|
||||
msg = ('the "PackageBase.installed_upstream" property is deprecated and will '
|
||||
'be removed in Spack v0.19, use "Spec.installed_upstream" instead')
|
||||
warnings.warn(msg)
|
||||
return self.spec.installed_upstream
|
||||
|
||||
def _make_fetcher(self):
|
||||
# Construct a composite fetcher that always contains at least
|
||||
# one element (the root package). In case there are resources
|
||||
@@ -1403,7 +1380,7 @@ def is_activated(self, view):
|
||||
if not self.is_extension:
|
||||
raise ValueError(
|
||||
"is_activated called on package that is not an extension.")
|
||||
if self.extendee_spec.installed_upstream:
|
||||
if self.extendee_spec.package.installed_upstream:
|
||||
# If this extends an upstream package, it cannot be activated for
|
||||
# it. This bypasses construction of the extension map, which can
|
||||
# can fail when run in the context of a downstream Spack instance
|
||||
@@ -1429,6 +1406,22 @@ def virtuals_provided(self):
|
||||
return [vspec for vspec, constraints in self.provided.items()
|
||||
if any(self.spec.satisfies(c) for c in constraints)]
|
||||
|
||||
@property
|
||||
def installed(self):
|
||||
"""Installation status of a package.
|
||||
|
||||
Returns:
|
||||
True if the package has been installed, False otherwise.
|
||||
"""
|
||||
try:
|
||||
# If the spec is in the DB, check the installed
|
||||
# attribute of the record
|
||||
return spack.store.db.get_record(self.spec).installed
|
||||
except KeyError:
|
||||
# If the spec is not in the DB, the method
|
||||
# above raises a Key error
|
||||
return False
|
||||
|
||||
@property
|
||||
def prefix(self):
|
||||
"""Get the prefix into which this package should be installed."""
|
||||
@@ -1677,65 +1670,39 @@ def all_patches(cls):
|
||||
return patches
|
||||
|
||||
def content_hash(self, content=None):
|
||||
"""Create a hash based on the artifacts and patches used to build this package.
|
||||
"""Create a hash based on the sources and logic used to build the
|
||||
package. This includes the contents of all applied patches and the
|
||||
contents of applicable functions in the package subclass."""
|
||||
if not self.spec.concrete:
|
||||
err_msg = ("Cannot invoke content_hash on a package"
|
||||
" if the associated spec is not concrete")
|
||||
raise spack.error.SpackError(err_msg)
|
||||
|
||||
This includes:
|
||||
* source artifacts (tarballs, repositories) used to build;
|
||||
* content hashes (``sha256``'s) of all patches applied by Spack; and
|
||||
* canonicalized contents the ``package.py`` recipe used to build.
|
||||
|
||||
This hash is only included in Spack's DAG hash for concrete specs, but if it
|
||||
happens to be called on a package with an abstract spec, only applicable (i.e.,
|
||||
determinable) portions of the hash will be included.
|
||||
|
||||
"""
|
||||
# list of components to make up the hash
|
||||
hash_content = []
|
||||
|
||||
# source artifacts/repositories
|
||||
# TODO: resources
|
||||
if self.spec.versions.concrete:
|
||||
try:
|
||||
source_id = fs.for_package_version(self, self.version).source_id()
|
||||
except (fs.ExtrapolationError, fs.InvalidArgsError):
|
||||
# ExtrapolationError happens if the package has no fetchers defined.
|
||||
# InvalidArgsError happens when there are version directives with args,
|
||||
# but none of them identifies an actual fetcher.
|
||||
source_id = None
|
||||
|
||||
if not source_id:
|
||||
# TODO? in cases where a digest or source_id isn't available,
|
||||
# should this attempt to download the source and set one? This
|
||||
# probably only happens for source repositories which are
|
||||
# referenced by branch name rather than tag or commit ID.
|
||||
env = spack.environment.active_environment()
|
||||
from_local_sources = env and env.is_develop(self.spec)
|
||||
if not self.spec.external and not from_local_sources:
|
||||
message = 'Missing a source id for {s.name}@{s.version}'
|
||||
tty.warn(message.format(s=self))
|
||||
hash_content.append(''.encode('utf-8'))
|
||||
else:
|
||||
hash_content.append(source_id.encode('utf-8'))
|
||||
|
||||
# patch sha256's
|
||||
# Only include these if they've been assigned by the concretizer.
|
||||
# We check spec._patches_assigned instead of spec.concrete because
|
||||
# we have to call package_hash *before* marking specs concrete
|
||||
if self.spec._patches_assigned():
|
||||
hash_content.extend(
|
||||
':'.join((p.sha256, str(p.level))).encode('utf-8')
|
||||
for p in self.spec.patches
|
||||
)
|
||||
|
||||
# package.py contents
|
||||
hash_content = list()
|
||||
try:
|
||||
source_id = fs.for_package_version(self, self.version).source_id()
|
||||
except fs.ExtrapolationError:
|
||||
source_id = None
|
||||
if not source_id:
|
||||
# TODO? in cases where a digest or source_id isn't available,
|
||||
# should this attempt to download the source and set one? This
|
||||
# probably only happens for source repositories which are
|
||||
# referenced by branch name rather than tag or commit ID.
|
||||
env = spack.environment.active_environment()
|
||||
from_local_sources = env and env.is_develop(self.spec)
|
||||
if not self.spec.external and not from_local_sources:
|
||||
message = 'Missing a source id for {s.name}@{s.version}'
|
||||
tty.warn(message.format(s=self))
|
||||
hash_content.append(''.encode('utf-8'))
|
||||
else:
|
||||
hash_content.append(source_id.encode('utf-8'))
|
||||
hash_content.extend(':'.join((p.sha256, str(p.level))).encode('utf-8')
|
||||
for p in self.spec.patches)
|
||||
hash_content.append(package_hash(self.spec, source=content).encode('utf-8'))
|
||||
|
||||
# put it all together and encode as base32
|
||||
b32_hash = base64.b32encode(
|
||||
hashlib.sha256(
|
||||
bytes().join(sorted(hash_content))
|
||||
).digest()
|
||||
).lower()
|
||||
hashlib.sha256(bytes().join(
|
||||
sorted(hash_content))).digest()).lower()
|
||||
|
||||
# convert from bytes if running python 3
|
||||
if sys.version_info[0] >= 3:
|
||||
@@ -1959,33 +1926,6 @@ def cache_extra_test_sources(self, srcs):
|
||||
fsys.mkdirp(os.path.dirname(dest_path))
|
||||
fsys.copy(src_path, dest_path)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _setup_test(self, verbose, externals):
|
||||
self.test_failures = []
|
||||
if self.test_suite:
|
||||
self.test_log_file = self.test_suite.log_file_for_spec(self.spec)
|
||||
self.tested_file = self.test_suite.tested_file_for_spec(self.spec)
|
||||
pkg_id = self.test_suite.test_pkg_id(self.spec)
|
||||
else:
|
||||
self.test_log_file = fsys.join_path(
|
||||
self.stage.path, _spack_install_test_log)
|
||||
pkg_id = self.spec.format('{name}-{version}-{hash:7}')
|
||||
fsys.touch(self.test_log_file) # Otherwise log_parse complains
|
||||
|
||||
with tty.log.log_output(self.test_log_file, verbose) as logger:
|
||||
with logger.force_echo():
|
||||
tty.msg('Testing package {0}'.format(pkg_id))
|
||||
|
||||
# use debug print levels for log file to record commands
|
||||
old_debug = tty.is_debug()
|
||||
tty.set_debug(True)
|
||||
|
||||
try:
|
||||
yield logger
|
||||
finally:
|
||||
# reset debug level
|
||||
tty.set_debug(old_debug)
|
||||
|
||||
def do_test(self, dirty=False, externals=False):
|
||||
if self.test_requires_compiler:
|
||||
compilers = spack.compilers.compilers_for_spec(
|
||||
@@ -1997,14 +1937,19 @@ def do_test(self, dirty=False, externals=False):
|
||||
self.spec.compiler)
|
||||
return
|
||||
|
||||
# Clear test failures
|
||||
self.test_failures = []
|
||||
self.test_log_file = self.test_suite.log_file_for_spec(self.spec)
|
||||
self.tested_file = self.test_suite.tested_file_for_spec(self.spec)
|
||||
fsys.touch(self.test_log_file) # Otherwise log_parse complains
|
||||
|
||||
kwargs = {
|
||||
'dirty': dirty, 'fake': False, 'context': 'test',
|
||||
'externals': externals
|
||||
}
|
||||
if tty.is_verbose():
|
||||
kwargs['verbose'] = True
|
||||
spack.build_environment.start_build_process(
|
||||
self, test_process, kwargs)
|
||||
spack.build_environment.start_build_process(self, test_process, kwargs)
|
||||
|
||||
def test(self):
|
||||
# Defer tests to virtual and concrete packages
|
||||
@@ -2198,21 +2143,21 @@ def build_log_path(self):
|
||||
to the staging build file until the software is successfully installed,
|
||||
when it points to the file in the installation directory.
|
||||
"""
|
||||
return self.install_log_path if self.spec.installed else self.log_path
|
||||
return self.install_log_path if self.installed else self.log_path
|
||||
|
||||
@classmethod
|
||||
def inject_flags(cls, name, flags):
|
||||
"""
|
||||
flag_handler that injects all flags through the compiler wrapper.
|
||||
"""
|
||||
return flags, None, None
|
||||
return (flags, None, None)
|
||||
|
||||
@classmethod
|
||||
def env_flags(cls, name, flags):
|
||||
"""
|
||||
flag_handler that adds all flags to canonical environment variables.
|
||||
"""
|
||||
return None, flags, None
|
||||
return (None, flags, None)
|
||||
|
||||
@classmethod
|
||||
def build_system_flags(cls, name, flags):
|
||||
@@ -2223,7 +2168,7 @@ def build_system_flags(cls, name, flags):
|
||||
implements it. Currently, AutotoolsPackage and CMakePackage
|
||||
implement it.
|
||||
"""
|
||||
return None, None, flags
|
||||
return (None, None, flags)
|
||||
|
||||
def setup_build_environment(self, env):
|
||||
"""Sets up the build environment for a package.
|
||||
@@ -2378,11 +2323,7 @@ def uninstall_by_spec(spec, force=False, deprecator=None):
|
||||
|
||||
if not force:
|
||||
dependents = spack.store.db.installed_relatives(
|
||||
spec,
|
||||
direction='parents',
|
||||
transitive=True,
|
||||
deptype=("link", "run"),
|
||||
)
|
||||
spec, 'parents', True)
|
||||
if dependents:
|
||||
raise PackageStillNeededError(spec, dependents)
|
||||
|
||||
@@ -2524,10 +2465,10 @@ def _sanity_check_extension(self):
|
||||
extendee_package = self.extendee_spec.package
|
||||
extendee_package._check_extendable()
|
||||
|
||||
if not self.extendee_spec.installed:
|
||||
if not extendee_package.installed:
|
||||
raise ActivationError(
|
||||
"Can only (de)activate extensions for installed packages.")
|
||||
if not self.spec.installed:
|
||||
if not self.installed:
|
||||
raise ActivationError("Extensions must first be installed.")
|
||||
if self.extendee_spec.name not in self.extendees:
|
||||
raise ActivationError("%s does not extend %s!" %
|
||||
@@ -2753,54 +2694,45 @@ def rpath_args(self):
|
||||
"""
|
||||
return " ".join("-Wl,-rpath,%s" % p for p in self.rpath)
|
||||
|
||||
def _run_test_callbacks(self, method_names, callback_type='install'):
|
||||
"""Tries to call all of the listed methods, returning immediately
|
||||
if the list is None."""
|
||||
if method_names is None:
|
||||
return
|
||||
|
||||
fail_fast = spack.config.get('config:fail_fast', False)
|
||||
|
||||
with self._setup_test(verbose=False, externals=False) as logger:
|
||||
# Report running each of the methods in the build log
|
||||
print_test_message(
|
||||
logger, 'Running {0}-time tests'.format(callback_type), True)
|
||||
|
||||
for name in method_names:
|
||||
try:
|
||||
fn = getattr(self, name)
|
||||
|
||||
msg = 'RUN-TESTS: {0}-time tests [{1}]' \
|
||||
.format(callback_type, name),
|
||||
print_test_message(logger, msg, True)
|
||||
|
||||
fn()
|
||||
except AttributeError as e:
|
||||
msg = 'RUN-TESTS: method not implemented [{0}]' \
|
||||
.format(name),
|
||||
print_test_message(logger, msg, True)
|
||||
|
||||
self.test_failures.append((e, msg))
|
||||
if fail_fast:
|
||||
break
|
||||
|
||||
# Raise any collected failures here
|
||||
if self.test_failures:
|
||||
raise TestFailure(self.test_failures)
|
||||
|
||||
@on_package_attributes(run_tests=True)
|
||||
def _run_default_build_time_test_callbacks(self):
|
||||
"""Tries to call all the methods that are listed in the attribute
|
||||
``build_time_test_callbacks`` if ``self.run_tests is True``.
|
||||
|
||||
If ``build_time_test_callbacks is None`` returns immediately.
|
||||
"""
|
||||
self._run_test_callbacks(self.build_time_test_callbacks, 'build')
|
||||
if self.build_time_test_callbacks is None:
|
||||
return
|
||||
|
||||
for name in self.build_time_test_callbacks:
|
||||
try:
|
||||
fn = getattr(self, name)
|
||||
except AttributeError:
|
||||
msg = 'RUN-TESTS: method not implemented [{0}]'
|
||||
tty.warn(msg.format(name))
|
||||
else:
|
||||
tty.msg('RUN-TESTS: build-time tests [{0}]'.format(name))
|
||||
fn()
|
||||
|
||||
@on_package_attributes(run_tests=True)
|
||||
def _run_default_install_time_test_callbacks(self):
|
||||
"""Tries to call all the methods that are listed in the attribute
|
||||
``install_time_test_callbacks`` if ``self.run_tests is True``.
|
||||
|
||||
If ``install_time_test_callbacks is None`` returns immediately.
|
||||
"""
|
||||
self._run_test_callbacks(self.install_time_test_callbacks, 'install')
|
||||
if self.install_time_test_callbacks is None:
|
||||
return
|
||||
|
||||
for name in self.install_time_test_callbacks:
|
||||
try:
|
||||
fn = getattr(self, name)
|
||||
except AttributeError:
|
||||
msg = 'RUN-TESTS: method not implemented [{0}]'
|
||||
tty.warn(msg.format(name))
|
||||
else:
|
||||
tty.msg('RUN-TESTS: install-time tests [{0}]'.format(name))
|
||||
fn()
|
||||
|
||||
|
||||
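For context, the callback names iterated above come from class attributes that packages declare themselves. A hedged sketch (invented package name; assumes the Makefile provides `check`/`installcheck` targets) of how a package opts into these build- and install-time tests:

    # Hedged sketch, not from this diff: registering test callbacks in a package.
    from spack import *


    class Libbar(MakefilePackage):    # made-up package name
        """Toy package wiring up the callback lists used by the helpers above."""

        # Methods named here run after the build / after the install, but only
        # when tests are requested (e.g. `spack install --test=root`).
        build_time_test_callbacks = ["check"]
        install_time_test_callbacks = ["installcheck"]

        def check(self):
            make("check")          # assumes a `check` target exists

        def installcheck(self):
            make("installcheck")   # assumes an `installcheck` target exists
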
def has_test_method(pkg):
|
||||
@@ -2825,21 +2757,27 @@ def has_test_method(pkg):
|
||||
def print_test_message(logger, msg, verbose):
|
||||
if verbose:
|
||||
with logger.force_echo():
|
||||
tty.msg(msg)
|
||||
print(msg)
|
||||
else:
|
||||
tty.msg(msg)
|
||||
print(msg)
|
||||
|
||||
|
||||
def test_process(pkg, kwargs):
|
||||
verbose = kwargs.get('verbose', False)
|
||||
externals = kwargs.get('externals', False)
|
||||
with tty.log.log_output(pkg.test_log_file, verbose) as logger:
|
||||
with logger.force_echo():
|
||||
tty.msg('Testing package {0}'
|
||||
.format(pkg.test_suite.test_pkg_id(pkg.spec)))
|
||||
|
||||
with pkg._setup_test(verbose, externals) as logger:
|
||||
if pkg.spec.external and not externals:
|
||||
print_test_message(
|
||||
logger, 'Skipped tests for external package', verbose)
|
||||
print_test_message(logger, 'Skipped external package', verbose)
|
||||
return
|
||||
|
||||
# use debug print levels for log file to record commands
|
||||
old_debug = tty.is_debug()
|
||||
tty.set_debug(True)
|
||||
|
||||
# run test methods from the package and all virtuals it provides;
# virtuals have to be deduped by name
|
||||
v_names = list(set([vspec.name
|
||||
@@ -2858,7 +2796,8 @@ def test_process(pkg, kwargs):
|
||||
|
||||
ran_actual_test_function = False
|
||||
try:
|
||||
with fsys.working_dir(pkg.test_suite.test_dir_for_spec(pkg.spec)):
|
||||
with fsys.working_dir(
|
||||
pkg.test_suite.test_dir_for_spec(pkg.spec)):
|
||||
for spec in test_specs:
|
||||
pkg.test_suite.current_test_spec = spec
|
||||
# Fail gracefully if a virtual has no package/tests
|
||||
@@ -2900,9 +2839,7 @@ def test_process(pkg, kwargs):
|
||||
|
||||
# Run the tests
|
||||
ran_actual_test_function = True
|
||||
context = logger.force_echo if verbose else nullcontext
|
||||
with context():
|
||||
test_fn(pkg)
|
||||
test_fn(pkg)
|
||||
|
||||
# If fail-fast was on, we error out above
|
||||
# If we collect errors, raise them in batch here
|
||||
@@ -2910,12 +2847,15 @@ def test_process(pkg, kwargs):
|
||||
raise TestFailure(pkg.test_failures)
|
||||
|
||||
finally:
|
||||
# reset debug level
|
||||
tty.set_debug(old_debug)
|
||||
|
||||
# flag the package as having been tested (i.e., ran one or more
|
||||
# non-pass-only methods
|
||||
if ran_actual_test_function:
|
||||
fsys.touch(pkg.tested_file)
|
||||
else:
|
||||
print_test_message(logger, 'No tests to run', verbose)
|
||||
print_test_message(logger, 'No tests to run', verbose)
|
||||
|
||||
|
||||
inject_flags = PackageBase.inject_flags
|
||||
|
||||
@@ -123,11 +123,11 @@ def accept(self, id):
|
||||
|
||||
def next_token_error(self, message):
|
||||
"""Raise an error about the next token in the stream."""
|
||||
raise ParseError(message, self.text[0], self.token.end)
|
||||
raise ParseError(message, self.text, self.token.end)
|
||||
|
||||
def last_token_error(self, message):
|
||||
"""Raise an error about the previous token in the stream."""
|
||||
raise ParseError(message, self.text[0], self.token.start)
|
||||
raise ParseError(message, self.text, self.token.start)
|
||||
|
||||
def unexpected_token(self):
|
||||
self.next_token_error("Unexpected token: '%s'" % self.next.value)
|
||||
|
||||
@@ -43,12 +43,8 @@
|
||||
hooks_path = os.path.join(module_path, "hooks")
|
||||
opt_path = os.path.join(prefix, "opt")
|
||||
share_path = os.path.join(prefix, "share", "spack")
|
||||
etc_path = os.path.join(prefix, "etc", "spack")
|
||||
etc_path = os.path.join(prefix, "etc")
|
||||
|
||||
#
|
||||
# Things in $spack/etc/spack
|
||||
#
|
||||
default_license_dir = os.path.join(etc_path, "licenses")
|
||||
|
||||
#
|
||||
# Things in $spack/var/spack
|
||||
|
||||
@@ -25,7 +25,6 @@
|
||||
from spack.build_systems.cuda import CudaPackage
|
||||
from spack.build_systems.gnu import GNUMirrorPackage
|
||||
from spack.build_systems.intel import IntelPackage
|
||||
from spack.build_systems.lua import LuaPackage
|
||||
from spack.build_systems.makefile import MakefilePackage
|
||||
from spack.build_systems.maven import MavenPackage
|
||||
from spack.build_systems.meson import MesonPackage
|
||||
|
||||
@@ -873,7 +873,7 @@ def is_relocatable(spec):
|
||||
Raises:
|
||||
ValueError: if the spec is not installed
|
||||
"""
|
||||
if not spec.install_status():
|
||||
if not spec.installed():
|
||||
raise ValueError('spec is not installed [{0}]'.format(str(spec)))
|
||||
|
||||
if spec.external or spec.virtual:
|
||||
|
||||
@@ -355,17 +355,9 @@ def list_packages(rev):
|
||||
ref = rev.replace('...', '')
|
||||
rev = git('merge-base', ref, 'HEAD', output=str).strip()
|
||||
|
||||
output = git('ls-tree', '-r', '--name-only', rev, output=str)
|
||||
|
||||
# recursively list the packages directory
|
||||
package_paths = [
|
||||
line.split(os.sep) for line in output.split("\n") if line.endswith("package.py")
|
||||
]
|
||||
|
||||
# take the directory names with one-level-deep package files
|
||||
package_names = sorted(set([line[0] for line in package_paths if len(line) == 2]))
|
||||
|
||||
return package_names
|
||||
output = git('ls-tree', '--name-only', rev, output=str)
|
||||
return sorted(line for line in output.split('\n')
|
||||
if line and not line.startswith('.'))
|
||||
|
||||
|
||||
def diff_packages(rev1, rev2):
|
||||
|
||||
@@ -112,7 +112,8 @@ def __enter__(self):
|
||||
# Check which specs are already installed and mark them as skipped
|
||||
# only for install_task
|
||||
if self.do_fn == '_install_task':
|
||||
for dep in filter(lambda x: x.installed, input_spec.traverse()):
|
||||
for dep in filter(lambda x: x.package.installed,
|
||||
input_spec.traverse()):
|
||||
package = {
|
||||
'name': dep.name,
|
||||
'id': dep.dag_hash(),
|
||||
@@ -139,7 +140,7 @@ def wrapper(instance, *args, **kwargs):
|
||||
raise Exception
|
||||
|
||||
# We accounted before for what is already installed
|
||||
installed_already = pkg.spec.installed
|
||||
installed_already = pkg.installed
|
||||
|
||||
package = {
|
||||
'name': pkg.name,
|
||||
|
||||
@@ -38,13 +38,13 @@ def rewire(spliced_spec):
|
||||
nodes in the DAG of that spec."""
|
||||
assert spliced_spec.spliced
|
||||
for spec in spliced_spec.traverse(order='post', root=True):
|
||||
if not spec.build_spec.installed:
|
||||
if not spec.build_spec.package.installed:
|
||||
# TODO: May want to change this at least for the root spec...
|
||||
# spec.build_spec.package.do_install(force=True)
|
||||
raise PackageNotInstalledError(spliced_spec,
|
||||
spec.build_spec,
|
||||
spec)
|
||||
if spec.build_spec is not spec and not spec.installed:
|
||||
if spec.build_spec is not spec and not spec.package.installed:
|
||||
explicit = spec is spliced_spec
|
||||
rewire_node(spec, explicit)
|
||||
|
||||
@@ -95,8 +95,7 @@ def rewire_node(spec, explicit):
|
||||
spec.prefix)
|
||||
relocate.relocate_text_bin(binaries=bins_to_relocate,
|
||||
prefixes=prefix_to_prefix)
|
||||
# Copy package into place, except for spec.json (because spec.json
|
||||
# describes the old spec and not the new spliced spec).
|
||||
# copy package into place (shutil.copytree)
|
||||
shutil.copytree(os.path.join(tempdir, spec.dag_hash()), spec.prefix,
|
||||
ignore=shutil.ignore_patterns('spec.json',
|
||||
'install_manifest.json'))
|
||||
@@ -105,10 +104,7 @@ def rewire_node(spec, explicit):
|
||||
spec.build_spec.prefix,
|
||||
spec.prefix)
|
||||
shutil.rmtree(tempdir)
|
||||
# Above, we did not copy spec.json: instead, here we write the new
|
||||
# (spliced) spec into spec.json, without this, Database.add would fail on
|
||||
# the next line (because it checks the spec.json in the prefix against the
|
||||
# spec being added to look for mismatches)
|
||||
# handle all metadata changes; don't copy over spec.json file in .spack/
|
||||
spack.store.layout.write_spec(spec, spack.store.layout.spec_file_path(spec))
|
||||
# add to database, not sure about explicit
|
||||
spack.store.db.add(spec, spack.store.layout, explicit=explicit)
|
||||
|
||||
@@ -4,8 +4,6 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
"""This module contains jsonschema files for all of Spack's YAML formats."""
|
||||
|
||||
import warnings
|
||||
|
||||
import six
|
||||
|
||||
import llnl.util.lang
|
||||
@@ -51,12 +49,10 @@ def _deprecated_properties(validator, deprecated, instance, schema):
|
||||
msg = msg_str_or_func.format(properties=deprecated_properties)
|
||||
else:
|
||||
msg = msg_str_or_func(instance, deprecated_properties)
|
||||
if msg is None:
|
||||
return
|
||||
|
||||
is_error = deprecated['error']
|
||||
if not is_error:
|
||||
warnings.warn(msg)
|
||||
llnl.util.tty.warn(msg)
|
||||
else:
|
||||
import jsonschema
|
||||
yield jsonschema.ValidationError(msg)
|
||||
|
||||
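The validator above consumes `deprecatedProperties` blocks attached to individual schemas. A hedged sketch of such a block (the property name `old_option` and the message helper are invented; a real instance for `concretization` appears later in this diff):

    # Hedged sketch of a schema fragment consumed by _deprecated_properties above.
    def _msg(instance, deprecated_properties):
        return "properties {0} are deprecated".format(", ".join(deprecated_properties))


    schema_fragment = {
        'type': 'object',
        'deprecatedProperties': {
            'properties': ['old_option'],   # illustrative property name
            'message': _msg,                # may also be a plain format string
            'error': False,                 # warn instead of failing validation
        },
    }
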
@@ -14,25 +14,9 @@
'type': 'object',
'additionalProperties': False,
'properties': {
'reuse': {'type': 'boolean'},
'minimal': {'type': 'boolean'},
'targets': {
'type': 'object',
'properties': {
'host_compatible': {'type': 'boolean'},
'granularity': {
'type': 'string',
'enum': ['generic', 'microarchitectures']
}
}
},
'unify': {
'type': 'boolean'
# Todo: add when_possible.
# 'oneOf': [
# {'type': 'boolean'},
# {'type': 'string', 'enum': ['when_possible']}
# ]
'reuse': {
'anyOf': [{'type': 'boolean'},
{'type': 'string'}]
}
}
}

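A hedged illustration of a configuration document this schema accepts, checked with jsonschema; the values are invented and the sketch assumes the side of the diff that defines the `targets` subsection:

    # Hedged sketch: validating a concretizer config against the schema above.
    import jsonschema

    import spack.schema.concretizer

    config = {
        'concretizer': {
            'reuse': True,
            'targets': {'granularity': 'generic', 'host_compatible': False},
        }
    }
    jsonschema.validate(config, spack.schema.concretizer.schema)   # raises on error
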
@@ -56,7 +56,6 @@
|
||||
'type': 'array',
|
||||
'items': {'type': 'string'}
|
||||
},
|
||||
'license_dir': {'type': 'string'},
|
||||
'source_cache': {'type': 'string'},
|
||||
'misc_cache': {'type': 'string'},
|
||||
'connect_timeout': {'type': 'integer', 'minimum': 0},
|
||||
@@ -91,16 +90,7 @@
|
||||
'additional_external_search_paths': {
|
||||
'type': 'array',
|
||||
'items': {'type': 'string'}
|
||||
},
|
||||
'flags': {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'keep_werror': {
|
||||
'type': 'string',
|
||||
'enum': ['all', 'specific', 'none'],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
'deprecatedProperties': {
|
||||
'properties': ['module_roots'],
|
||||
|
||||
@@ -1,131 +0,0 @@
|
||||
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""Schema for Cray descriptive manifest: this describes a set of
|
||||
installed packages on the system and also specifies dependency
|
||||
relationships between them (so this provides more information than
|
||||
external entries in packages configuration).
|
||||
|
||||
This does not specify a configuration - it is an input format
|
||||
that is consumed and transformed into Spack DB records.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"title": "CPE manifest schema",
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"_meta": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"file-type": {"type": "string", "minLength": 1},
|
||||
"cpe-version": {"type": "string", "minLength": 1},
|
||||
"system-type": {"type": "string", "minLength": 1},
|
||||
"schema-version": {"type": "string", "minLength": 1},
|
||||
}
|
||||
},
|
||||
"compilers": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"name": {"type": "string", "minLength": 1},
|
||||
"version": {"type": "string", "minLength": 1},
|
||||
"prefix": {"type": "string", "minLength": 1},
|
||||
"executables": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"cc": {"type": "string", "minLength": 1},
|
||||
"cxx": {"type": "string", "minLength": 1},
|
||||
"fc": {"type": "string", "minLength": 1}
|
||||
}
|
||||
},
|
||||
"arch": {
|
||||
"type": "object",
|
||||
"required": ["os", "target"],
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"os": {"type": "string", "minLength": 1},
|
||||
"target": {"type": "string", "minLength": 1}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"specs": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"version",
|
||||
"arch",
|
||||
"compiler",
|
||||
"prefix",
|
||||
"hash"],
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"name": {"type": "string", "minLength": 1},
|
||||
"version": {"type": "string", "minLength": 1},
|
||||
"arch": {
|
||||
"type": "object",
|
||||
"required": ["platform", "platform_os", "target"],
|
||||
"additioanlProperties": False,
|
||||
"properties": {
|
||||
"platform": {"type": "string", "minLength": 1},
|
||||
"platform_os": {"type": "string", "minLength": 1},
|
||||
"target": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": ["name"],
|
||||
"properties": {
|
||||
"name": {"type": "string", "minLength": 1}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"compiler": {
|
||||
"type": "object",
|
||||
"required": ["name", "version"],
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"name": {"type": "string", "minLength": 1},
|
||||
"version": {"type": "string", "minLength": 1}
|
||||
}
|
||||
},
|
||||
"dependencies": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"\\w[\\w-]*": {
|
||||
"type": "object",
|
||||
"required": ["hash"],
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"hash": {"type": "string", "minLength": 1},
|
||||
"type": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string", "minLength": 1}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"prefix": {
|
||||
"type": "string", "minLength": 1},
|
||||
"rpm": {"type": "string", "minLength": 1},
|
||||
"hash": {"type": "string", "minLength": 1},
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -16,24 +16,6 @@
|
||||
import spack.schema.packages
|
||||
import spack.schema.projections
|
||||
|
||||
warned_about_concretization = False
|
||||
|
||||
|
||||
def deprecate_concretization(instance, props):
|
||||
global warned_about_concretization
|
||||
if warned_about_concretization:
|
||||
return None
|
||||
# Deprecate `spack:concretization` in favor of `spack:concretizer:unify`.
|
||||
concretization_to_unify = {'together': 'true', 'separately': 'false'}
|
||||
concretization = instance['concretization']
|
||||
unify = concretization_to_unify[concretization]
|
||||
|
||||
return (
|
||||
'concretization:{} is deprecated and will be removed in Spack 0.19 in favor of '
|
||||
'the new concretizer:unify:{} config option.'.format(concretization, unify)
|
||||
)
|
||||
|
||||
|
||||
#: legal first keys in the schema
|
||||
keys = ('spack', 'env')
|
||||
|
||||
@@ -79,11 +61,6 @@ def deprecate_concretization(instance, props):
|
||||
'type': 'object',
|
||||
'default': {},
|
||||
'additionalProperties': False,
|
||||
'deprecatedProperties': {
|
||||
'properties': ['concretization'],
|
||||
'message': deprecate_concretization,
|
||||
'error': False
|
||||
},
|
||||
'properties': union_dicts(
|
||||
# merged configuration scope schemas
|
||||
spack.schema.merged.properties,
|
||||
@@ -192,33 +169,11 @@ def update(data):
|
||||
Returns:
|
||||
True if data was changed, False otherwise
|
||||
"""
|
||||
updated = False
|
||||
if 'include' in data:
|
||||
msg = ("included configuration files should be updated manually"
|
||||
" [files={0}]")
|
||||
warnings.warn(msg.format(', '.join(data['include'])))
|
||||
|
||||
if 'packages' in data:
|
||||
updated |= spack.schema.packages.update(data['packages'])
|
||||
|
||||
# Spack 0.19 drops support for `spack:concretization` in favor of
|
||||
# `spack:concretizer:unify`. Here we provide an upgrade path that changes the former
|
||||
# into the latter, or warns when there's an ambiguity. Note that Spack 0.17 is not
|
||||
# forward compatible with `spack:concretizer:unify`.
|
||||
if 'concretization' in data:
|
||||
has_unify = 'unify' in data.get('concretizer', {})
|
||||
to_unify = {'together': True, 'separately': False}
|
||||
unify = to_unify[data['concretization']]
|
||||
|
||||
if has_unify and data['concretizer']['unify'] != unify:
|
||||
warnings.warn(
|
||||
'The following configuration conflicts: '
|
||||
'`spack:concretization:{}` and `spack:concretizer:unify:{}`'
|
||||
'. Please update manually.'.format(
|
||||
data['concretization'], data['concretizer']['unify']))
|
||||
else:
|
||||
data.update({'concretizer': {'unify': unify}})
|
||||
data.pop('concretization')
|
||||
updated = True
|
||||
|
||||
return updated
|
||||
return spack.schema.packages.update(data['packages'])
|
||||
return False
|
||||
|
||||
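A quick, hedged worked example of the upgrade path implemented above, using an invented environment dict:

    # Hedged sketch of the spack:concretization -> concretizer:unify upgrade above.
    data = {'concretization': 'together'}
    # After update(data) on the side of the diff that performs the migration:
    #   data == {'concretizer': {'unify': True}}  and update() returns True.
    # If the document already sets a conflicting concretizer:unify value,
    # only a warning is emitted and the data is left for manual cleanup.
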
@@ -13,7 +13,6 @@
|
||||
import spack.schema.bootstrap
|
||||
import spack.schema.cdash
|
||||
import spack.schema.compilers
|
||||
import spack.schema.concretizer
|
||||
import spack.schema.config
|
||||
import spack.schema.container
|
||||
import spack.schema.gitlab_ci
|
||||
@@ -28,7 +27,6 @@
|
||||
spack.schema.bootstrap.properties,
|
||||
spack.schema.cdash.properties,
|
||||
spack.schema.compilers.properties,
|
||||
spack.schema.concretizer.properties,
|
||||
spack.schema.config.properties,
|
||||
spack.schema.container.properties,
|
||||
spack.schema.gitlab_ci.properties,
|
||||
|
||||
@@ -110,12 +110,9 @@
|
||||
'properties': {
|
||||
'name': {'type': 'string'},
|
||||
'hash': {'type': 'string'},
|
||||
'package_hash': {'type': 'string'},
|
||||
|
||||
# these hashes were used on some specs prior to 0.18
|
||||
'full_hash': {'type': 'string'},
|
||||
'build_hash': {'type': 'string'},
|
||||
|
||||
'package_hash': {'type': 'string'},
|
||||
'version': {
|
||||
'oneOf': [
|
||||
{'type': 'string'},
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
import itertools
|
||||
import os
|
||||
import pprint
|
||||
import re
|
||||
import types
|
||||
import warnings
|
||||
|
||||
@@ -56,6 +55,14 @@
|
||||
parse_files = None
|
||||
|
||||
|
||||
#: whether we should write ASP unsat cores quickly in debug mode when the cores
|
||||
#: may be very large or take the time (sometimes hours) to minimize them
|
||||
minimize_cores = True
|
||||
|
||||
#: whether we should include all facts in the unsat cores or only error messages
|
||||
full_cores = False
|
||||
|
||||
|
||||
# backward compatibility functions for clingo ASTs
|
||||
def ast_getter(*names):
|
||||
def getter(node):
|
||||
@@ -70,25 +77,20 @@ def getter(node):
|
||||
ast_type = ast_getter("ast_type", "type")
|
||||
ast_sym = ast_getter("symbol", "term")
|
||||
|
||||
#: Order of precedence for version origins. Topmost types are preferred.
version_origin_fields = [
'spec',
'external',
'packages_yaml',
'package_py',
'installed',
]

#: Look up version precedence strings by enum id
version_origin_str = {
i: name for i, name in enumerate(version_origin_fields)
}

#: Enumeration like object to mark version provenance
version_provenance = collections.namedtuple(  # type: ignore
'VersionProvenance',
version_origin_fields,
)(**{name: i for i, name in enumerate(version_origin_fields)})
'VersionProvenance', ['external', 'packages_yaml', 'package_py', 'spec']
)(spec=0, external=1, packages_yaml=2, package_py=3)

#: String representation of version origins, to emit legible
# facts for the ASP solver
version_origin_str = {
0: 'spec',
1: 'external',
2: 'packages_yaml',
3: 'package_py'
}

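The enumeration-like object above is a namedtuple instantiated with consecutive integers. A small standalone sketch of the pattern (field list taken from one side of the diff):

    # Standalone sketch of the namedtuple-as-enum pattern used for version provenance.
    import collections

    fields = ['spec', 'external', 'packages_yaml', 'package_py', 'installed']
    VersionProvenance = collections.namedtuple('VersionProvenance', fields)
    version_provenance = VersionProvenance(**{name: i for i, name in enumerate(fields)})

    version_origin_str = {i: name for i, name in enumerate(fields)}

    assert version_provenance.spec == 0              # lower index = higher precedence
    assert version_origin_str[version_provenance.installed] == 'installed'
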
#: Named tuple to contain information on declared versions
|
||||
DeclaredVersion = collections.namedtuple(
|
||||
@@ -107,7 +109,7 @@ def getter(node):
|
||||
|
||||
|
||||
def build_criteria_names(costs, tuples):
|
||||
"""Construct an ordered mapping from criteria names to costs."""
|
||||
"""Construct an ordered mapping from criteria names to indices in the cost list."""
|
||||
# pull optimization criteria names out of the solution
|
||||
priorities_names = []
|
||||
|
||||
@@ -134,10 +136,7 @@ def build_criteria_names(costs, tuples):
|
||||
# sort the criteria by priority
|
||||
priorities_names = sorted(priorities_names, reverse=True)
|
||||
|
||||
# We only have opt-criterion values for non-error types
|
||||
# error type criteria are excluded (they come first)
|
||||
error_criteria = len(costs) - len(priorities_names)
|
||||
costs = costs[error_criteria:]
|
||||
assert len(priorities_names) == len(costs), "Wrong number of optimization criteria!"
|
||||
|
||||
# split list into three parts: build criteria, fixed criteria, non-build criteria
|
||||
num_criteria = len(priorities_names)
|
||||
@@ -150,12 +149,12 @@ def build_criteria_names(costs, tuples):
|
||||
# mapping from priority to index in cost list
|
||||
indices = dict((p, i) for i, (p, n) in enumerate(priorities_names))
|
||||
|
||||
# make a list that has each name with its build and non-build costs
|
||||
# make a list that has each name with its build and non-build priority
|
||||
criteria = [
|
||||
(costs[p - fixed_priority_offset + num_build], None, name) for p, name in fixed
|
||||
(p - fixed_priority_offset + num_build, None, name) for p, name in fixed
|
||||
]
|
||||
for (i, name), (b, _) in zip(installed, build):
|
||||
criteria.append((costs[indices[i]], costs[indices[b]], name))
|
||||
criteria.append((indices[i], indices[b], name))
|
||||
|
||||
return criteria
|
||||
|
||||
@@ -327,6 +326,9 @@ def format_core(self, core):
|
||||
core_symbols = []
|
||||
for atom in core:
|
||||
sym = symbols[atom]
|
||||
if sym.name in ("rule", "error"):
|
||||
# these are special symbols we use to get messages in the core
|
||||
sym = sym.arguments[0].string
|
||||
core_symbols.append(sym)
|
||||
|
||||
return sorted(str(symbol) for symbol in core_symbols)
|
||||
@@ -385,7 +387,7 @@ def raise_if_unsat(self):
|
||||
"""
|
||||
Raise an appropriate error if the result is unsatisfiable.
|
||||
|
||||
The error is an InternalConcretizerError, and includes the minimized cores
|
||||
The error is a UnsatisfiableSpecError, and includes the minimized cores
|
||||
resulting from the solve, formatted to be human readable.
|
||||
"""
|
||||
if self.satisfiable:
|
||||
@@ -395,8 +397,12 @@ def raise_if_unsat(self):
|
||||
if len(constraints) == 1:
|
||||
constraints = constraints[0]
|
||||
|
||||
conflicts = self.format_minimal_cores()
|
||||
raise InternalConcretizerError(constraints, conflicts=conflicts)
|
||||
if minimize_cores:
|
||||
conflicts = self.format_minimal_cores()
|
||||
else:
|
||||
conflicts = self.format_cores()
|
||||
|
||||
raise UnsatisfiableSpecError(constraints, conflicts=conflicts)
|
||||
|
||||
@property
|
||||
def specs(self):
|
||||
@@ -496,11 +502,13 @@ def h2(self, name):
|
||||
def newline(self):
|
||||
self.out.write('\n')
|
||||
|
||||
def fact(self, head):
def fact(self, head, assumption=False):
"""ASP fact (a rule without a body).

Arguments:
head (AspFunction): ASP function to generate as fact
assumption (bool): If True and using cores, use this fact as a
choice point in ASP and include it in unsatisfiable cores
"""
symbol = head.symbol() if hasattr(head, 'symbol') else head

@@ -508,9 +516,10 @@ def fact(self, head):

atom = self.backend.add_atom(symbol)

# Only functions relevant for constructing bug reports for bad error messages
# are assumptions, and only when using cores.
choice = self.cores and symbol.name == 'internal_error'
# with `--show-cores=full or --show-cores=minimized, make all facts
# choices/assumptions, otherwise only if assumption=True
choice = self.cores and (full_cores or assumption)

self.backend.add_rule([atom], [], choice=choice)
if choice:
self.assumptions.append(atom)
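For reference, a hedged standalone sketch of the fact-versus-assumption distinction this method relies on, written directly against clingo's Python backend API rather than Spack's wrapper:

    # Hedged sketch: plain fact vs. choice/assumption with the clingo backend API.
    import clingo

    ctl = clingo.Control()
    with ctl.backend() as backend:
        sym = clingo.Function("error", [clingo.String("example message")])
        atom = backend.add_atom(sym)
        # choice=False asserts the atom unconditionally; choice=True leaves it to
        # the solver, which lets the atom participate in unsatisfiable cores.
        backend.add_rule([atom], [], choice=True)
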
@@ -568,10 +577,9 @@ def visit(node):
|
||||
for term in node.body:
|
||||
if ast_type(term) == ASTType.Literal:
|
||||
if ast_type(term.atom) == ASTType.SymbolicAtom:
|
||||
name = ast_sym(term.atom).name
|
||||
if name == 'internal_error':
|
||||
if ast_sym(term.atom).name == "error":
|
||||
arg = ast_sym(ast_sym(term.atom).arguments[0])
|
||||
self.fact(AspFunction(name)(arg.string))
|
||||
self.fact(fn.error(arg.string), assumption=True)
|
||||
|
||||
path = os.path.join(parent_dir, 'concretize.lp')
|
||||
parse_files([path], visit)
|
||||
@@ -654,7 +662,7 @@ def stringify(x):
|
||||
class SpackSolverSetup(object):
|
||||
"""Class to set up and run a Spack concretization solve."""
|
||||
|
||||
def __init__(self, reuse=None, minimal=None, tests=False):
|
||||
def __init__(self, reuse=False, tests=False):
|
||||
self.gen = None # set by setup()
|
||||
|
||||
self.declared_versions = {}
|
||||
@@ -679,22 +687,24 @@ def __init__(self, reuse=None, minimal=None, tests=False):
|
||||
# Caches to optimize the setup phase of the solver
|
||||
self.target_specs_cache = None
|
||||
|
||||
# Solver paramters that affect setup -- see Solver documentation
|
||||
self.reuse = spack.config.get(
|
||||
"concretizer:reuse", False) if reuse is None else reuse
|
||||
self.minimal = spack.config.get(
|
||||
"concretizer:minimal", False) if minimal is None else minimal
|
||||
# whether to add installed/binary hashes to the solve
|
||||
self.reuse = reuse
|
||||
|
||||
# whether to add installed/binary hashes to the solve
|
||||
self.tests = tests
|
||||
|
||||
def pkg_version_rules(self, pkg):
|
||||
"""Output declared versions of a package.
|
||||
|
||||
This uses self.declared_versions so that we include any versions
|
||||
This uses self.possible_versions so that we include any versions
|
||||
that arise from a spec.
|
||||
"""
|
||||
def key_fn(version):
|
||||
# Origins are sorted by precedence defined in `version_origin_str`,
|
||||
# then by order added.
|
||||
# Origins are sorted by order of importance:
|
||||
# 1. Spec from command line
|
||||
# 2. Externals
|
||||
# 3. Package preferences
|
||||
# 4. Directives in package.py
|
||||
return version.origin, version.idx
|
||||
|
||||
pkg = packagize(pkg)
|
||||
@@ -725,7 +735,7 @@ def spec_versions(self, spec):
|
||||
|
||||
# record all version constraints for later
|
||||
self.version_constraints.add((spec.name, spec.versions))
|
||||
return [fn.node_version_satisfies(spec.name, spec.versions)]
|
||||
return [fn.version_satisfies(spec.name, spec.versions)]
|
||||
|
||||
def target_ranges(self, spec, single_target_fn):
|
||||
target = spec.architecture.target
|
||||
@@ -738,24 +748,13 @@ def target_ranges(self, spec, single_target_fn):
|
||||
return [fn.node_target_satisfies(spec.name, target)]
|
||||
|
||||
def conflict_rules(self, pkg):
|
||||
default_msg = "{0} '{1}' conflicts with '{2}'"
|
||||
no_constraint_msg = "{0} conflicts with '{1}'"
|
||||
for trigger, constraints in pkg.conflicts.items():
|
||||
trigger_msg = "conflict trigger %s" % str(trigger)
|
||||
trigger_id = self.condition(
|
||||
spack.spec.Spec(trigger), name=pkg.name, msg=trigger_msg)
|
||||
trigger_id = self.condition(spack.spec.Spec(trigger), name=pkg.name)
|
||||
self.gen.fact(fn.conflict_trigger(trigger_id))
|
||||
|
||||
for constraint, conflict_msg in constraints:
|
||||
if conflict_msg is None:
|
||||
if constraint == spack.spec.Spec():
|
||||
conflict_msg = no_constraint_msg.format(pkg.name, trigger)
|
||||
else:
|
||||
conflict_msg = default_msg.format(pkg.name, trigger, constraint)
|
||||
constraint_msg = "conflict constraint %s" % str(constraint)
|
||||
constraint_id = self.condition(
|
||||
constraint, name=pkg.name, msg=constraint_msg)
|
||||
self.gen.fact(
|
||||
fn.conflict(pkg.name, trigger_id, constraint_id, conflict_msg))
|
||||
for constraint, _ in constraints:
|
||||
constraint_id = self.condition(constraint, name=pkg.name)
|
||||
self.gen.fact(fn.conflict(pkg.name, trigger_id, constraint_id))
|
||||
self.gen.newline()
|
||||
|
||||
def available_compilers(self):
|
||||
@@ -828,7 +827,7 @@ def package_compiler_defaults(self, pkg):
|
||||
pkg.name, cspec.name, cspec.version, -i * 100
|
||||
))
|
||||
|
||||
def pkg_rules(self, pkg):
|
||||
def pkg_rules(self, pkg, tests):
|
||||
pkg = packagize(pkg)
|
||||
|
||||
# versions
|
||||
@@ -839,18 +838,9 @@ def pkg_rules(self, pkg):
|
||||
for name, entry in sorted(pkg.variants.items()):
|
||||
variant, when = entry
|
||||
|
||||
if spack.spec.Spec() in when:
|
||||
# unconditional variant
|
||||
self.gen.fact(fn.variant(pkg.name, name))
|
||||
else:
|
||||
# conditional variant
|
||||
for w in when:
|
||||
msg = "%s has variant %s" % (pkg.name, name)
|
||||
if str(w):
|
||||
msg += " when %s" % w
|
||||
|
||||
cond_id = self.condition(w, name=pkg.name, msg=msg)
|
||||
self.gen.fact(fn.variant_condition(cond_id, pkg.name, name))
|
||||
for w in when:
|
||||
cond_id = self.condition(w, name=pkg.name)
|
||||
self.gen.fact(fn.variant_condition(cond_id, pkg.name, name))
|
||||
|
||||
single_value = not variant.multi
|
||||
if single_value:
|
||||
@@ -893,9 +883,7 @@ def pkg_rules(self, pkg):
|
||||
imposed = spack.spec.Spec(value.when)
|
||||
imposed.name = pkg.name
|
||||
self.condition(
|
||||
required_spec=required, imposed_spec=imposed, name=pkg.name,
|
||||
msg="%s variant %s value %s when %s" % (
|
||||
pkg.name, name, value, when)
|
||||
required_spec=required, imposed_spec=imposed, name=pkg.name
|
||||
)
|
||||
|
||||
if variant.sticky:
|
||||
@@ -923,7 +911,7 @@ def pkg_rules(self, pkg):
|
||||
)
|
||||
)
|
||||
|
||||
def condition(self, required_spec, imposed_spec=None, name=None, msg=None):
|
||||
def condition(self, required_spec, imposed_spec=None, name=None):
|
||||
"""Generate facts for a dependency or virtual provider condition.
|
||||
|
||||
Arguments:
|
||||
@@ -932,7 +920,7 @@ def condition(self, required_spec, imposed_spec=None, name=None, msg=None):
|
||||
are imposed when this condition is triggered
|
||||
name (str or None): name for `required_spec` (required if
|
||||
required_spec is anonymous, ignored if not)
|
||||
msg (str or None): description of the condition
|
||||
|
||||
Returns:
|
||||
int: id of the condition created by this function
|
||||
"""
|
||||
@@ -941,7 +929,7 @@ def condition(self, required_spec, imposed_spec=None, name=None, msg=None):
|
||||
assert named_cond.name, "must provide name for anonymous condtions!"
|
||||
|
||||
condition_id = next(self._condition_id_counter)
|
||||
self.gen.fact(fn.condition(condition_id, msg))
|
||||
self.gen.fact(fn.condition(condition_id))
|
||||
|
||||
# requirements trigger the condition
|
||||
requirements = self.spec_clauses(
|
||||
@@ -973,8 +961,7 @@ def package_provider_rules(self, pkg):
|
||||
|
||||
for provided, whens in pkg.provided.items():
|
||||
for when in whens:
|
||||
msg = '%s provides %s when %s' % (pkg.name, provided, when)
|
||||
condition_id = self.condition(when, provided, pkg.name, msg)
|
||||
condition_id = self.condition(when, provided, pkg.name)
|
||||
self.gen.fact(fn.provider_condition(
|
||||
condition_id, when.name, provided.name
|
||||
))
|
||||
@@ -998,11 +985,7 @@ def package_dependencies_rules(self, pkg):
|
||||
if not deptypes:
|
||||
continue
|
||||
|
||||
msg = '%s depends on %s' % (pkg.name, dep.spec.name)
|
||||
if cond != spack.spec.Spec():
|
||||
msg += ' when %s' % cond
|
||||
|
||||
condition_id = self.condition(cond, dep.spec, pkg.name, msg)
|
||||
condition_id = self.condition(cond, dep.spec, pkg.name)
|
||||
self.gen.fact(fn.dependency_condition(
|
||||
condition_id, pkg.name, dep.spec.name
|
||||
))
|
||||
@@ -1082,8 +1065,7 @@ def external_packages(self):
|
||||
|
||||
# Declare external conditions with a local index into packages.yaml
|
||||
for local_idx, spec in enumerate(external_specs):
|
||||
msg = '%s available as external when satisfying %s' % (spec.name, spec)
|
||||
condition_id = self.condition(spec, msg=msg)
|
||||
condition_id = self.condition(spec)
|
||||
self.gen.fact(
|
||||
fn.possible_external(condition_id, pkg_name, local_idx)
|
||||
)
|
||||
@@ -1165,14 +1147,7 @@ def spec_clauses(self, *args, **kwargs):
|
||||
raise RuntimeError(msg)
|
||||
return clauses
|
||||
|
||||
def _spec_clauses(
|
||||
self,
|
||||
spec,
|
||||
body=False,
|
||||
transitive=True,
|
||||
expand_hashes=False,
|
||||
concrete_build_deps=False,
|
||||
):
|
||||
def _spec_clauses(self, spec, body=False, transitive=True, expand_hashes=False):
|
||||
"""Return a list of clauses for a spec mandates are true.
|
||||
|
||||
Arguments:
|
||||
@@ -1183,8 +1158,6 @@ def _spec_clauses(
|
||||
dependencies (default True)
|
||||
expand_hashes (bool): if True, descend into hashes of concrete specs
|
||||
(default False)
|
||||
concrete_build_deps (bool): if False, do not include pure build deps
|
||||
of concrete specs (as they have no effect on runtime constraints)
|
||||
|
||||
Normally, if called with ``transitive=True``, ``spec_clauses()`` just generates
|
||||
hashes for the dependency requirements of concrete specs. If ``expand_hashes``
|
||||
@@ -1292,34 +1265,18 @@ class Body(object):
|
||||
|
||||
# add all clauses from dependencies
|
||||
if transitive:
|
||||
# TODO: Eventually distinguish 2 deps on the same pkg (build and link)
|
||||
for dspec in spec.edges_to_dependencies():
|
||||
dep = dspec.spec
|
||||
if spec.concrete:
|
||||
# TODO: We need to distinguish 2 specs from the same package later
|
||||
for edge in spec.edges_to_dependencies():
|
||||
for dtype in edge.deptypes:
|
||||
clauses.append(fn.depends_on(spec.name, edge.spec.name, dtype))
|
||||
|
||||
for dep in spec.traverse(root=False):
|
||||
if spec.concrete:
|
||||
# We know dependencies are real for concrete specs. For abstract
|
||||
# specs they just mean the dep is somehow in the DAG.
|
||||
for dtype in dspec.deptypes:
|
||||
# skip build dependencies of already-installed specs
|
||||
if concrete_build_deps or dtype != "build":
|
||||
clauses.append(fn.depends_on(spec.name, dep.name, dtype))
|
||||
|
||||
# imposing hash constraints for all but pure build deps of
|
||||
# already-installed concrete specs.
|
||||
if concrete_build_deps or dspec.deptypes != ("build",):
|
||||
clauses.append(fn.hash(dep.name, dep.dag_hash()))
|
||||
|
||||
# if the spec is abstract, descend into dependencies.
|
||||
# if it's concrete, then the hashes above take care of dependency
|
||||
# constraints, but expand the hashes if asked for.
|
||||
clauses.append(fn.hash(dep.name, dep.dag_hash()))
|
||||
if not spec.concrete or expand_hashes:
|
||||
clauses.extend(
|
||||
self._spec_clauses(
|
||||
dep,
|
||||
body=body,
|
||||
expand_hashes=expand_hashes,
|
||||
concrete_build_deps=concrete_build_deps,
|
||||
)
|
||||
self._spec_clauses(dep, body, transitive=False)
|
||||
)
|
||||
|
||||
return clauses
|
||||
@@ -1454,48 +1411,23 @@ def target_defaults(self, specs):
|
||||
|
||||
self.gen.h2('Target compatibility')
|
||||
|
||||
# Construct the list of targets which are compatible with the host
|
||||
candidate_targets = [uarch] + uarch.ancestors
|
||||
|
||||
# Get configuration options
|
||||
granularity = spack.config.get('concretizer:targets:granularity')
|
||||
host_compatible = spack.config.get('concretizer:targets:host_compatible')
|
||||
|
||||
# Add targets which are not compatible with the current host
|
||||
if not host_compatible:
|
||||
additional_targets_in_family = sorted([
|
||||
t for t in archspec.cpu.TARGETS.values()
|
||||
if (t.family.name == uarch.family.name and
|
||||
t not in candidate_targets)
|
||||
], key=lambda x: len(x.ancestors), reverse=True)
|
||||
candidate_targets += additional_targets_in_family
|
||||
|
||||
# Check if we want only generic architecture
|
||||
if granularity == 'generic':
|
||||
candidate_targets = [t for t in candidate_targets if t.vendor == 'generic']
|
||||
|
||||
compatible_targets = [uarch] + uarch.ancestors
|
||||
additional_targets_in_family = sorted([
|
||||
t for t in archspec.cpu.TARGETS.values()
|
||||
if (t.family.name == uarch.family.name and
|
||||
t not in compatible_targets)
|
||||
], key=lambda x: len(x.ancestors), reverse=True)
|
||||
compatible_targets += additional_targets_in_family
|
||||
compilers = self.possible_compilers
|
||||
|
||||
# Add targets explicitly requested from specs
|
||||
for spec in specs:
|
||||
if not spec.architecture or not spec.architecture.target:
|
||||
continue
|
||||
|
||||
target = archspec.cpu.TARGETS.get(spec.target.name)
|
||||
if not target:
|
||||
self.target_ranges(spec, None)
|
||||
continue
|
||||
|
||||
if target not in candidate_targets and not host_compatible:
|
||||
candidate_targets.append(target)
|
||||
for ancestor in target.ancestors:
|
||||
if ancestor not in candidate_targets:
|
||||
candidate_targets.append(ancestor)
|
||||
|
||||
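The candidate-target logic above is driven by archspec. A hedged standalone sketch of the same host/family/granularity filtering, with the `concretizer:targets` configuration values hard-coded instead of read from config:

    # Hedged sketch of how candidate targets are assembled with archspec.
    import archspec.cpu

    uarch = archspec.cpu.host()                      # detected microarchitecture
    candidate_targets = [uarch] + uarch.ancestors    # host-compatible chain

    host_compatible = False                          # stand-in for config lookup
    if not host_compatible:
        same_family = sorted(
            [t for t in archspec.cpu.TARGETS.values()
             if t.family.name == uarch.family.name and t not in candidate_targets],
            key=lambda x: len(x.ancestors), reverse=True)
        candidate_targets += same_family

    granularity = 'generic'                          # or 'microarchitectures'
    if granularity == 'generic':
        candidate_targets = [t for t in candidate_targets if t.vendor == 'generic']
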
# this loop can be used to limit the number of targets
|
||||
# considered. Right now we consider them all, but it seems that
|
||||
# many targets can make things slow.
|
||||
# TODO: investigate this.
|
||||
best_targets = set([uarch.family.name])
|
||||
for compiler in sorted(compilers):
|
||||
supported = self._supported_targets(
|
||||
compiler.name, compiler.version, candidate_targets
|
||||
compiler.name, compiler.version, compatible_targets
|
||||
)
|
||||
|
||||
# If we can't find supported targets it may be due to custom
|
||||
@@ -1508,7 +1440,7 @@ def target_defaults(self, specs):
|
||||
supported = self._supported_targets(
|
||||
compiler.name,
|
||||
compiler_obj.real_version,
|
||||
candidate_targets
|
||||
compatible_targets
|
||||
)
|
||||
|
||||
if not supported:
|
||||
@@ -1524,8 +1456,21 @@ def target_defaults(self, specs):
|
||||
compiler.name, compiler.version, uarch.family.name
|
||||
))
|
||||
|
||||
# add any targets explicitly mentioned in specs
|
||||
for spec in specs:
|
||||
if not spec.architecture or not spec.architecture.target:
|
||||
continue
|
||||
|
||||
target = archspec.cpu.TARGETS.get(spec.target.name)
|
||||
if not target:
|
||||
self.target_ranges(spec, None)
|
||||
continue
|
||||
|
||||
if target not in compatible_targets:
|
||||
compatible_targets.append(target)
|
||||
|
||||
i = 0
|
||||
for target in candidate_targets:
|
||||
for target in compatible_targets:
|
||||
self.gen.fact(fn.target(target.name))
|
||||
self.gen.fact(fn.target_family(target.name, target.family.name))
|
||||
for parent in sorted(target.parents):
|
||||
@@ -1567,12 +1512,9 @@ def generate_possible_compilers(self, specs):
|
||||
continue
|
||||
|
||||
if strict and s.compiler not in cspecs:
|
||||
if not s.concrete:
|
||||
raise spack.concretize.UnavailableCompilerVersionError(
|
||||
s.compiler
|
||||
)
|
||||
# Allow unknown compilers to exist if the associated spec
|
||||
# is already built
|
||||
raise spack.concretize.UnavailableCompilerVersionError(
|
||||
s.compiler
|
||||
)
|
||||
else:
|
||||
cspecs.add(s.compiler)
|
||||
self.gen.fact(fn.allow_compiler(
|
||||
@@ -1703,12 +1645,6 @@ def _facts_from_concrete_spec(self, spec, possible):
|
||||
# be dependencies (don't tell it about the others)
|
||||
h = spec.dag_hash()
|
||||
if spec.name in possible and h not in self.seen_hashes:
|
||||
try:
|
||||
# Only consider installed packages for repo we know
|
||||
spack.repo.path.get(spec)
|
||||
except (spack.repo.UnknownNamespaceError, spack.repo.UnknownPackageError):
|
||||
return
|
||||
|
||||
# this indicates that there is a spec like this installed
|
||||
self.gen.fact(fn.installed_hash(spec.name, h))
|
||||
|
||||
@@ -1716,16 +1652,8 @@ def _facts_from_concrete_spec(self, spec, possible):
|
||||
self.impose(h, spec, body=True)
|
||||
self.gen.newline()
|
||||
|
||||
# Declare as possible parts of specs that are not in package.py
|
||||
# - Add versions to possible versions
|
||||
# - Add OS to possible OS's
|
||||
# add OS to possible OS's
|
||||
for dep in spec.traverse():
|
||||
self.possible_versions[dep.name].add(dep.version)
|
||||
self.declared_versions[dep.name].append(DeclaredVersion(
|
||||
version=dep.version,
|
||||
idx=0,
|
||||
origin=version_provenance.installed
|
||||
))
|
||||
self.possible_oses.add(dep.os)
|
||||
|
||||
# add the hash to the one seen so far
|
||||
@@ -1790,7 +1718,7 @@ def setup(self, driver, specs):
|
||||
|
||||
# Fail if we already know an unreachable node is requested
|
||||
for spec in specs:
|
||||
missing_deps = [str(d) for d in spec.traverse()
|
||||
missing_deps = [d for d in spec.traverse()
|
||||
if d.name not in possible and not d.virtual]
|
||||
if missing_deps:
|
||||
raise spack.spec.InvalidDependencyError(spec.name, missing_deps)
|
||||
@@ -1810,14 +1738,13 @@ def setup(self, driver, specs):
|
||||
self.gen.h1("Concrete input spec definitions")
|
||||
self.define_concrete_input_specs(specs, possible)
|
||||
|
||||
self.gen.h1("Concretizer options")
|
||||
if self.reuse:
|
||||
self.gen.fact(fn.optimize_for_reuse())
|
||||
if self.minimal:
|
||||
self.gen.fact(fn.minimal_installs())
|
||||
|
||||
if self.reuse:
|
||||
self.gen.h1("Installed packages")
|
||||
if self.reuse is True:
|
||||
self.gen.fact(fn.binary_package_manager())
|
||||
else:
|
||||
self.gen.fact(fn.optimize_for_reuse())
|
||||
self.gen.newline()
|
||||
self.define_installed_packages(specs, possible)
|
||||
|
||||
self.gen.h1('General Constraints')
|
||||
@@ -1838,7 +1765,7 @@ def setup(self, driver, specs):
|
||||
self.gen.h1('Package Constraints')
|
||||
for pkg in sorted(pkgs):
|
||||
self.gen.h2('Package rules: %s' % pkg)
|
||||
self.pkg_rules(pkg)
|
||||
self.pkg_rules(pkg, tests=self.tests)
|
||||
self.gen.h2('Package preferences: %s' % pkg)
|
||||
self.preferred_variants(pkg)
|
||||
self.preferred_targets(pkg)
|
||||
@@ -1857,14 +1784,12 @@ def setup(self, driver, specs):
|
||||
fn.virtual_root(spec.name) if spec.virtual
|
||||
else fn.root(spec.name)
|
||||
)
|
||||
|
||||
for clause in self.spec_clauses(spec):
|
||||
self.gen.fact(clause)
|
||||
if clause.name == 'variant_set':
|
||||
self.gen.fact(
|
||||
fn.variant_default_value_from_cli(*clause.args)
|
||||
)
|
||||
|
||||
self.gen.fact(fn.variant_default_value_from_cli(
|
||||
*clause.args
|
||||
))
|
||||
self.gen.h1("Variant Values defined in specs")
|
||||
self.define_variant_values()
|
||||
|
||||
@@ -1887,7 +1812,6 @@ class SpecBuilder(object):
|
||||
ignored_attributes = ["opt_criterion"]
|
||||
|
||||
def __init__(self, specs):
|
||||
self._specs = {}
|
||||
self._result = None
|
||||
self._command_line_specs = specs
|
||||
self._flag_sources = collections.defaultdict(lambda: set())
|
||||
@@ -1940,17 +1864,6 @@ def node_os(self, pkg, os):
|
||||
def node_target(self, pkg, target):
|
||||
self._arch(pkg).target = target
|
||||
|
||||
def error(self, priority, msg, *args):
|
||||
msg = msg.format(*args)
|
||||
|
||||
# For variant formatting, we sometimes have to construct specs
|
||||
# to format values properly. Find/replace all occurances of
|
||||
# Spec(...) with the string representation of the spec mentioned
|
||||
specs_to_construct = re.findall(r'Spec\(([^)]*)\)', msg)
|
||||
for spec_str in specs_to_construct:
|
||||
msg = msg.replace('Spec(%s)' % spec_str, str(spack.spec.Spec(spec_str)))
|
||||
raise UnsatisfiableSpecError(msg)
|
||||
|
||||
def variant_value(self, pkg, name, value):
|
||||
# FIXME: is there a way not to special case 'dev_path' everywhere?
|
||||
if name == 'dev_path':
|
||||
@@ -2073,27 +1986,15 @@ def deprecated(self, pkg, version):
|
||||
msg = 'using "{0}@{1}" which is a deprecated version'
|
||||
tty.warn(msg.format(pkg, version))
|
||||
|
||||
@staticmethod
def sort_fn(function_tuple):
name = function_tuple[0]
if name == 'error':
priority = function_tuple[1][0]
return (-4, priority)
elif name == 'hash':
return (-3, 0)
elif name == 'node':
return (-2, 0)
elif name == 'node_compiler':
return (-1, 0)
else:
return (0, 0)

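A tiny, hedged illustration of the ordering this key function imposes on (name, args) tuples; the example tuples are invented and the key is simplified to constants:

    # Hedged sketch of the ordering build_specs relies on.
    order = {'error': -4, 'hash': -3, 'node': -2, 'node_compiler': -1}
    tuples = [
        ('variant_value', ('zlib', 'shared', 'True')),
        ('node_compiler', ('zlib', 'gcc')),
        ('node', ('zlib',)),
        ('hash', ('zlib', 'abc123')),
    ]
    tuples.sort(key=lambda f: (order.get(f[0], 0), 0))
    # -> hash, node, node_compiler, then everything else; 'error' (if present)
    #    would sort first so failures are reported before specs are built.
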
def build_specs(self, function_tuples):
|
||||
# Functions don't seem to be in particular order in output. Sort
|
||||
# them here so that directives that build objects (like node and
|
||||
# node_compiler) are called in the right order.
|
||||
self.function_tuples = function_tuples
|
||||
self.function_tuples.sort(key=self.sort_fn)
|
||||
function_tuples.sort(key=lambda f: {
|
||||
"hash": -3,
|
||||
"node": -2,
|
||||
"node_compiler": -1,
|
||||
}.get(f[0], 0))
|
||||
|
||||
self._specs = {}
|
||||
for name, args in function_tuples:
|
||||
@@ -2101,6 +2002,7 @@ def build_specs(self, function_tuples):
|
||||
continue
|
||||
|
||||
action = getattr(self, name, None)
|
||||
|
||||
# print out unknown actions so we can display them for debugging
|
||||
if not action:
|
||||
msg = "%s(%s)" % (name, ", ".join(str(a) for a in args))
|
||||
@@ -2110,26 +2012,22 @@ def build_specs(self, function_tuples):
|
||||
assert action and callable(action)
|
||||
|
||||
# ignore predicates on virtual packages, as they're used for
|
||||
# solving but don't construct anything. Do not ignore error
|
||||
# predicates on virtual packages.
|
||||
if name != 'error':
|
||||
pkg = args[0]
|
||||
if spack.repo.path.is_virtual(pkg):
|
||||
continue
|
||||
# solving but don't construct anything
|
||||
pkg = args[0]
|
||||
if spack.repo.path.is_virtual(pkg):
|
||||
continue
|
||||
|
||||
# if we've already gotten a concrete spec for this pkg,
|
||||
# do not bother calling actions on it.
|
||||
spec = self._specs.get(pkg)
|
||||
if spec and spec.concrete:
|
||||
continue
|
||||
# if we've already gotten a concrete spec for this pkg,
|
||||
# do not bother calling actions on it.
|
||||
spec = self._specs.get(pkg)
|
||||
if spec and spec.concrete:
|
||||
continue
|
||||
|
||||
action(*args)
|
||||
|
||||
# namespace assignment is done after the fact, as it is not
|
||||
# currently part of the solve
|
||||
for spec in self._specs.values():
|
||||
if spec.namespace:
|
||||
continue
|
||||
repo = spack.repo.path.repo_for_pkg(spec)
|
||||
spec.namespace = repo.namespace
|
||||
|
||||
@@ -2139,7 +2037,7 @@ def build_specs(self, function_tuples):
|
||||
# inject patches -- note that we' can't use set() to unique the
|
||||
# roots here, because the specs aren't complete, and the hash
|
||||
# function will loop forever.
|
||||
roots = [spec.root for spec in self._specs.values() if not spec.root.installed]
|
||||
roots = [spec.root for spec in self._specs.values()]
|
||||
roots = dict((id(r), r) for r in roots)
|
||||
for root in roots.values():
|
||||
spack.spec.Spec.inject_patches_variant(root)
|
||||
@@ -2151,9 +2049,8 @@ def build_specs(self, function_tuples):
|
||||
for s in self._specs.values():
|
||||
_develop_specs_from_env(s, ev.active_environment())
|
||||
|
||||
# mark concrete and assign hashes to all specs in the solve
|
||||
for root in roots.values():
|
||||
root._finalize_concretization()
|
||||
for s in self._specs.values():
|
||||
s._mark_concrete()
|
||||
|
||||
for s in self._specs.values():
|
||||
spack.spec.Spec.ensure_no_deprecated(s)
|
||||
@@ -2193,13 +2090,9 @@ class Solver(object):
|
||||
|
||||
Properties of interest:
|
||||
|
||||
``reuse (bool)``
|
||||
``reuse (bool or str)``
|
||||
Whether to try to reuse existing installs/binaries
|
||||
|
||||
``minimal (bool)``
|
||||
If ``True`` make minimizing nodes the top priority, even higher
|
||||
than defaults from packages and preferences.
|
||||
|
||||
"""
|
||||
def __init__(self):
|
||||
self.driver = PyclingoDriver()
|
||||
@@ -2207,7 +2100,6 @@ def __init__(self):
|
||||
# These properties are settable via spack configuration, and overridable
|
||||
# by setting them directly as properties.
|
||||
self.reuse = spack.config.get("concretizer:reuse", False)
|
||||
self.minimal = spack.config.get("concretizer:minimal", False)
|
||||
|
||||
def solve(
|
||||
self,
|
||||
@@ -2238,7 +2130,7 @@ def solve(
|
||||
continue
|
||||
spack.spec.Spec.ensure_valid_variants(s)
|
||||
|
||||
setup = SpackSolverSetup(reuse=self.reuse, minimal=self.minimal, tests=tests)
|
||||
setup = SpackSolverSetup(reuse=self.reuse, tests=tests)
|
||||
return self.driver.solve(
|
||||
setup,
|
||||
specs,
|
||||
@@ -2251,27 +2143,25 @@ def solve(
|
||||
|
||||
|
||||
class UnsatisfiableSpecError(spack.error.UnsatisfiableSpecError):
|
||||
"""
|
||||
Subclass for new constructor signature for new concretizer
|
||||
"""
|
||||
def __init__(self, msg):
|
||||
super(spack.error.UnsatisfiableSpecError, self).__init__(msg)
|
||||
self.provided = None
|
||||
self.required = None
|
||||
self.constraint_type = None
|
||||
|
||||
|
||||
class InternalConcretizerError(spack.error.UnsatisfiableSpecError):
|
||||
"""
|
||||
Subclass for new constructor signature for new concretizer
|
||||
"""
|
||||
def __init__(self, provided, conflicts):
|
||||
indented = [' %s\n' % conflict for conflict in conflicts]
|
||||
error_msg = ''.join(indented)
|
||||
msg = 'Spack concretizer internal error. Please submit a bug report'
|
||||
msg += '\n Please include the command, environment if applicable,'
|
||||
msg += '\n and the following error message.'
|
||||
msg = '\n %s is unsatisfiable, errors are:\n%s' % (provided, error_msg)
|
||||
conflict_msg = ''.join(indented)
|
||||
issue = 'conflicts' if full_cores else 'errors'
|
||||
msg = '%s is unsatisfiable, %s are:\n%s' % (provided, issue, conflict_msg)
|
||||
|
||||
newline_indent = '\n '
|
||||
if not full_cores:
|
||||
msg += newline_indent + 'To see full clingo unsat cores, '
|
||||
msg += 're-run with `spack --show-cores=full`'
|
||||
if not minimize_cores or not full_cores:
|
||||
# not solver.minimalize_cores and not solver.full_cores impossible
|
||||
msg += newline_indent + 'For full, subset-minimal unsat cores, '
|
||||
msg += 're-run with `spack --show-cores=minimized'
|
||||
msg += newline_indent
|
||||
msg += 'Warning: This may take (up to) hours for some specs'
|
||||
|
||||
super(spack.error.UnsatisfiableSpecError, self).__init__(msg)
|
||||
|
||||
|
||||
@@ -7,6 +7,22 @@
|
||||
% This logic program implements Spack's concretizer
|
||||
%=============================================================================
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Generic constraints on nodes
|
||||
%-----------------------------------------------------------------------------
|
||||
|
||||
% each node must have a single version
|
||||
:- not 1 { version(Package, _) } 1, node(Package).
|
||||
|
||||
% each node must have a single platform, os and target
|
||||
:- not 1 { node_platform(Package, _) } 1, node(Package), error("A node must have exactly one platform").
|
||||
:- not 1 { node_os(Package, _) } 1, node(Package).
|
||||
:- not 1 { node_target(Package, _) } 1, node(Package).
|
||||
|
||||
% each node has a single compiler associated with it
|
||||
:- not 1 { node_compiler(Package, _) } 1, node(Package).
|
||||
:- not 1 { node_compiler_version(Package, _, _) } 1, node(Package).
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Version semantics
|
||||
%-----------------------------------------------------------------------------
|
||||
@@ -19,40 +35,18 @@ version_declared(Package, Version, Weight) :- version_declared(Package, Version,
|
||||
:- version_declared(Package, Version, Weight, Origin1),
|
||||
version_declared(Package, Version, Weight, Origin2),
|
||||
Origin1 < Origin2,
|
||||
internal_error("Two versions with identical weights").
|
||||
|
||||
% We cannot use a version declared for an installed package if we end up building it
|
||||
:- version_declared(Package, Version, Weight, "installed"),
|
||||
version(Package, Version),
|
||||
version_weight(Package, Weight),
|
||||
not hash(Package, _).
|
||||
error("Internal error: two versions with identical weights").
|
||||
|
||||
% versions are declared w/priority -- declared with priority implies declared
|
||||
version_declared(Package, Version) :- version_declared(Package, Version, _).
|
||||
|
||||
% If something is a package, it has only one version and that must be a
|
||||
% declared version.
|
||||
% We allow clingo to choose any version(s), and infer an error if there
|
||||
% is not precisely one version chosen. Error facts are heavily optimized
|
||||
% against to ensure they cannot be inferred when a non-error solution is
|
||||
% possible
|
||||
{ version(Package, Version) : version_declared(Package, Version) }
|
||||
:- node(Package).
|
||||
error(2, "No version for '{0}' satisfies '@{1}' and '@{2}'", Package, Version1, Version2)
|
||||
:- node(Package),
|
||||
version(Package, Version1),
|
||||
version(Package, Version2),
|
||||
Version1 < Version2. % see[1]
|
||||
1 { version(Package, Version) : version_declared(Package, Version) } 1
|
||||
:- node(Package), error("Each node must have exactly one version").
|
||||
|
||||
error(2, "No versions available for package '{0}'", Package)
|
||||
:- node(Package), not version(Package, _).
|
||||
|
||||
% A virtual package may or may not have a version, but never has more than one
|
||||
error(2, "No version for '{0}' satisfies '@{1}' and '@{2}'", Virtual, Version1, Version2)
|
||||
:- virtual_node(Virtual),
|
||||
version(Virtual, Version1),
|
||||
version(Virtual, Version2),
|
||||
Version1 < Version2. % see[1]
|
||||
% A virtual package may have or not a version, but never has more than one
|
||||
:- virtual_node(Package), 2 { version(Package, _) }.
|
||||
|
||||
% If we select a deprecated version, mark the package as deprecated
|
||||
deprecated(Package, Version) :- version(Package, Version), deprecated_version(Package, Version).
|
||||
@@ -61,27 +55,14 @@ possible_version_weight(Package, Weight)
|
||||
:- version(Package, Version),
|
||||
version_declared(Package, Version, Weight).
|
||||
|
||||
version_weight(Package, Weight)
|
||||
:- version(Package, Version),
|
||||
node(Package),
|
||||
Weight = #min{W : version_declared(Package, Version, W)}.
|
||||
1 { version_weight(Package, Weight) : possible_version_weight(Package, Weight) } 1 :- node(Package), error("Internal error: Package version must have a unique weight").
|
||||
|
||||
% node_version_satisfies implies that exactly one of the satisfying versions
|
||||
% version_satisfies implies that exactly one of the satisfying versions
|
||||
% is the package's version, and vice versa.
|
||||
% While this choice rule appears redundant with the initial choice rule for
|
||||
% versions, virtual nodes with version constraints require this rule to be
|
||||
% able to choose versions
|
||||
{ version(Package, Version) : version_satisfies(Package, Constraint, Version) }
|
||||
:- node_version_satisfies(Package, Constraint).
|
||||
|
||||
% More specific error message if the version cannot satisfy some constraint
|
||||
% Otherwise covered by `no_version_error` and `versions_conflict_error`.
|
||||
error(1, "No valid version for '{0}' satisfies '@{1}'", Package, Constraint)
|
||||
:- node_version_satisfies(Package, Constraint),
|
||||
C = #count{ Version : version(Package, Version), version_satisfies(Package, Constraint, Version)},
|
||||
C < 1.
|
||||
|
||||
node_version_satisfies(Package, Constraint)
|
||||
1 { version(Package, Version) : version_satisfies(Package, Constraint, Version) } 1
|
||||
:- version_satisfies(Package, Constraint),
|
||||
error("no version satisfies the given constraints").
|
||||
version_satisfies(Package, Constraint)
|
||||
:- version(Package, Version), version_satisfies(Package, Constraint, Version).
|
||||
|
||||
#defined version_satisfies/3.
|
||||
@@ -100,7 +81,7 @@ node_version_satisfies(Package, Constraint)
|
||||
% conditions are specified with `condition_requirement` and hold when
|
||||
% corresponding spec attributes hold.
|
||||
condition_holds(ID) :-
|
||||
condition(ID, _);
|
||||
condition(ID);
|
||||
attr(Name, A1) : condition_requirement(ID, Name, A1);
|
||||
attr(Name, A1, A2) : condition_requirement(ID, Name, A1, A2);
|
||||
attr(Name, A1, A2, A3) : condition_requirement(ID, Name, A1, A2, A3).
|
||||
@@ -119,12 +100,7 @@ attr(Name, A1, A2, A3) :- impose(ID), imposed_constraint(ID, Name, A1, A2, A3).
|
||||
variant_value(Package, Variant, Value),
|
||||
not imposed_constraint(Hash, "variant_value", Package, Variant, Value).
|
||||
|
||||
% we cannot have additional flag values when we are working with concrete specs
|
||||
:- node(Package), hash(Package, Hash),
|
||||
node_flag(Package, FlagType, Flag),
|
||||
not imposed_constraint(Hash, "node_flag", Package, FlagType, Flag).
|
||||
|
||||
#defined condition/2.
|
||||
#defined condition/1.
|
||||
#defined condition_requirement/3.
|
||||
#defined condition_requirement/4.
|
||||
#defined condition_requirement/5.
|
||||
@@ -151,9 +127,9 @@ depends_on(Package, Dependency) :- depends_on(Package, Dependency, _).
|
||||
dependency_holds(Package, Dependency, Type) :-
|
||||
dependency_condition(ID, Package, Dependency),
|
||||
dependency_type(ID, Type),
|
||||
condition_holds(ID),
|
||||
build(Package),
|
||||
not external(Package),
|
||||
condition_holds(ID).
|
||||
not external(Package).
|
||||
|
||||
% We cut off dependencies of externals (as we don't really know them).
|
||||
% Don't impose constraints on dependencies that don't exist.
|
||||
@@ -179,18 +155,17 @@ node(Dependency) :- node(Package), depends_on(Package, Dependency).
|
||||
% dependencies) and get a two-node unconnected graph
|
||||
needed(Package) :- root(Package).
|
||||
needed(Dependency) :- needed(Package), depends_on(Package, Dependency).
|
||||
error(1, "'{0}' is not a valid dependency for any package in the DAG", Package)
|
||||
:- node(Package),
|
||||
not needed(Package).
|
||||
:- node(Package), not needed(Package),
|
||||
error("All dependencies must be reachable from root").
|
||||
|
||||
% Avoid cycles in the DAG
|
||||
% some combinations of conditional dependencies can result in cycles;
|
||||
% this ensures that we solve around them
|
||||
path(Parent, Child) :- depends_on(Parent, Child).
|
||||
path(Parent, Descendant) :- path(Parent, A), depends_on(A, Descendant).
|
||||
error(2, "Cyclic dependency detected between '{0}' and '{1}'\n Consider changing variants to avoid the cycle", A, B)
|
||||
:- path(A, B),
|
||||
path(B, A).
|
||||
:- path(A, B), path(B, A), error("Cyclic dependencies are not allowed").
|
||||
|
||||
#defined error/1.
|
||||
|
||||
#defined dependency_type/2.
|
||||
#defined dependency_condition/3.
|
||||
@@ -198,13 +173,14 @@ error(2, "Cyclic dependency detected between '{0}' and '{1}'\n Consider chang
|
||||
%-----------------------------------------------------------------------------
|
||||
% Conflicts
|
||||
%-----------------------------------------------------------------------------
|
||||
error(0, Msg) :- node(Package),
|
||||
conflict(Package, TriggerID, ConstraintID, Msg),
|
||||
:- node(Package),
|
||||
conflict(Package, TriggerID, ConstraintID),
|
||||
condition_holds(TriggerID),
|
||||
condition_holds(ConstraintID),
|
||||
not external(Package). % ignore conflicts for externals
|
||||
not external(Package), % ignore conflicts for externals
|
||||
error("A conflict was triggered").
|
||||
|
||||
#defined conflict/4.
|
||||
#defined conflict/3.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Virtual dependencies
|
||||
@@ -224,17 +200,8 @@ virtual_node(Virtual)
|
||||
|
||||
% If there's a virtual node, we must select one and only one provider.
|
||||
% The provider must be selected among the possible providers.
|
||||
{ provider(Package, Virtual) : possible_provider(Package, Virtual) }
|
||||
:- virtual_node(Virtual).
|
||||
error(2, "Cannot find valid provider for virtual {0}", Virtual)
|
||||
:- virtual_node(Virtual),
|
||||
P = #count{ Package : provider(Package, Virtual)},
|
||||
P < 1.
|
||||
error(2, "Spec cannot include multiple providers for virtual '{0}'\n Requested '{1}' and '{2}'", Virtual, P1, P2)
|
||||
:- virtual_node(Virtual),
|
||||
provider(P1, Virtual),
|
||||
provider(P2, Virtual),
|
||||
P1 < P2.
|
||||
1 { provider(Package, Virtual) : possible_provider(Package, Virtual) } 1
|
||||
:- virtual_node(Virtual), error("Virtual packages must be satisfied by a unique provider").
|
||||
|
||||
% virtual roots imply virtual nodes, and that one provider is a root
|
||||
virtual_node(Virtual) :- virtual_root(Virtual).
|
||||
@@ -259,7 +226,7 @@ virtual_condition_holds(Provider, Virtual) :-
|
||||
% A package cannot be the actual provider for a virtual if it does not
|
||||
% fulfill the conditions to provide that virtual
|
||||
:- provider(Package, Virtual), not virtual_condition_holds(Package, Virtual),
|
||||
internal_error("Virtual when provides not respected").
|
||||
error("Internal error: virtual when provides not respected").
|
||||
|
||||
#defined possible_provider/2.
|
||||
|
||||
@@ -272,7 +239,7 @@ virtual_condition_holds(Provider, Virtual) :-
|
||||
% we select the weight, among the possible ones, that minimizes the overall objective function.
|
||||
1 { provider_weight(Dependency, Virtual, Weight, Reason) :
|
||||
possible_provider_weight(Dependency, Virtual, Weight, Reason) } 1
|
||||
:- provider(Dependency, Virtual), internal_error("Package provider weights must be unique").
|
||||
:- provider(Dependency, Virtual), error("Internal error: package provider weights must be unique").
|
||||
|
||||
% Get rid of the reason for enabling the possible weight (useful for debugging)
|
||||
provider_weight(Dependency, Virtual, Weight) :- provider_weight(Dependency, Virtual, Weight, _).
|
||||
@@ -315,10 +282,9 @@ possible_provider_weight(Dependency, Virtual, 100, "fallback") :- provider(Depen
|
||||
% These allow us to easily define conditional dependency and conflict rules
|
||||
% without enumerating all spec attributes every time.
|
||||
node(Package) :- attr("node", Package).
|
||||
virtual_node(Virtual) :- attr("virtual_node", Virtual).
|
||||
hash(Package, Hash) :- attr("hash", Package, Hash).
|
||||
version(Package, Version) :- attr("version", Package, Version).
|
||||
node_version_satisfies(Package, Constraint) :- attr("node_version_satisfies", Package, Constraint).
|
||||
version_satisfies(Package, Constraint) :- attr("version_satisfies", Package, Constraint).
|
||||
node_platform(Package, Platform) :- attr("node_platform", Package, Platform).
|
||||
node_os(Package, OS) :- attr("node_os", Package, OS).
|
||||
node_target(Package, Target) :- attr("node_target", Package, Target).
|
||||
@@ -334,10 +300,9 @@ node_compiler_version_satisfies(Package, Compiler, Version)
|
||||
:- attr("node_compiler_version_satisfies", Package, Compiler, Version).
|
||||
|
||||
attr("node", Package) :- node(Package).
|
||||
attr("virtual_node", Virtual) :- virtual_node(Virtual).
|
||||
attr("hash", Package, Hash) :- hash(Package, Hash).
|
||||
attr("version", Package, Version) :- version(Package, Version).
|
||||
attr("node_version_satisfies", Package, Constraint) :- node_version_satisfies(Package, Constraint).
|
||||
attr("version_satisfies", Package, Constraint) :- version_satisfies(Package, Constraint).
|
||||
attr("node_platform", Package, Platform) :- node_platform(Package, Platform).
|
||||
attr("node_os", Package, OS) :- node_os(Package, OS).
|
||||
attr("node_target", Package, Target) :- node_target(Package, Target).
|
||||
@@ -365,7 +330,7 @@ attr("node_compiler_version_satisfies", Package, Compiler, Version)
|
||||
#defined external_only/1.
|
||||
#defined pkg_provider_preference/4.
|
||||
#defined default_provider_preference/3.
|
||||
#defined node_version_satisfies/2.
|
||||
#defined version_satisfies/2.
|
||||
#defined node_compiler_version_satisfies/3.
|
||||
#defined root/1.
|
||||
|
||||
@@ -374,23 +339,15 @@ attr("node_compiler_version_satisfies", Package, Compiler, Version)
|
||||
%-----------------------------------------------------------------------------
|
||||
|
||||
% if a package is external its version must be one of the external versions
|
||||
{ external_version(Package, Version, Weight):
|
||||
version_declared(Package, Version, Weight, "external") }
|
||||
:- external(Package).
|
||||
error(2, "Attempted to use external for '{0}' which does not satisfy any configured external spec", Package)
|
||||
:- external(Package),
|
||||
not external_version(Package, _, _).
|
||||
error(2, "Attempted to use external for '{0}' which does not satisfy any configured external spec", Package)
|
||||
:- external(Package),
|
||||
external_version(Package, Version1, Weight1),
|
||||
external_version(Package, Version2, Weight2),
|
||||
(Version1, Weight1) < (Version2, Weight2). % see[1]
|
||||
1 { external_version(Package, Version, Weight):
|
||||
version_declared(Package, Version, Weight, "external") } 1
|
||||
:- external(Package), error("External package version does not satisfy external spec").
|
||||
|
||||
version_weight(Package, Weight) :- external_version(Package, Version, Weight).
|
||||
version(Package, Version) :- external_version(Package, Version, Weight).
|
||||
|
||||
% if a package is not buildable (external_only), only externals are allowed
|
||||
external(Package) :- external_only(Package), node(Package).
|
||||
external(Package) :- external_only(Package), build(Package), node(Package).
|
||||
|
||||
% a package is a real_node if it is not external
|
||||
real_node(Package) :- node(Package), not external(Package).
|
||||
@@ -404,7 +361,7 @@ external(Package) :- external_spec_selected(Package, _).
|
||||
version_weight(Package, Weight),
|
||||
version_declared(Package, Version, Weight, "external"),
|
||||
not external(Package),
|
||||
internal_error("External weight used for internal spec").
|
||||
error("Internal error: external weight used for internal spec").
|
||||
|
||||
% determine if an external spec has been selected
|
||||
external_spec_selected(Package, LocalIndex) :-
|
||||
@@ -416,9 +373,8 @@ external_conditions_hold(Package, LocalIndex) :-
|
||||
|
||||
% it cannot happen that a spec is external, but none of the external specs
|
||||
% conditions hold.
|
||||
error(2, "Attempted to use external for '{0}' which does not satisfy any configured external spec", Package)
|
||||
:- external(Package),
|
||||
not external_conditions_hold(Package, _).
|
||||
:- external(Package), not external_conditions_hold(Package, _),
|
||||
error("External package does not satisfy external spec").
|
||||
|
||||
#defined possible_external/3.
|
||||
#defined external_spec_index/3.
|
||||
@@ -435,16 +391,16 @@ variant(Package, Variant) :- variant_condition(ID, Package, Variant),
|
||||
condition_holds(ID).
|
||||
|
||||
% a variant cannot be set if it is not a variant on the package
|
||||
error(2, "Cannot set variant '{0}' for package '{1}' because the variant condition cannot be satisfied for the given spec", Package, Variant)
|
||||
:- variant_set(Package, Variant),
|
||||
not variant(Package, Variant),
|
||||
build(Package).
|
||||
:- variant_set(Package, Variant),
|
||||
not variant(Package, Variant),
|
||||
build(Package),
|
||||
error("Unsatisfied conditional variants cannot be set").
|
||||
|
||||
% a variant cannot take on a value if it is not a variant of the package
|
||||
error(2, "Cannot set variant '{0}' for package '{1}' because the variant condition cannot be satisfied for the given spec", Package, Variant)
|
||||
:- variant_value(Package, Variant, _),
|
||||
not variant(Package, Variant),
|
||||
build(Package).
|
||||
:- variant_value(Package, Variant, _),
|
||||
not variant(Package, Variant),
|
||||
build(Package),
|
||||
error("Unsatisfied conditional variants cannot take on a variant value").
|
||||
|
||||
% if a variant is sticky and not set its value is the default value
|
||||
variant_value(Package, Variant, Value) :-
|
||||
@@ -454,30 +410,27 @@ variant_value(Package, Variant, Value) :-
|
||||
variant_default_value(Package, Variant, Value),
|
||||
build(Package).
|
||||
|
||||
% at most one variant value for single-valued variants.
|
||||
{
|
||||
% one variant value for single-valued variants.
|
||||
1 {
|
||||
variant_value(Package, Variant, Value)
|
||||
: variant_possible_value(Package, Variant, Value)
|
||||
} 1
|
||||
:- node(Package),
|
||||
variant(Package, Variant),
|
||||
variant_single_value(Package, Variant),
|
||||
build(Package),
|
||||
error("Single valued variants must have a single value").
|
||||
|
||||
% at least one variant value for multi-valued variants.
|
||||
1 {
|
||||
variant_value(Package, Variant, Value)
|
||||
: variant_possible_value(Package, Variant, Value)
|
||||
}
|
||||
:- node(Package),
|
||||
variant(Package, Variant),
|
||||
build(Package).
|
||||
|
||||
|
||||
error(2, "'{0}' required multiple values for single-valued variant '{1}'\n Requested 'Spec({1}={2})' and 'Spec({1}={3})'", Package, Variant, Value1, Value2)
|
||||
:- node(Package),
|
||||
variant(Package, Variant),
|
||||
variant_single_value(Package, Variant),
|
||||
build(Package),
|
||||
variant_value(Package, Variant, Value1),
|
||||
variant_value(Package, Variant, Value2),
|
||||
Value1 < Value2. % see[1]
|
||||
error(2, "No valid value for variant '{1}' of package '{0}'", Package, Variant)
|
||||
:- node(Package),
|
||||
variant(Package, Variant),
|
||||
build(Package),
|
||||
C = #count{ Value : variant_value(Package, Variant, Value) },
|
||||
C < 1.
|
||||
not variant_single_value(Package, Variant),
|
||||
build(Package),
|
||||
error("Internal error: All variants must have a value").
|
||||
|
||||
% if a variant is set to anything, it is considered 'set'.
|
||||
variant_set(Package, Variant) :- variant_set(Package, Variant, _).
|
||||
@@ -485,21 +438,21 @@ variant_set(Package, Variant) :- variant_set(Package, Variant, _).
|
||||
% A variant cannot have a value that is not also a possible value
|
||||
% This only applies to packages we need to build -- concrete packages may
|
||||
% have been built w/different variants from older/different package versions.
|
||||
error(1, "'Spec({1}={2})' is not a valid value for '{0}' variant '{1}'", Package, Variant, Value)
|
||||
:- variant_value(Package, Variant, Value),
|
||||
not variant_possible_value(Package, Variant, Value),
|
||||
build(Package).
|
||||
:- variant_value(Package, Variant, Value),
|
||||
not variant_possible_value(Package, Variant, Value),
|
||||
build(Package),
|
||||
error("Variant set to invalid value").
|
||||
|
||||
% Some multi valued variants accept multiple values from disjoint sets.
|
||||
% Ensure that we respect that constraint and we don't pick values from more
|
||||
% than one set at once
|
||||
error(2, "{0} variant '{1}' cannot have values '{2}' and '{3}' as they come from disjoing value sets", Package, Variant, Value1, Value2)
|
||||
:- variant_value(Package, Variant, Value1),
|
||||
variant_value(Package, Variant, Value2),
|
||||
variant_value_from_disjoint_sets(Package, Variant, Value1, Set1),
|
||||
variant_value_from_disjoint_sets(Package, Variant, Value2, Set2),
|
||||
Set1 < Set2, % see[1]
|
||||
build(Package).
|
||||
:- variant_value(Package, Variant, Value1),
|
||||
variant_value(Package, Variant, Value2),
|
||||
variant_value_from_disjoint_sets(Package, Variant, Value1, Set1),
|
||||
variant_value_from_disjoint_sets(Package, Variant, Value2, Set2),
|
||||
Set1 < Set2,
|
||||
build(Package),
|
||||
error("Variant values selected from multiple disjoint sets").
|
||||
|
||||
% variant_set is an explicitly set variant value. If it's not 'set',
|
||||
% we revert to the default value. If it is set, we force the set value
|
||||
@@ -557,11 +510,12 @@ variant_default_value(Package, Variant, Value) :- variant_default_value_from_cli
|
||||
|
||||
% Treat 'none' in a special way - it cannot be combined with other
|
||||
% values even if the variant is multi-valued
|
||||
error(2, "{0} variant '{1}' cannot have values '{2}' and 'none'", Package, Variant, Value)
|
||||
:- variant_value(Package, Variant, Value),
|
||||
variant_value(Package, Variant, "none"),
|
||||
Value != "none",
|
||||
build(Package).
|
||||
:- 2 {
|
||||
variant_value(Package, Variant, Value) : variant_possible_value(Package, Variant, Value)
|
||||
},
|
||||
variant_value(Package, Variant, "none"),
|
||||
build(Package),
|
||||
error("Variant value 'none' cannot be combined with any other value").
|
||||
|
||||
% patches and dev_path are special variants -- they don't have to be
|
||||
% declared in the package, so we just allow them to spring into existence
|
||||
@@ -605,18 +559,6 @@ node_platform(Package, Platform)
|
||||
% platform is set if set to anything
|
||||
node_platform_set(Package) :- node_platform_set(Package, _).
|
||||
|
||||
% each node must have a single platform
|
||||
error(2, "No valid platform found for {0}", Package)
|
||||
:- node(Package),
|
||||
C = #count{ Platform : node_platform(Package, Platform)},
|
||||
C < 1.
|
||||
|
||||
error(2, "Cannot concretize {0} with multiple platforms\n Requested 'platform={1}' and 'platform={2}'", Package, Platform1, Platform2)
|
||||
:- node(Package),
|
||||
node_platform(Package, Platform1),
|
||||
node_platform(Package, Platform2),
|
||||
Platform1 < Platform2. % see[1]
|
||||
|
||||
#defined node_platform_set/2. % avoid warnings
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
@@ -626,32 +568,20 @@ error(2, "Cannot concretize {0} with multiple platforms\n Requested 'platform
|
||||
os(OS) :- os(OS, _).
|
||||
|
||||
% one os per node
|
||||
{ node_os(Package, OS) : os(OS) } :- node(Package).
|
||||
|
||||
error(2, "Cannot find valid operating system for '{0}'", Package)
|
||||
:- node(Package),
|
||||
C = #count{ OS : node_os(Package, OS)},
|
||||
C < 1.
|
||||
|
||||
error(2, "Cannot concretize {0} with multiple operating systems\n Requested 'os={1}' and 'os={2}'", Package, OS1, OS2)
|
||||
:- node(Package),
|
||||
node_os(Package, OS1),
|
||||
node_os(Package, OS2),
|
||||
OS1 < OS2. %see [1]
|
||||
1 { node_os(Package, OS) : os(OS) } 1 :-
|
||||
node(Package), error("Each node must have exactly one OS").
|
||||
|
||||
% can't have a non-buildable OS on a node we need to build
|
||||
error(2, "Cannot concretize '{0} os={1}'. Operating system '{1}' is not buildable", Package, OS)
|
||||
:- build(Package),
|
||||
node_os(Package, OS),
|
||||
not buildable_os(OS).
|
||||
:- build(Package), node_os(Package, OS), not buildable_os(OS),
|
||||
error("No available OS can be built for").
|
||||
|
||||
% can't have dependencies on incompatible OS's
|
||||
error(2, "{0} and dependency {1} have incompatible operating systems 'os={2}' and 'os={3}'", Package, Dependency, PackageOS, DependencyOS)
|
||||
:- depends_on(Package, Dependency),
|
||||
node_os(Package, PackageOS),
|
||||
node_os(Dependency, DependencyOS),
|
||||
not os_compatible(PackageOS, DependencyOS),
|
||||
build(Package).
|
||||
:- depends_on(Package, Dependency),
|
||||
node_os(Package, PackageOS),
|
||||
node_os(Dependency, DependencyOS),
|
||||
not os_compatible(PackageOS, DependencyOS),
|
||||
build(Package),
|
||||
error("Dependencies must have compatible OS's with their dependents").
|
||||
|
||||
% give OS choice weights according to os declarations
|
||||
node_os_weight(Package, Weight)
|
||||
@@ -683,24 +613,14 @@ node_os(Package, OS) :- node_os_set(Package, OS), node(Package).
|
||||
%-----------------------------------------------------------------------------
|
||||
|
||||
% Each node has only one target chosen among the known targets
|
||||
{ node_target(Package, Target) : target(Target) } :- node(Package).
|
||||
|
||||
error(2, "Cannot find valid target for '{0}'", Package)
|
||||
:- node(Package),
|
||||
C = #count{Target : node_target(Package, Target)},
|
||||
C < 1.
|
||||
|
||||
error(2, "Cannot concretize '{0}' with multiple targets\n Requested 'target={1}' and 'target={2}'", Package, Target1, Target2)
|
||||
:- node(Package),
|
||||
node_target(Package, Target1),
|
||||
node_target(Package, Target2),
|
||||
Target1 < Target2. % see[1]
|
||||
1 { node_target(Package, Target) : target(Target) } 1 :- node(Package), error("Each node must have exactly one target").
|
||||
|
||||
% If a node must satisfy a target constraint, enforce it
|
||||
error(1, "'{0} target={1}' cannot satisfy constraint 'target={2}'", Package, Target, Constraint)
|
||||
:- node_target(Package, Target),
|
||||
node_target_satisfies(Package, Constraint),
|
||||
not target_satisfies(Constraint, Target).
|
||||
:- node_target(Package, Target),
|
||||
node_target_satisfies(Package, Constraint),
|
||||
not target_satisfies(Constraint, Target),
|
||||
error("Node targets must satisfy node target constraints").
|
||||
|
||||
|
||||
% If a node has a target and the target satisfies a constraint, then the target
|
||||
% associated with the node satisfies the same constraint
|
||||
@@ -708,10 +628,10 @@ node_target_satisfies(Package, Constraint)
|
||||
:- node_target(Package, Target), target_satisfies(Constraint, Target).
|
||||
|
||||
% If a node has a target, all of its dependencies must be compatible with that target
|
||||
error(2, "Cannot find compatible targets for {0} and {1}", Package, Dependency)
|
||||
:- depends_on(Package, Dependency),
|
||||
node_target(Package, Target),
|
||||
not node_target_compatible(Dependency, Target).
|
||||
:- depends_on(Package, Dependency),
|
||||
node_target(Package, Target),
|
||||
not node_target_compatible(Dependency, Target),
|
||||
error("Dependency node targets must be compatible with dependent targets").
|
||||
|
||||
% Intermediate step for performance reasons
|
||||
% When the integrity constraint above was formulated including this logic
|
||||
@@ -752,12 +672,12 @@ target_weight(Target, Package, Weight)
|
||||
:- package_target_weight(Target, Package, Weight).
|
||||
|
||||
% can't use targets on node if the compiler for the node doesn't support them
|
||||
error(2, "{0} compiler '{2}@{3}' incompatible with 'target={1}'", Package, Target, Compiler, Version)
|
||||
:- node_target(Package, Target),
|
||||
not compiler_supports_target(Compiler, Version, Target),
|
||||
node_compiler(Package, Compiler),
|
||||
node_compiler_version(Package, Compiler, Version),
|
||||
build(Package).
|
||||
:- node_target(Package, Target),
|
||||
not compiler_supports_target(Compiler, Version, Target),
|
||||
node_compiler(Package, Compiler),
|
||||
node_compiler_version(Package, Compiler, Version),
|
||||
build(Package),
|
||||
error("No satisfying compiler available is compatible with a satisfying target").
|
||||
|
||||
% if a target is set explicitly, respect it
|
||||
node_target(Package, Target)
|
||||
@@ -784,10 +704,8 @@ node_target_mismatch(Parent, Dependency)
|
||||
not node_target_match(Parent, Dependency).
|
||||
|
||||
% disallow reusing concrete specs that don't have a compatible target
|
||||
error(2, "'{0} target={1}' is not compatible with this machine", Package, Target)
|
||||
:- node(Package),
|
||||
node_target(Package, Target),
|
||||
not target(Target).
|
||||
:- node(Package), node_target(Package, Target), not target(Target),
|
||||
error("No satisfying package's target is compatible with this machine").
|
||||
|
||||
#defined node_target_set/2.
|
||||
#defined package_target_weight/3.
|
||||
@@ -799,19 +717,10 @@ compiler(Compiler) :- compiler_version(Compiler, _).
|
||||
|
||||
% There must be only one compiler set per built node. The compiler
|
||||
% is chosen among available versions.
|
||||
{ node_compiler_version(Package, Compiler, Version) : compiler_version(Compiler, Version) } :-
|
||||
1 { node_compiler_version(Package, Compiler, Version) : compiler_version(Compiler, Version) } 1 :-
|
||||
node(Package),
|
||||
build(Package).
|
||||
|
||||
error(2, "No valid compiler version found for '{0}'", Package)
|
||||
:- node(Package),
|
||||
C = #count{ Version : node_compiler_version(Package, _, Version)},
|
||||
C < 1.
|
||||
error(2, "'{0}' compiler constraints '%{1}@{2}' and '%{3}@{4}' are incompatible", Package, Compiler1, Version1, Compiler2, Version2)
|
||||
:- node(Package),
|
||||
node_compiler_version(Package, Compiler1, Version1),
|
||||
node_compiler_version(Package, Compiler2, Version2),
|
||||
(Compiler1, Version1) < (Compiler2, Version2). % see[1]
|
||||
build(Package),
|
||||
error("Each node must have exactly one compiler").
|
||||
|
||||
% Sometimes we just need to know the compiler and not the version
|
||||
node_compiler(Package, Compiler) :- node_compiler_version(Package, Compiler, _).
|
||||
@@ -820,22 +729,14 @@ node_compiler(Package, Compiler) :- node_compiler_version(Package, Compiler, _).
|
||||
:- node_compiler(Package, Compiler1),
|
||||
node_compiler_version(Package, Compiler2, _),
|
||||
Compiler1 != Compiler2,
|
||||
internal_error("Mismatch between selected compiler and compiler version").
|
||||
|
||||
% If the compiler of a node cannot be satisfied, raise
|
||||
error(1, "No valid compiler for {0} satisfies '%{1}'", Package, Compiler)
|
||||
:- node(Package),
|
||||
node_compiler_version_satisfies(Package, Compiler, ":"),
|
||||
C = #count{ Version : node_compiler_version(Package, Compiler, Version), compiler_version_satisfies(Compiler, ":", Version) },
|
||||
C < 1.
|
||||
error("Internal error: mismatch between selected compiler and compiler version").
|
||||
|
||||
% If the compiler of a node must satisfy a constraint, then its version
|
||||
% must be chosen among the ones that satisfy said constraint
|
||||
error(2, "No valid version for '{0}' compiler '{1}' satisfies '@{2}'", Package, Compiler, Constraint)
|
||||
:- node(Package),
|
||||
node_compiler_version_satisfies(Package, Compiler, Constraint),
|
||||
C = #count{ Version : node_compiler_version(Package, Compiler, Version), compiler_version_satisfies(Compiler, Constraint, Version) },
|
||||
C < 1.
|
||||
1 { node_compiler_version(Package, Compiler, Version)
|
||||
: compiler_version_satisfies(Compiler, Constraint, Version) } 1 :-
|
||||
node_compiler_version_satisfies(Package, Compiler, Constraint),
|
||||
error("Internal error: node compiler version mismatch").
|
||||
|
||||
% If the node is associated with a compiler and the compiler satisfy a constraint, then
|
||||
% the compiler associated with the node satisfy the same constraint
|
||||
@@ -853,12 +754,11 @@ node_compiler_version(Package, Compiler, Version) :- node_compiler_version_set(P
|
||||
% Cannot select a compiler if it is not supported on the OS
|
||||
% Compilers that are explicitly marked as allowed
|
||||
% are excluded from this check
|
||||
error(2, "{0} compiler '%{1}@{2}' incompatible with 'os={3}'", Package, Compiler, Version, OS)
|
||||
:- node_compiler_version(Package, Compiler, Version),
|
||||
node_os(Package, OS),
|
||||
not compiler_supports_os(Compiler, Version, OS),
|
||||
not allow_compiler(Compiler, Version),
|
||||
build(Package).
|
||||
:- node_compiler_version(Package, Compiler, Version), node_os(Package, OS),
|
||||
not compiler_supports_os(Compiler, Version, OS),
|
||||
not allow_compiler(Compiler, Version),
|
||||
build(Package),
|
||||
error("No satisfying compiler available is compatible with a satisfying os").
|
||||
|
||||
% If a package and one of its dependencies don't have the
|
||||
% same compiler there's a mismatch.
|
||||
@@ -951,7 +851,7 @@ no_flags(Package, FlagType)
|
||||
%-----------------------------------------------------------------------------
|
||||
% the solver is free to choose at most one installed hash for each package
|
||||
{ hash(Package, Hash) : installed_hash(Package, Hash) } 1
|
||||
:- node(Package), internal_error("Package must resolve to at most one hash").
|
||||
:- node(Package), error("Internal error: package must resolve to at most one hash").
|
||||
|
||||
% you can't choose an installed hash for a dev spec
|
||||
:- hash(Package, Hash), variant_value(Package, "dev_path", _).
|
||||
@@ -962,72 +862,29 @@ impose(Hash) :- hash(Package, Hash).
|
||||
% if we haven't selected a hash for a package, we'll be building it
|
||||
build(Package) :- not hash(Package, _), node(Package).
|
||||
|
||||
% Minimizing builds is tricky. We want a minimizing criterion because we want to reuse
|
||||
% what is available, but we also want things that are built to stick to *default
|
||||
% preferences* from the package and from the user. We therefore treat built specs
|
||||
% differently and apply a different set of optimization criteria to them. Spack's first
|
||||
% priority is to reuse what it can, but if it builds something, the built specs will
|
||||
% respect defaults and preferences.
|
||||
%
|
||||
% This is implemented by bumping the priority of optimization criteria for built specs
|
||||
% -- so that they take precedence over the otherwise topmost-priority criterion to reuse
|
||||
% what is installed.
|
||||
%
|
||||
% If the user explicitly asks for *minimal* installs, we don't differentiate between
|
||||
% built and reused specs - the top priority is just minimizing builds.
|
||||
% If we are acting as a binary package manager, we cannot build anything
|
||||
:- build(_), binary_package_manager().
|
||||
|
||||
% Minimizing builds is tricky. We want a minimizing criterion
|
||||
|
||||
% because we want to reuse what is available, but
|
||||
% we also want things that are built to stick to *default preferences* from
|
||||
% the package and from the user. We therefore treat built specs differently and apply
|
||||
% a different set of optimization criteria to them. Spack's *first* priority is to
|
||||
% reuse what it *can*, but if it builds something, the built specs will respect
|
||||
% defaults and preferences. This is implemented by bumping the priority of optimization
|
||||
% criteria for built specs -- so that they take precedence over the otherwise
|
||||
% topmost-priority criterion to reuse what is installed.
|
||||
%
|
||||
% The priority ranges are:
|
||||
% 200+ Shifted priorities for build nodes; correspond to priorities 0 - 99.
|
||||
% 100 - 199 Unshifted priorities. Currently only includes minimizing #builds.
|
||||
% 0 - 99 Priorities for non-built nodes.
|
||||
build_priority(Package, 200) :- node(Package), build(Package), optimize_for_reuse(),
|
||||
not minimal_installs().
|
||||
build_priority(Package, 0) :- node(Package), not build(Package), optimize_for_reuse().
|
||||
|
||||
% Don't adjust build priorities if reusing, or if doing minimal installs
|
||||
% With minimal, minimizing builds is the TOP priority
|
||||
build_priority(Package, 0) :- node(Package), not optimize_for_reuse().
|
||||
build_priority(Package, 0) :- node(Package), minimal_installs().
|
||||
|
||||
% Minimize builds with both --reuse and with --minimal
|
||||
minimize_builds() :- optimize_for_reuse().
|
||||
minimize_builds() :- minimal_installs().
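A rough restatement of the shifted-priority scheme described in the comments above, as a hypothetical Python sketch (the function and its arguments are illustrative, not part of Spack):

    # Hypothetical sketch of the priority shift: criteria for specs that must be
    # built are bumped by 200 so they outrank the single unshifted criterion
    # (priority 100, "number of packages to build"), while criteria for reused
    # specs stay in the 0-99 range. With --minimal, nothing is shifted.
    def effective_priority(base, built, reuse_enabled, minimal):
        if built and reuse_enabled and not minimal:
            return base + 200
        return base

    assert effective_priority(30, built=True, reuse_enabled=True, minimal=False) == 230
    assert effective_priority(30, built=False, reuse_enabled=True, minimal=False) == 30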
|
||||
|
||||
% don't assign versions from installed packages unless reuse is enabled
|
||||
% NOTE: that "installed" means the declared version was only included because
|
||||
% that package happens to be installed, NOT because it was asked for on the
|
||||
% command line. If the user specifies a hash, the origin will be "spec".
|
||||
%
|
||||
% TODO: There's a slight inconsistency with this: if the user concretizes
|
||||
% and installs `foo ^bar`, for some build dependency `bar`, and then later
|
||||
% does a `spack install --fresh foo ^bar/abcde` (i.e., the hash of `bar`), it
|
||||
% currently *won't* force versions for `bar`'s build dependencies -- `--fresh`
|
||||
% will instead build the latest bar. When we actually include transitive
|
||||
% build deps in the solve, consider using them as a preference to resolve this.
|
||||
:- version(Package, Version),
|
||||
version_weight(Package, Weight),
|
||||
version_declared(Package, Version, Weight, "installed"),
|
||||
not optimize_for_reuse().
|
||||
build_priority(Package, 200) :- build(Package), node(Package).
|
||||
build_priority(Package, 0) :- not build(Package), node(Package).
|
||||
|
||||
#defined installed_hash/2.
|
||||
#defined minimal_installs/0.
|
||||
|
||||
%-----------------------------------------------------------------
|
||||
% Optimization to avoid errors
|
||||
%-----------------------------------------------------------------
|
||||
% Some errors are handled as rules instead of constraints because
|
||||
% it allows us to explain why something failed. Here we optimize
|
||||
% HEAVILY against the facts generated by those rules.
|
||||
#minimize{ 0@1000: #true}.
|
||||
#minimize{ 0@1001: #true}.
|
||||
#minimize{ 0@1002: #true}.
|
||||
|
||||
#minimize{ 1000@1000+Priority,Msg: error(Priority, Msg) }.
|
||||
#minimize{ 1000@1000+Priority,Msg,Arg1: error(Priority, Msg, Arg1) }.
|
||||
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2: error(Priority, Msg, Arg1, Arg2) }.
|
||||
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3: error(Priority, Msg, Arg1, Arg2, Arg3) }.
|
||||
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3,Arg4: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4) }.
|
||||
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3,Arg4,Arg5: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4, Arg5) }.
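To make the weighting concrete, a small illustrative Python sketch of how these #minimize statements penalize error atoms (the numbers mirror the statements above; the helper itself is hypothetical):

    # Each error atom adds a penalty of 1000 at optimization level 1000 + Priority,
    # well above every regular criterion, so an error-free model always wins.
    def error_penalties(errors):
        # errors: iterable of (priority, message, *args) tuples
        penalties = {}
        for err in errors:
            level = 1000 + err[0]
            penalties[level] = penalties.get(level, 0) + 1000
        return penalties

    print(error_penalties([(2, "No versions available for package 'foo'")]))  # {1002: 1000}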
|
||||
#defined binary_package_manager/0.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% How to optimize the spec (high to low priority)
|
||||
@@ -1040,7 +897,7 @@ minimize_builds() :- minimal_installs().
|
||||
% Try hard to reuse installed packages (i.e., minimize the number built)
|
||||
opt_criterion(100, "number of packages to build (vs. reuse)").
|
||||
#minimize { 0@100: #true }.
|
||||
#minimize { 1@100,Package : build(Package), minimize_builds() }.
|
||||
#minimize { 1@100,Package : build(Package), optimize_for_reuse() }.
|
||||
#defined optimize_for_reuse/0.
|
||||
|
||||
% Minimize the number of deprecated versions being used
|
||||
@@ -1208,11 +1065,3 @@ opt_criterion(1, "non-preferred targets").
|
||||
#heuristic variant_value(Package, Variant, Value) : variant_default_value(Package, Variant, Value), node(Package). [10, true]
|
||||
#heuristic provider(Package, Virtual) : possible_provider_weight(Package, Virtual, 0, _), virtual_node(Virtual). [10, true]
|
||||
#heuristic node(Package) : possible_provider_weight(Package, Virtual, 0, _), virtual_node(Virtual). [10, true]
|
||||
|
||||
%-----------
% Notes
%-----------

% [1] Clingo ensures a total ordering among all atoms. We rely on that total ordering
% to reduce symmetry in the solution by checking `<` instead of `!=` in symmetric
% cases. These choices are made without loss of generality.
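The note above is why so many rules compare with `<`; a minimal Python analogue of the same symmetry-breaking trick (purely illustrative):

    # With a total order on the values, requiring "a < b" instead of "a != b"
    # keeps exactly one representative of each symmetric pair (a, b)/(b, a).
    def conflicting_pairs(values):
        return [(a, b) for a in values for b in values if a < b]

    print(conflicting_pairs(["1.0", "1.2"]))  # [('1.0', '1.2')] -- reported once, not twice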
@@ -34,13 +34,3 @@

% deprecated packages
#show deprecated/2.

% error types
#show error/2.
#show error/3.
#show error/4.
#show error/5.
#show error/6.
#show error/7.

% debug

@@ -184,7 +184,7 @@
default_format += '{variants}{arch=architecture}'

#: specfile format version. Must increase monotonically
specfile_format_version = 3
specfile_format_version = 2
|
||||
|
||||
|
||||
def colorize_spec(spec):
|
||||
@@ -1191,14 +1191,11 @@ def __init__(self, spec_like=None, normal=False,
|
||||
self._dependencies = _EdgeMap(store_by=EdgeDirection.child)
|
||||
self.namespace = None
|
||||
|
||||
# initial values for all spec hash types
|
||||
for h in ht.hashes:
|
||||
setattr(self, h.attr, None)
|
||||
|
||||
# Python __hash__ is handled separately from the cached spec hashes
|
||||
self._hash = None
|
||||
self._build_hash = None
|
||||
self._full_hash = None
|
||||
self._package_hash = None
|
||||
self._dunder_hash = None
|
||||
|
||||
# cache of package for this spec
|
||||
self._package = None
|
||||
|
||||
# Most of these are internal implementation details that can be
|
||||
@@ -1213,6 +1210,14 @@ def __init__(self, spec_like=None, normal=False,
|
||||
self.external_path = external_path
|
||||
self.external_modules = Spec._format_module_list(external_modules)
|
||||
|
||||
# Older spack versions did not compute full_hash or build_hash,
|
||||
# and we may not have the necessary information to recompute them
|
||||
# if we read in old specs. Old concrete specs are marked "final"
|
||||
# when read in to indicate that we shouldn't recompute full_hash
|
||||
# or build_hash. New specs are not final; we can lazily compute
|
||||
# their hashes.
|
||||
self._hashes_final = False
|
||||
|
||||
# This attribute is used to store custom information for
|
||||
# external specs. None signal that it was not set yet.
|
||||
self.extra_attributes = None
|
||||
@@ -1549,38 +1554,6 @@ def spliced(self):
|
||||
"""
|
||||
return any(s.build_spec is not s for s in self.traverse(root=True))
|
||||
|
||||
@property
|
||||
def installed(self):
|
||||
"""Installation status of a package.
|
||||
|
||||
Returns:
|
||||
True if the package has been installed, False otherwise.
|
||||
"""
|
||||
if not self.concrete:
|
||||
return False
|
||||
|
||||
try:
|
||||
# If the spec is in the DB, check the installed
|
||||
# attribute of the record
|
||||
return spack.store.db.get_record(self).installed
|
||||
except KeyError:
|
||||
# If the spec is not in the DB, the method
|
||||
# above raises a Key error
|
||||
return False
|
||||
|
||||
@property
|
||||
def installed_upstream(self):
|
||||
"""Whether the spec is installed in an upstream repository.
|
||||
|
||||
Returns:
|
||||
True if the package is installed in an upstream, False otherwise.
|
||||
"""
|
||||
if not self.concrete:
|
||||
return False
|
||||
|
||||
upstream, _ = spack.store.db.query_by_spec_hash(self.dag_hash())
|
||||
return upstream
|
||||
|
||||
def traverse(self, **kwargs):
|
||||
direction = kwargs.get('direction', 'children')
|
||||
depth = kwargs.get('depth', False)
|
||||
@@ -1599,15 +1572,7 @@ def traverse(self, **kwargs):
|
||||
def traverse_edges(self, visited=None, d=0, deptype='all',
|
||||
dep_spec=None, **kwargs):
|
||||
"""Generic traversal of the DAG represented by this spec.
|
||||
|
||||
This yields ``DependencySpec`` objects as they are traversed.
|
||||
|
||||
When traversing top-down, an imaginary incoming edge to the root
|
||||
is yielded first as ``DependencySpec(None, root, ())``. When
|
||||
traversing bottom-up, imaginary edges to leaves are yielded first
|
||||
as ``DependencySpec(left, None, ())`` objects.
|
||||
|
||||
Options:
|
||||
This will yield each node in the spec. Options:
|
||||
|
||||
order [=pre|post]
|
||||
Order to traverse spec nodes. Defaults to preorder traversal.
|
||||
@@ -1762,7 +1727,7 @@ def prefix(self):
|
||||
def prefix(self, value):
|
||||
self._prefix = spack.util.prefix.Prefix(pth.convert_to_platform_path(value))
|
||||
|
||||
def spec_hash(self, hash):
|
||||
def _spec_hash(self, hash):
|
||||
"""Utility method for computing different types of Spec hashes.
|
||||
|
||||
Arguments:
|
||||
@@ -1776,69 +1741,71 @@ def spec_hash(self, hash):
|
||||
json_text = sjson.dump(node_dict)
|
||||
return spack.util.hash.b32_hash(json_text)
|
||||
|
||||
def _cached_hash(self, hash, length=None, force=False):
|
||||
def _cached_hash(self, hash, length=None):
|
||||
"""Helper function for storing a cached hash on the spec.
|
||||
|
||||
This will run spec_hash() with the deptype and package_hash
|
||||
This will run _spec_hash() with the deptype and package_hash
|
||||
parameters, and if this spec is concrete, it will store the value
|
||||
in the supplied attribute on this spec.
|
||||
|
||||
Arguments:
|
||||
hash (spack.hash_types.SpecHashDescriptor): type of hash to generate.
|
||||
length (int): length of hash prefix to return (default is full hash string)
|
||||
force (bool): cache the hash even if spec is not concrete (default False)
|
||||
"""
|
||||
if not hash.attr:
|
||||
return self.spec_hash(hash)[:length]
|
||||
return self._spec_hash(hash)[:length]
|
||||
|
||||
hash_string = getattr(self, hash.attr, None)
|
||||
if hash_string:
|
||||
return hash_string[:length]
|
||||
else:
|
||||
hash_string = self.spec_hash(hash)
|
||||
if force or self.concrete:
|
||||
hash_string = self._spec_hash(hash)
|
||||
if self.concrete:
|
||||
setattr(self, hash.attr, hash_string)
|
||||
|
||||
return hash_string[:length]
|
||||
|
||||
def package_hash(self):
|
||||
"""Compute the hash of the contents of the package for this node"""
|
||||
# Concrete specs with the old DAG hash did not have the package hash, so we do
|
||||
# not know what the package looked like at concretization time
|
||||
if self.concrete and not self._package_hash:
|
||||
raise ValueError(
|
||||
"Cannot call package_hash() on concrete specs with the old dag_hash()"
|
||||
)
|
||||
|
||||
return self._cached_hash(ht.package_hash)
|
||||
|
||||
def dag_hash(self, length=None):
|
||||
"""This is Spack's default hash, used to identify installations.
|
||||
|
||||
Same as the full hash (includes package hash and build/link/run deps).
|
||||
Tells us when package files and any dependencies have changed.

NOTE: Versions of Spack prior to 0.18 only included link and run deps.

At the moment, it excludes build dependencies to avoid rebuilding
packages whenever build dependency versions change. We will
revise this to include more detailed provenance when the
concretizer can more aggressively reuse installed dependencies.
|
||||
"""
|
||||
return self._cached_hash(ht.dag_hash, length)
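For orientation, a hypothetical usage sketch of the hash accessors touched by this hunk (assumes a working Spack checkout; not part of the diff):

    import spack.spec

    s = spack.spec.Spec("zlib").concretized()
    print(s.dag_hash())       # default identity hash
    print(s.dag_hash(7))      # truncated prefix, e.g. for install directory names
    print(s.build_hash())     # includes build deps; used when storing environments
    print(s.process_hash())   # includes build and test deps; used between processes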
|
||||
|
||||
def build_hash(self, length=None):
|
||||
"""Hash used to store specs in environments.
|
||||
|
||||
This hash includes build dependencies, and we need to preserve
|
||||
them to be able to rebuild an entire environment for a user.
|
||||
"""
|
||||
return self._cached_hash(ht.build_hash, length)
|
||||
|
||||
def process_hash(self, length=None):
|
||||
"""Hash used to transfer specs among processes.
|
||||
"""Hash used to store specs in environments.
|
||||
|
||||
This hash includes build and test dependencies and is only used to
|
||||
serialize a spec and pass it around among processes.
|
||||
"""
|
||||
return self._cached_hash(ht.process_hash, length)
|
||||
|
||||
def full_hash(self, length=None):
|
||||
"""Hash to determine when to rebuild packages in the build pipeline.
|
||||
|
||||
This hash includes the package hash, so that we know when package
|
||||
files have changed between builds.
|
||||
"""
|
||||
return self._cached_hash(ht.full_hash, length)
|
||||
|
||||
def dag_hash_bit_prefix(self, bits):
|
||||
"""Get the first <bits> bits of the DAG hash as an integer type."""
|
||||
return spack.util.hash.base32_prefix_bits(self.dag_hash(), bits)
|
||||
|
||||
def process_hash_bit_prefix(self, bits):
|
||||
"""Get the first <bits> bits of the DAG hash as an integer type."""
|
||||
return spack.util.hash.base32_prefix_bits(self.process_hash(), bits)
|
||||
|
||||
def to_node_dict(self, hash=ht.dag_hash):
|
||||
"""Create a dictionary representing the state of this Spec.
|
||||
|
||||
@@ -1932,13 +1899,8 @@ def to_node_dict(self, hash=ht.dag_hash):
|
||||
if hasattr(variant, '_patches_in_order_of_appearance'):
|
||||
d['patches'] = variant._patches_in_order_of_appearance
|
||||
|
||||
if self._concrete and hash.package_hash and self._package_hash:
|
||||
# We use the attribute here instead of `self.package_hash()` because this
|
||||
# should *always* be assigned at concretization time. We don't want to try
# to compute a package hash for a concrete spec where a) the package might not
|
||||
# exist, or b) the `dag_hash` didn't include the package hash when the spec
|
||||
# was concretized.
|
||||
package_hash = self._package_hash
|
||||
if hash.package_hash:
|
||||
package_hash = self.package_hash()
|
||||
|
||||
# Full hashes are in bytes
|
||||
if (not isinstance(package_hash, six.text_type)
|
||||
@@ -2009,7 +1971,7 @@ def to_dict(self, hash=ht.dag_hash):
|
||||
"dependencies": [
|
||||
{
|
||||
"name": "readline",
|
||||
"hash": "4f47cggum7p4qmp3xna4hi547o66unva",
|
||||
"build_hash": "4f47cggum7p4qmp3xna4hi547o66unva",
|
||||
"type": [
|
||||
"build",
|
||||
"link"
|
||||
@@ -2017,14 +1979,16 @@ def to_dict(self, hash=ht.dag_hash):
|
||||
},
|
||||
{
|
||||
"name": "zlib",
|
||||
"hash": "uvgh6p7rhll4kexqnr47bvqxb3t33jtq",
|
||||
"build_hash": "uvgh6p7rhll4kexqnr47bvqxb3t33jtq",
|
||||
"type": [
|
||||
"build",
|
||||
"link"
|
||||
]
|
||||
}
|
||||
],
|
||||
"hash": "tve45xfqkfgmzwcyfetze2z6syrg7eaf",
|
||||
"hash": "d2yzqp2highd7sn4nr5ndkw3ydcrlhtk",
|
||||
"full_hash": "tve45xfqkfgmzwcyfetze2z6syrg7eaf",
|
||||
"build_hash": "tsjnz7lgob7bu2wd4sqzzjenxewc2zha"
|
||||
},
|
||||
# ... more node dicts for readline and its dependencies ...
|
||||
]
|
||||
@@ -2053,12 +2017,10 @@ def to_dict(self, hash=ht.dag_hash):
|
||||
node_list = [] # Using a list to preserve preorder traversal for hash.
|
||||
hash_set = set()
|
||||
for s in self.traverse(order='pre', deptype=hash.deptype):
|
||||
spec_hash = s._cached_hash(hash)
|
||||
|
||||
spec_hash = s.node_dict_with_hashes(hash)[hash.name]
|
||||
if spec_hash not in hash_set:
|
||||
node_list.append(s.node_dict_with_hashes(hash))
|
||||
hash_set.add(spec_hash)
|
||||
|
||||
if s.build_spec is not s:
|
||||
build_spec_list = s.build_spec.to_dict(hash)['spec']['nodes']
|
||||
for node in build_spec_list:
|
||||
@@ -2066,7 +2028,6 @@ def to_dict(self, hash=ht.dag_hash):
|
||||
if node_hash not in hash_set:
|
||||
node_list.append(node)
|
||||
hash_set.add(node_hash)
|
||||
|
||||
meta_dict = syaml.syaml_dict([('version', specfile_format_version)])
|
||||
inner_dict = syaml.syaml_dict([('_meta', meta_dict), ('nodes', node_list)])
|
||||
spec_dict = syaml.syaml_dict([('spec', inner_dict)])
|
||||
@@ -2079,19 +2040,38 @@ def node_dict_with_hashes(self, hash=ht.dag_hash):
|
||||
node = self.to_node_dict(hash)
|
||||
node[ht.dag_hash.name] = self.dag_hash()
|
||||
|
||||
# dag_hash is lazily computed -- but if we write a spec out, we want it
|
||||
# to be included. This is effectively the last chance we get to compute
|
||||
# it accurately.
|
||||
# full_hash and build_hash are lazily computed -- but if we write
|
||||
# a spec out, we want them to be included. This is effectively
|
||||
# the last chance we get to compute them accurately.
|
||||
if self.concrete:
|
||||
# all specs have at least a DAG hash
|
||||
node[ht.dag_hash.name] = self.dag_hash()
|
||||
# build and full hashes can be written out if:
|
||||
# 1. they're precomputed (i.e. we read them from somewhere
|
||||
# and they were already on the spec
|
||||
# 2. we can still compute them lazily (i.e. we just made them and
|
||||
# have the full dependency graph on-hand)
|
||||
#
|
||||
# we want to avoid recomputing either hash for specs we read
|
||||
# in from the DB or elsewhere, as we may not have the info
|
||||
# (like patches, package versions, etc.) that we need to
|
||||
# compute them. Unknown hashes are better than wrong hashes.
|
||||
write_full_hash = (
|
||||
self._hashes_final and self._full_hash or # cached and final
|
||||
not self._hashes_final) # lazily compute
|
||||
if write_full_hash:
|
||||
node[ht.full_hash.name] = self.full_hash()
|
||||
|
||||
write_build_hash = 'build' in hash.deptype and (
|
||||
self._hashes_final and self._build_hash or # cached and final
|
||||
not self._hashes_final) # lazily compute
|
||||
if write_build_hash:
|
||||
node[ht.build_hash.name] = self.build_hash()
|
||||
else:
|
||||
node['concrete'] = False
|
||||
|
||||
# we can also give them other hash types if we want
|
||||
if hash.name != ht.dag_hash.name:
|
||||
node[hash.name] = self._cached_hash(hash)
|
||||
if hash.name == 'build_hash':
|
||||
node[hash.name] = self.build_hash()
|
||||
elif hash.name == 'process_hash':
|
||||
node[hash.name] = self.process_hash()
|
||||
|
||||
return node
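The write_full_hash/write_build_hash condition above can be read as a tiny predicate; a hedged restatement in plain Python (the helper name is invented):

    # A stored hash is written out either when it was read in and frozen
    # ("final") together with the spec, or when hashes are not final and can
    # still be computed lazily from the full DAG on hand.
    def can_write_hash(hashes_final, cached_hash):
        return bool(hashes_final and cached_hash or not hashes_final)

    assert can_write_hash(True, "abc123") is True   # cached and final
    assert can_write_hash(True, None) is False      # final, but never computed
    assert can_write_hash(False, None) is True      # lazily computable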
|
||||
|
||||
@@ -2172,6 +2152,11 @@ def from_node_dict(node):
|
||||
# specs read in are concrete unless marked abstract
|
||||
spec._concrete = node.get('concrete', True)
|
||||
|
||||
# this spec may have been built with older packages than we have
|
||||
# on-hand, and we may not have the build dependencies, so mark it
|
||||
# so we don't recompute full_hash and build_hash.
|
||||
spec._hashes_final = spec._concrete
|
||||
|
||||
if 'patches' in node:
|
||||
patches = node['patches']
|
||||
if len(patches) > 0:
|
||||
@@ -2182,7 +2167,7 @@ def from_node_dict(node):
|
||||
# FIXME: Monkey patches mvar to store patches order
|
||||
mvar._patches_in_order_of_appearance = patches
|
||||
|
||||
# Don't read dependencies here; from_dict() is used by
|
||||
# Don't read dependencies here; from_node_dict() is used by
|
||||
# from_yaml() and from_json() to read the root *and* each dependency
|
||||
# spec.
|
||||
|
||||
@@ -2209,6 +2194,7 @@ def dependencies_from_node_dict(node):
|
||||
@staticmethod
|
||||
def read_yaml_dep_specs(deps, hash_type=ht.dag_hash.name):
|
||||
"""Read the DependencySpec portion of a YAML-formatted Spec.
|
||||
|
||||
This needs to be backward-compatible with older spack spec
|
||||
formats so that reindex will work on old specs/databases.
|
||||
"""
|
||||
@@ -2227,13 +2213,17 @@ def read_yaml_dep_specs(deps, hash_type=ht.dag_hash.name):
|
||||
dep_hash, deptypes = elt
|
||||
elif isinstance(elt, dict):
|
||||
# new format: elements of dependency spec are keyed.
|
||||
for h in ht.hashes:
|
||||
if h.name in elt:
|
||||
dep_hash, deptypes = elt[h.name], elt['type']
|
||||
hash_type = h.name
|
||||
for key in (ht.full_hash.name,
|
||||
ht.build_hash.name,
|
||||
ht.dag_hash.name,
|
||||
ht.process_hash.name):
|
||||
if key in elt:
|
||||
dep_hash, deptypes = elt[key], elt['type']
|
||||
hash_type = key
|
||||
break
|
||||
else: # We never determined a hash type...
|
||||
raise spack.error.SpecError("Couldn't parse dependency spec.")
|
||||
raise spack.error.SpecError(
|
||||
"Couldn't parse dependency spec.")
|
||||
else:
|
||||
raise spack.error.SpecError(
|
||||
"Couldn't parse dependency types in spec.")
|
||||
@@ -2665,11 +2655,11 @@ def _old_concretize(self, tests=False, deprecation_warning=True):
|
||||
import spack.concretize
|
||||
|
||||
# Add a warning message to inform users that the original concretizer
|
||||
# will be removed
|
||||
# will be removed in v0.18.0
|
||||
if deprecation_warning:
|
||||
msg = ('the original concretizer is currently being used.\n\tUpgrade to '
|
||||
'"clingo" at your earliest convenience. The original concretizer '
|
||||
'will be removed from Spack in a future version.')
|
||||
'will be removed from Spack starting at v0.18.0')
|
||||
warnings.warn(msg)
|
||||
|
||||
if not self.name:
|
||||
@@ -2708,8 +2698,8 @@ def _old_concretize(self, tests=False, deprecation_warning=True):
|
||||
# TODO: or turn external_path into a lazy property
|
||||
Spec.ensure_external_path_if_external(s)
|
||||
|
||||
# assign hashes and mark concrete
|
||||
self._finalize_concretization()
|
||||
# Mark everything in the spec as concrete, as well.
|
||||
self._mark_concrete()
|
||||
|
||||
# If any spec in the DAG is deprecated, throw an error
|
||||
Spec.ensure_no_deprecated(self)
|
||||
@@ -2739,21 +2729,6 @@ def _old_concretize(self, tests=False, deprecation_warning=True):
|
||||
# there are declared inconsistencies)
|
||||
self.architecture.target.optimization_flags(self.compiler)
|
||||
|
||||
def _patches_assigned(self):
|
||||
"""Whether patches have been assigned to this spec by the concretizer."""
|
||||
# FIXME: _patches_in_order_of_appearance is attached after concretization
|
||||
# FIXME: to store the order of patches.
|
||||
# FIXME: Probably needs to be refactored in a cleaner way.
|
||||
if "patches" not in self.variants:
|
||||
return False
|
||||
|
||||
# ensure that patch state is consistent
|
||||
patch_variant = self.variants["patches"]
|
||||
assert hasattr(patch_variant, "_patches_in_order_of_appearance"), \
|
||||
"patches should always be assigned with a patch variant."
|
||||
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def inject_patches_variant(root):
|
||||
# This dictionary will store object IDs rather than Specs as keys
|
||||
@@ -2838,13 +2813,13 @@ def ensure_external_path_if_external(external_spec):
|
||||
|
||||
@staticmethod
|
||||
def ensure_no_deprecated(root):
|
||||
"""Raise if a deprecated spec is in the dag.
|
||||
"""Raise is a deprecated spec is in the dag.
|
||||
|
||||
Args:
|
||||
root (Spec): root spec to be analyzed
|
||||
|
||||
Raises:
|
||||
SpecDeprecatedError: if any deprecated spec is found
|
||||
SpecDeprecatedError: is any deprecated spec is found
|
||||
"""
|
||||
deprecated = []
|
||||
with spack.store.db.read_transaction():
|
||||
@@ -2888,6 +2863,7 @@ def _new_concretize(self, tests=False):
|
||||
|
||||
concretized = answer[name]
|
||||
self._dup(concretized)
|
||||
self._mark_concrete()
|
||||
|
||||
def concretize(self, tests=False):
|
||||
"""Concretize the current spec.
|
||||
@@ -2904,7 +2880,7 @@ def concretize(self, tests=False):
|
||||
|
||||
def _mark_root_concrete(self, value=True):
|
||||
"""Mark just this spec (not dependencies) concrete."""
|
||||
if (not value) and self.concrete and self.installed:
|
||||
if (not value) and self.concrete and self.package.installed:
|
||||
return
|
||||
self._normal = value
|
||||
self._concrete = value
|
||||
@@ -2918,70 +2894,12 @@ def _mark_concrete(self, value=True):
|
||||
# if set to false, clear out all hashes (set to None or remove attr)
|
||||
# may need to change references to respect None
|
||||
for s in self.traverse():
|
||||
if (not value) and s.concrete and s.installed:
|
||||
if (not value) and s.concrete and s.package.installed:
|
||||
continue
|
||||
elif not value:
|
||||
s.clear_cached_hashes()
|
||||
s._mark_root_concrete(value)
|
||||
|
||||
def _assign_hash(self, hash):
|
||||
"""Compute and cache the provided hash type for this spec and its dependencies.
|
||||
|
||||
Arguments:
|
||||
hash (spack.hash_types.SpecHashDescriptor): the hash to assign to nodes
|
||||
in the spec.
|
||||
|
||||
There are special semantics to consider for `package_hash`.
|
||||
|
||||
This should be called:
|
||||
1. for `package_hash`, immediately after concretization, but *before* marking
|
||||
concrete, and
|
||||
2. for `dag_hash`, immediately after marking concrete.
|
||||
|
||||
`package_hash` is tricky, because we can't call it on *already* concrete specs,
|
||||
but we need to assign it *at concretization time* to just-concretized specs. So,
|
||||
the concretizer must assign the package hash *before* marking their specs
|
||||
concrete (so that the only concrete specs are the ones already marked concrete).
|
||||
|
||||
`dag_hash` is also tricky, since it cannot compute `package_hash()` lazily for
|
||||
the same reason. `package_hash` needs to be assigned *at concretization time*,
|
||||
so, `to_node_dict()` can't just assume that it can compute `package_hash` itself
|
||||
-- it needs to either see or not see a `_package_hash` attribute.
|
||||
|
||||
Rules of thumb for `package_hash`:
|
||||
1. Old-style concrete specs from *before* `dag_hash` included `package_hash`
|
||||
will not have a `_package_hash` attribute at all.
|
||||
2. New-style concrete specs will have a `_package_hash` assigned at
|
||||
concretization time.
|
||||
3. Abstract specs will not have a `_package_hash` attribute at all.
|
||||
|
||||
"""
|
||||
for spec in self.traverse():
|
||||
# Already concrete specs either already have a package hash (new dag_hash())
|
||||
# or they never will b/c we can't know it (old dag_hash()). Skip them.
|
||||
if hash is ht.package_hash and not spec.concrete:
|
||||
spec._cached_hash(hash, force=True)
|
||||
|
||||
# keep this check here to ensure package hash is saved
|
||||
assert getattr(spec, hash.attr)
|
||||
else:
|
||||
spec._cached_hash(hash)
|
||||
|
||||
def _finalize_concretization(self):
|
||||
"""Assign hashes to this spec, and mark it concrete.
|
||||
|
||||
This is called at the end of concretization.
|
||||
"""
|
||||
# See docs for _assign_hash for why package_hash needs to happen here.
|
||||
self._assign_hash(ht.package_hash)
|
||||
|
||||
# Mark everything in the spec as concrete
|
||||
self._mark_concrete()
|
||||
|
||||
# Assign dag_hash (this *could* be done lazily, but it's assigned anyway in
|
||||
# ensure_no_deprecated, and it's clearer to see explicitly where it happens)
|
||||
self._assign_hash(ht.dag_hash)
|
||||
|
||||
def concretized(self, tests=False):
|
||||
"""This is a non-destructive version of concretize().
|
||||
|
||||
@@ -2993,7 +2911,7 @@ def concretized(self, tests=False):
|
||||
if a list of names activate them for the packages in the list,
|
||||
if True activate 'test' dependencies for all packages.
|
||||
"""
|
||||
clone = self.copy()
|
||||
clone = self.copy(caches=True)
|
||||
clone.concretize(tests=tests)
|
||||
return clone
|
||||
|
||||
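The ordering rules described in the docstring above can be illustrated with a small, self-contained sketch. ToySpec and its hash helpers are hypothetical stand-ins, not Spack's API; the sketch only mirrors the sequence: assign the package hash while the node is still abstract, mark it concrete, then compute the DAG-level hash.

# Illustrative sketch only -- a toy model of the ordering in _finalize_concretization.
# ToySpec, assign_package_hash and finalize are hypothetical names, not Spack code.
import hashlib

class ToySpec:
    def __init__(self, name):
        self.name = name
        self.concrete = False
        self._package_hash = None
        self._dag_hash = None

    def assign_package_hash(self):
        # Mirrors rule (1) above: must run before the spec is marked concrete.
        assert not self.concrete
        self._package_hash = hashlib.sha256(self.name.encode()).hexdigest()

    def finalize(self):
        self.assign_package_hash()   # 1. package hash first
        self.concrete = True         # 2. then mark concrete
        payload = (self.name + self._package_hash).encode()
        self._dag_hash = hashlib.sha256(payload).hexdigest()  # 3. dag hash last

spec = ToySpec("zlib")
spec.finalize()
print(spec._dag_hash[:16])
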
@@ -3241,7 +3159,7 @@ def _normalize_helper(self, visited, spec_deps, provider_index, tests):
# Avoid recursively adding constraints for already-installed packages:
# these may include build dependencies which are not needed for this
# install (since this package is already installed).
if self.concrete and self.installed:
if self.concrete and self.package.installed:
return False

# Combine constraints from package deps with constraints from
@@ -3292,8 +3210,8 @@ def normalize(self, force=False, tests=False, user_spec_deps=None):
"Attempting to normalize anonymous spec")

# Set _normal and _concrete to False when forced
if force and not self._concrete:
self._normal = False
if force:
self._mark_concrete(False)

if self._normal:
return False
@@ -3733,6 +3651,7 @@ def virtual_dependencies(self):
return [spec for spec in self.traverse() if spec.virtual]

@property # type: ignore[misc] # decorated prop not supported in mypy
@lang.memoized
def patches(self):
"""Return patch objects for any patch sha256 sums on this Spec.

@@ -3742,19 +3661,26 @@ def patches(self):
TODO: this only checks in the package; it doesn't resurrect old
patches from install directories, but it probably should.
"""
if not hasattr(self, "_patches"):
self._patches = []
if not self.concrete:
raise spack.error.SpecError("Spec is not concrete: " + str(self))

# translate patch sha256sums to patch objects by consulting the index
if self._patches_assigned():
for sha256 in self.variants["patches"]._patches_in_order_of_appearance:
index = spack.repo.path.patch_index
patch = index.patch_for_package(sha256, self.package)
self._patches.append(patch)
if 'patches' not in self.variants:
return []

return self._patches
# FIXME: _patches_in_order_of_appearance is attached after
# FIXME: concretization to store the order of patches somewhere.
# FIXME: Needs to be refactored in a cleaner way.

def _dup(self, other, deps=True, cleardeps=True):
# translate patch sha256sums to patch objects by consulting the index
patches = []
for sha256 in self.variants['patches']._patches_in_order_of_appearance:
index = spack.repo.path.patch_index
patch = index.patch_for_package(sha256, self.package)
patches.append(patch)

return patches

def _dup(self, other, deps=True, cleardeps=True, caches=None):
"""Copy the spec other into self. This is an overwriting
copy. It does not copy any dependents (parents), but by default
copies dependencies.
@@ -3769,6 +3695,10 @@ def _dup(self, other, deps=True, cleardeps=True):
cleardeps (bool): if True clears the dependencies of ``self``,
before possibly copying the dependencies of ``other`` onto
``self``
caches (bool or None): preserve cached fields such as
``_normal``, ``_hash``, and ``_dunder_hash``. By
default this is ``False`` if DAG structure would be
changed by the copy, ``True`` if it's an exact copy.

Returns:
True if ``self`` changed because of the copy operation,
@@ -3819,6 +3749,12 @@ def _dup(self, other, deps=True, cleardeps=True):
self.extra_attributes = other.extra_attributes
self.namespace = other.namespace

# Cached fields are results of expensive operations.
# If we preserved the original structure, we can copy them
# safely. If not, they need to be recomputed.
if caches is None:
caches = (deps is True or deps == dp.all_deptypes)

# If we copy dependencies, preserve DAG structure in the new spec
if deps:
# If caller restricted deptypes to be copied, adjust that here.
@@ -3826,26 +3762,29 @@ def _dup(self, other, deps=True, cleardeps=True):
deptypes = dp.all_deptypes
if isinstance(deps, (tuple, list)):
deptypes = deps
self._dup_deps(other, deptypes)
self._dup_deps(other, deptypes, caches)

self._concrete = other._concrete
self._hashes_final = other._hashes_final

if self._concrete:
if caches:
self._hash = other._hash
self._build_hash = other._build_hash
self._dunder_hash = other._dunder_hash
self._normal = other._normal
for h in ht.hashes:
setattr(self, h.attr, getattr(other, h.attr, None))
self._full_hash = other._full_hash
self._package_hash = other._package_hash
else:
self._hash = None
self._build_hash = None
self._dunder_hash = None
# Note, we could use other._normal if we are copying all deps, but
# always set it False here to avoid the complexity of checking
self._normal = False
for h in ht.hashes:
setattr(self, h.attr, None)
self._full_hash = None
self._package_hash = None

return changed

def _dup_deps(self, other, deptypes):
def _dup_deps(self, other, deptypes, caches):
def spid(spec):
return id(spec)

@@ -3856,11 +3795,11 @@ def spid(spec):

if spid(edge.parent) not in new_specs:
new_specs[spid(edge.parent)] = edge.parent.copy(
deps=False
deps=False, caches=caches
)

if spid(edge.spec) not in new_specs:
new_specs[spid(edge.spec)] = edge.spec.copy(deps=False)
new_specs[spid(edge.spec)] = edge.spec.copy(deps=False, caches=caches)

new_specs[spid(edge.parent)].add_dependency_edge(
new_specs[spid(edge.spec)], edge.deptypes
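Hypothetical usage of the caches flag introduced in this hunk (illustration only; it assumes an already concretized Spec s and is not taken from Spack's test suite). It shows how the default computed above -- caches preserved only for an exact DAG copy -- plays out for Spec.copy():

# Illustration only: how the `caches` default in _dup affects Spec.copy().
# `s` is assumed to be an already concretized spack.spec.Spec.
exact = s.copy()                        # deps=True: exact DAG copy, cached hashes preserved
pruned = s.copy(deps=('link', 'run'))   # restricted deptypes: caches default to False, recomputed later
node = s.copy(deps=False, caches=True)  # single node, but cached hashes kept explicitly
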
@@ -4539,15 +4478,54 @@ def __str__(self):
spec_str = " ^".join(d.format() for d in sorted_nodes)
return spec_str.strip()

def install_status(self):
install_stati = lang.enum(
installed=0,
upstream=1,
binary=2,
missing=3,
missing_with_binary=4,
unknown=5
)
install_status_symbols = {
install_stati.installed: '@g{[+]}',
install_stati.upstream: '@g{[^]}',
install_stati.binary: '@K{ . }',
install_stati.missing: '@r{[-]}',
install_stati.missing_with_binary: '@r{[.]}',
install_stati.unknown: '@K{ - }'
}

def install_status(self, binary_status=True):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
record = spack.store.db.get_record(self)
return record.installed
except KeyError:
return None
return self.install_stati.unknown

binary = False
if binary_status:
import spack.binary_distribution as bindist
try:
binaries = [s.dag_hash()
for s in bindist.update_cache_and_get_specs()
]
binary = self.dag_hash() in binaries
except bindist.FetchCacheError:
pass

upstream, record = spack.store.db.query_by_spec_hash(self.dag_hash())
if not record and binary:
return self.install_stati.binary
elif not record:
return self.install_stati.unknown
elif upstream and record.installed:
return self.install_stati.upstream
elif record.installed:
return self.install_stati.installed
elif record and binary:
return self.install_stati.missing_with_binary
elif record:
return self.install_stati.missing
else:
assert False, "invalid enum value"

def _installed_explicitly(self):
"""Helper for tree to print DB install status."""
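The status-to-symbol mapping consumed by tree() in the next hunk can be sketched with the standard library; this is an illustrative analogue only -- Spack's lang.enum helper and its color markup (@g{...}, @r{...}) are not reproduced here.

# Illustrative analogue of install_stati / install_status_symbols using stdlib enum.
from enum import Enum

class InstallStatus(Enum):
    installed = 0
    upstream = 1
    binary = 2
    missing = 3
    missing_with_binary = 4
    unknown = 5

SYMBOLS = {
    InstallStatus.installed: '[+]',
    InstallStatus.upstream: '[^]',
    InstallStatus.binary: ' . ',
    InstallStatus.missing: '[-]',
    InstallStatus.missing_with_binary: '[.]',
    InstallStatus.unknown: ' - ',
}

print(SYMBOLS[InstallStatus.installed])  # -> [+]
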
@@ -4590,14 +4568,8 @@ def tree(self, **kwargs):

if status_fn:
status = status_fn(node)
if node.installed_upstream:
out += clr.colorize("@g{[^]} ", color=color)
elif status is None:
out += clr.colorize("@K{ - } ", color=color) # !installed
elif status:
out += clr.colorize("@g{[+]} ", color=color) # installed
else:
out += clr.colorize("@r{[-]} ", color=color) # missing
out += clr.colorize('%s ' % self.install_status_symbols[status],
color=color)

if hashes:
out += clr.colorize(
@@ -4738,16 +4710,16 @@ def from_self(name, transitive):
return False
return True

self_nodes = dict((s.name, s.copy(deps=False))
self_nodes = dict((s.name, s.copy(deps=False, caches=True))
for s in self.traverse(root=True)
if from_self(s.name, transitive))

if transitive:
other_nodes = dict((s.name, s.copy(deps=False))
other_nodes = dict((s.name, s.copy(deps=False, caches=True))
for s in other.traverse(root=True))
else:
# NOTE: Does not fully validate providers; loader races possible
other_nodes = dict((s.name, s.copy(deps=False))
other_nodes = dict((s.name, s.copy(deps=False, caches=True))
for s in other.traverse(root=True)
if s is other or s.name not in self)

@@ -4780,10 +4752,23 @@ def from_self(name, transitive):
for dep in ret.traverse(root=True, order='post'):
opposite = other_nodes if dep.name in self_nodes else self_nodes
if any(name in dep for name in opposite.keys()):
# Record whether hashes are already cached
# So we don't try to compute a hash from insufficient
# provenance later
has_build_hash = getattr(dep, ht.build_hash.name, None)
has_full_hash = getattr(dep, ht.full_hash.name, None)

# package hash cannot be affected by splice
dep.clear_cached_hashes(ignore=['package_hash'])

dep.dag_hash()
# Since this is a concrete spec, we want to make sure hashes
# are cached. Writing specs only writes cached hashes, in case
# the spec is too old to have full provenance for these hashes,
# so we can't rely on doing it at write time.
if has_build_hash:
_ = dep.build_hash()
if has_full_hash:
_ = dep.full_hash()

return nodes[self.name]

@@ -4795,21 +4780,16 @@ def clear_cached_hashes(self, ignore=()):
if h.attr not in ignore:
if hasattr(self, h.attr):
setattr(self, h.attr, None)
self._dunder_hash = None

def __hash__(self):
# If the spec is concrete, we leverage the process hash and just use
# a 64-bit prefix of it. The process hash has the advantage that it's
# computed once per concrete spec, and it's saved -- so if we read
# concrete specs we don't need to recompute the whole hash. This is
# good for large, unchanging specs.
#
# We use the process hash instead of the DAG hash here because the DAG
# hash includes the package hash, which can cause infinite recursion,
# and which isn't defined unless the spec has a known package.
# If the spec is concrete, we leverage the DAG hash and just use
# a 64-bit prefix of it. The DAG hash has the advantage that it's
# computed once per concrete spec, and it's saved -- so if we
# read concrete specs we don't need to recompute the whole hash.
# This is good for large, unchanging specs.
if self.concrete:
if not self._dunder_hash:
self._dunder_hash = self.process_hash_bit_prefix(64)
self._dunder_hash = self.dag_hash_bit_prefix(64)
return self._dunder_hash

# This is the normal hash for lazy_lexicographic_ordering. It's
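The "64-bit prefix" idea in the comments above can be shown with a simplified sketch. It assumes a hex-encoded digest for clarity; Spack's dag_hash_bit_prefix operates on its own hash encoding, so this is only an illustration of taking the leading 64 bits of a stable digest as __hash__.

# Simplified illustration (not Spack's implementation): use the first 64 bits
# of a stable digest as the object's hash value.
import hashlib

def bit_prefix_64(text):
    digest = hashlib.sha256(text.encode()).hexdigest()
    return int(digest[:16], 16)  # 16 hex characters == 64 bits

print(bit_prefix_64("zlib@1.2.12"))
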
@@ -4874,7 +4854,7 @@ def _spec_from_old_dict(data):
if 'dependencies' not in node[name]:
continue

for dname, _, dtypes, _ in Spec.dependencies_from_node_dict(node):
for dname, dhash, dtypes, _ in Spec.dependencies_from_node_dict(node):
deps[name]._add_dependency(deps[dname], dtypes)

return spec
@@ -4908,7 +4888,7 @@ def _spec_from_dict(data):
break

if not any_deps: # If we never see a dependency...
hash_type = ht.dag_hash.name
hash_type = ht.dag_hash.name # use the full_hash provenance
elif not hash_type: # Seen a dependency, still don't know hash_type
raise spack.error.SpecError("Spec dictionary contains malformed "
"dependencies. Old format?")
@@ -4918,7 +4898,10 @@ def _spec_from_dict(data):

# Pass 1: Create a single lookup dictionary by hash
for i, node in enumerate(nodes):
node_hash = node[hash_type]
if 'build_spec' in node.keys():
node_hash = node[hash_type]
else:
node_hash = node[hash_type]
node_spec = Spec.from_node_dict(node)
hash_dict[node_hash] = node
hash_dict[node_hash]['node_spec'] = node_spec
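The two-pass reconstruction pattern in _spec_from_dict above -- first index every node by its hash, then wire up dependencies through that lookup table -- can be sketched generically. Node layout and field names here are illustrative, not Spack's spec.json format.

# Generic sketch of the two-pass pattern; field names are illustrative only.
nodes = [
    {"hash": "aaa", "name": "zlib", "deps": []},
    {"hash": "bbb", "name": "libpng", "deps": ["aaa"]},
]

# Pass 1: build a single lookup dictionary keyed by hash.
by_hash = {node["hash"]: dict(node, children=[]) for node in nodes}

# Pass 2: connect each node to its dependencies via the lookup table.
for node in by_hash.values():
    node["children"] = [by_hash[h] for h in node["deps"]]

print([child["name"] for child in by_hash["bbb"]["children"]])  # -> ['zlib']
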
@@ -5103,7 +5086,7 @@ def do_parse(self):

# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs[-1].concrete:
if specs[-1]._hash:
raise RedundantSpecError(specs[-1], 'dependency')
# command line deps get empty deptypes now.
# Real deptypes are assigned later per packages.
@@ -5113,8 +5096,9 @@ def do_parse(self):
# If the next token can be part of a valid anonymous spec,
# create the anonymous spec
if self.next.type in (AT, ON, OFF, PCT):
# Raise an error if the previous spec is already concrete
if specs and specs[-1].concrete:
# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs and specs[-1]._hash:
raise RedundantSpecError(specs[-1],
'compiler, version, '
'or variant')
@@ -5184,15 +5168,10 @@ def parse_compiler(self, text):
return self.compiler()

def spec_by_hash(self):
# TODO: Remove parser dependency on active environment and database.
import spack.environment
self.expect(ID)

dag_hash = self.token.value
matches = []
if spack.environment.active_environment():
matches = spack.environment.active_environment().get_by_hash(dag_hash)
if not matches:
matches = spack.store.db.get_by_hash(dag_hash)
matches = spack.store.db.get_by_hash(dag_hash)
if not matches:
raise NoSuchHashError(dag_hash)

@@ -5376,7 +5355,7 @@ def save_dependency_specfiles(
json_path = os.path.join(output_directory, '{0}.json'.format(dep_name))

with open(json_path, 'w') as fd:
fd.write(dep_spec.to_json(hash=ht.dag_hash))
fd.write(dep_spec.to_json(hash=ht.build_hash))


class SpecParseError(spack.error.SpecError):
@@ -5386,16 +5365,6 @@ def __init__(self, parse_error):
self.string = parse_error.string
self.pos = parse_error.pos

@property
def long_message(self):
return "\n".join(
[
" Encountered when parsing spec:",
" %s" % self.string,
" %s^" % (" " * self.pos),
]
)


class DuplicateDependencyError(spack.error.SpecError):
"""Raised when the same dependency occurs in a spec twice."""

@@ -17,8 +17,6 @@
(['wrong-variant-in-depends-on'], 'PKG-DIRECTIVES'),
# This package has a GitHub patch URL without full_index=1
(['invalid-github-patch-url'], 'PKG-DIRECTIVES'),
# This package has a stand-alone 'test' method in build-time callbacks
(['test-build-callbacks'], 'PKG-DIRECTIVES'),
# This package has no issues
(['mpileaks'], None),
# This package has a conflict with a trigger which cannot constrain the constraint

@@ -11,12 +11,15 @@
import py
import pytest

import llnl.util.filesystem as fs

import spack.binary_distribution as bindist
import spack.config
import spack.hooks.sbang as sbang
import spack.main
import spack.mirror
import spack.repo
import spack.spec as spec
import spack.store
import spack.util.gpg
import spack.util.web as web_util
@@ -78,13 +81,13 @@ def config_directory(tmpdir_factory):
tmpdir = tmpdir_factory.mktemp('test_configs')
# restore some sane defaults for packages and config
config_path = py.path.local(spack.paths.etc_path)
modules_yaml = config_path.join('defaults', 'modules.yaml')
os_modules_yaml = config_path.join('defaults', '%s' %
modules_yaml = config_path.join('spack', 'defaults', 'modules.yaml')
os_modules_yaml = config_path.join('spack', 'defaults', '%s' %
platform.system().lower(),
'modules.yaml')
packages_yaml = config_path.join('defaults', 'packages.yaml')
config_yaml = config_path.join('defaults', 'config.yaml')
repos_yaml = config_path.join('defaults', 'repos.yaml')
packages_yaml = config_path.join('spack', 'defaults', 'packages.yaml')
config_yaml = config_path.join('spack', 'defaults', 'config.yaml')
repos_yaml = config_path.join('spack', 'defaults', 'repos.yaml')
tmpdir.ensure('site', dir=True)
tmpdir.ensure('user', dir=True)
tmpdir.ensure('site/%s' % platform.system().lower(), dir=True)
@@ -391,12 +394,31 @@ def test_built_spec_cache(mirror_dir):

gspec, cspec = Spec('garply').concretized(), Spec('corge').concretized()

for s in [gspec, cspec]:
results = bindist.get_mirrors_for_spec(s)
assert(any([r['spec'] == s for r in results]))
full_hash_map = {
'garply': gspec.full_hash(),
'corge': cspec.full_hash(),
}

gspec_results = bindist.get_mirrors_for_spec(gspec)

gspec_mirrors = {}
for result in gspec_results:
s = result['spec']
assert(s._full_hash == full_hash_map[s.name])
assert(result['mirror_url'] not in gspec_mirrors)
gspec_mirrors[result['mirror_url']] = True

cspec_results = bindist.get_mirrors_for_spec(cspec, full_hash_match=True)

cspec_mirrors = {}
for result in cspec_results:
s = result['spec']
assert(s._full_hash == full_hash_map[s.name])
assert(result['mirror_url'] not in cspec_mirrors)
cspec_mirrors[result['mirror_url']] = True


def fake_dag_hash(spec):
def fake_full_hash(spec):
# Generate an arbitrary hash that is intended to be different than
# whatever a Spec reported before (to test actions that trigger when
# the hash changes)
@@ -408,7 +430,7 @@ def fake_dag_hash(spec):
'test_mirror'
)
def test_spec_needs_rebuild(monkeypatch, tmpdir):
"""Make sure needs_rebuild properly compares remote hash
"""Make sure needs_rebuild properly compares remote full_hash
against locally computed one, avoiding unnecessary rebuilds"""

# Create a temp mirror directory for buildcache usage
@@ -423,14 +445,14 @@ def test_spec_needs_rebuild(monkeypatch, tmpdir):
# Put installed package in the buildcache
buildcache_cmd('create', '-u', '-a', '-d', mirror_dir.strpath, s.name)

rebuild = bindist.needs_rebuild(s, mirror_url)
rebuild = bindist.needs_rebuild(s, mirror_url, rebuild_on_errors=True)

assert not rebuild

# Now monkey patch Spec to change the hash on the package
monkeypatch.setattr(spack.spec.Spec, 'dag_hash', fake_dag_hash)
# Now monkey patch Spec to change the full hash on the package
monkeypatch.setattr(spack.spec.Spec, 'full_hash', fake_full_hash)

rebuild = bindist.needs_rebuild(s, mirror_url)
rebuild = bindist.needs_rebuild(s, mirror_url, rebuild_on_errors=True)

assert rebuild

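The monkeypatch trick used in test_spec_needs_rebuild above -- swapping the hash method so the locally computed value stops matching the remote one -- follows a standard pytest pattern. A minimal standalone sketch (Thing and needs_rebuild are made-up names, not Spack's fixtures):

# Minimal standalone pytest sketch of the same monkeypatch pattern (illustrative only).
class Thing(object):
    def digest(self):
        return "original"

def needs_rebuild(thing, remote_digest):
    return thing.digest() != remote_digest

def test_changed_digest_triggers_rebuild(monkeypatch):
    thing = Thing()
    remote = thing.digest()                 # remote copy recorded before the change
    assert not needs_rebuild(thing, remote)

    # Swap the method, as the Spack test does with Spec.full_hash.
    monkeypatch.setattr(Thing, "digest", lambda self: "changed")
    assert needs_rebuild(thing, remote)
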
@@ -602,6 +624,57 @@ def test_install_legacy_yaml(test_legacy_mirror, install_mockery_mutable_config,
uninstall_cmd('-y', '/t5mczux3tfqpxwmg7egp7axy2jvyulqk')


@pytest.mark.usefixtures(
'install_mockery_mutable_config', 'mock_packages', 'mock_fetch',
)
def test_update_index_fix_deps(monkeypatch, tmpdir, mutable_config):
"""Ensure spack buildcache update-index properly fixes up spec descriptor
files on the mirror when updating the buildcache index."""

# Create a temp mirror directory for buildcache usage
mirror_dir = tmpdir.join('mirror_dir')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
spack.config.set('mirrors', {'test': mirror_url})

a = Spec('a').concretized()
b = Spec('b').concretized()
new_b_full_hash = 'abcdef'

# Install package a with dep b
install_cmd('--no-cache', a.name)

# Create a buildcache for a and its dep b, and update index
buildcache_cmd('create', '-uad', mirror_dir.strpath, a.name)
buildcache_cmd('update-index', '-d', mirror_dir.strpath)

# Simulate an update to b that only affects full hash by simply overwriting
# the full hash in the spec.json file on the mirror
b_spec_json_name = bindist.tarball_name(b, '.spec.json')
b_spec_json_path = os.path.join(mirror_dir.strpath,
bindist.build_cache_relative_path(),
b_spec_json_name)
fs.filter_file(r'"full_hash":\s"\S+"',
'"full_hash": "{0}"'.format(new_b_full_hash),
b_spec_json_path)
# When we update the index, spack should notice that a's notion of the
# full hash of b doesn't match b's notion of its own full hash, and as
# a result, spack should fix the spec.json for a
buildcache_cmd('update-index', '-d', mirror_dir.strpath)

# Read in the concrete spec json of a
a_spec_json_name = bindist.tarball_name(a, '.spec.json')
a_spec_json_path = os.path.join(mirror_dir.strpath,
bindist.build_cache_relative_path(),
a_spec_json_name)

# Turn concrete spec json into a concrete spec (a)
with open(a_spec_json_path) as fd:
a_prime = spec.Spec.from_json(fd.read())

# Make sure the full hash of b in a's spec json matches the new value
assert(a_prime[b.name].full_hash() == new_b_full_hash)


def test_FetchCacheError_only_accepts_lists_of_errors():
with pytest.raises(TypeError, match="list"):
bindist.FetchCacheError("error")

@@ -62,22 +62,6 @@ def test_raising_exception_if_bootstrap_disabled(mutable_config):
spack.bootstrap.store_path()


def test_raising_exception_module_importable():
with pytest.raises(
ImportError,
match='cannot bootstrap the "asdf" Python module',
):
spack.bootstrap.ensure_module_importable_or_raise("asdf")


def test_raising_exception_executables_in_path():
with pytest.raises(
RuntimeError,
match="cannot bootstrap any of the asdf, fdsa executables",
):
spack.bootstrap.ensure_executables_in_path_or_raise(["asdf", "fdsa"], "python")


@pytest.mark.regression('25603')
def test_bootstrap_deactivates_environments(active_mock_environment):
assert spack.environment.active_environment() == active_mock_environment

@@ -95,25 +95,6 @@ def _ensure(env_mods):
return _ensure


@pytest.fixture
def mock_module_cmd(monkeypatch):

class Logger(object):
def __init__(self, fn=None):
self.fn = fn
self.calls = []

def __call__(self, *args, **kwargs):
self.calls.append((args, kwargs))
if self.fn:
return self.fn(*args, **kwargs)

mock_module_cmd = Logger()
monkeypatch.setattr(spack.build_environment, 'module', mock_module_cmd)
monkeypatch.setattr(spack.build_environment, '_on_cray', lambda: (True, None))
return mock_module_cmd


@pytest.mark.skipif(sys.platform == 'win32',
reason="Static to Shared not supported on Win (yet)")
def test_static_to_shared_library(build_environment):
@@ -452,23 +433,3 @@ def test_build_jobs_defaults():
parallel=True, command_line=None, config_default=1, max_cpus=10) == 1
assert determine_number_of_jobs(
parallel=True, command_line=None, config_default=100, max_cpus=10) == 10


def test_dirty_disable_module_unload(
config, mock_packages, working_env, mock_module_cmd
):
"""Test that on CRAY platform 'module unload' is not called if the 'dirty'
option is on.
"""
s = spack.spec.Spec('a').concretized()

# If called with "dirty" we don't unload modules, so no calls to the
# `module` function on Cray
spack.build_environment.setup_package(s.package, dirty=True)
assert not mock_module_cmd.calls

# If called without "dirty" we unload modules on Cray
spack.build_environment.setup_package(s.package, dirty=False)
assert mock_module_cmd.calls
assert any(('unload', 'cray-libsci') == item[0] for item in mock_module_cmd.calls)
assert any(('unload', 'cray-mpich') == item[0] for item in mock_module_cmd.calls)
