Compare commits

1 commit

Harmen Stoppels | e8f03fa9dd | modules/common.py: format_path instead of format | 2024-10-21 09:44:12 +02:00

1149 changed files with 5484 additions and 14889 deletions


@@ -28,8 +28,8 @@ jobs:
run:
shell: ${{ matrix.system.shell }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages


@@ -1,7 +1,7 @@
#!/bin/bash
set -e
source share/spack/setup-env.sh
$PYTHON bin/spack bootstrap disable github-actions-v0.5
$PYTHON bin/spack bootstrap disable github-actions-v0.4
$PYTHON bin/spack bootstrap disable spack-install
$PYTHON bin/spack $SPACK_FLAGS solve zlib
tree $BOOTSTRAP/store


@@ -37,14 +37,14 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.6
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
@@ -60,17 +60,17 @@ jobs:
run: |
brew install cmake bison tree
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: "3.12"
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.6
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find --not-buildable cmake bison
spack -d solve zlib
tree $HOME/.spack/bootstrap/store/
@@ -83,22 +83,22 @@ jobs:
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
run: brew install tree gawk
- name: Remove system executables
run: |
while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
sudo rm $(command -v gpg gpg2 patchelf)
done
brew install tree gawk
sudo rm -rf $(command -v gpg gpg2)
- name: Setup Ubuntu
if: ${{ matrix.runner == 'ubuntu-latest' }}
run: sudo rm -rf $(command -v gpg gpg2 patchelf)
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.6
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack -d gpg list
tree ~/.spack/bootstrap/store/
@@ -110,17 +110,19 @@ jobs:
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
run: brew install tree
- name: Remove system executables
run: |
while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
sudo rm $(command -v gpg gpg2 patchelf)
done
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Setup Ubuntu
if: ${{ matrix.runner == 'ubuntu-latest' }}
run: |
sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: |
3.8
@@ -128,16 +130,15 @@ jobs:
3.10
3.11
3.12
3.13
- name: Set bootstrap sources
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
- name: Bootstrap clingo
run: |
set -e
for ver in '3.8' '3.9' '3.10' '3.11' '3.12' '3.13'; do
for ver in '3.8' '3.9' '3.10' '3.11' '3.12' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
if [[ -d "$ver_dir" ]] ; then
@@ -171,10 +172,10 @@ jobs:
runs-on: "windows-latest"
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: "3.12"
- name: Setup Windows
@@ -184,8 +185,8 @@ jobs:
- name: Bootstrap clingo
run: |
./share/spack/setup-env.ps1
spack bootstrap disable github-actions-v0.6
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find --not-buildable cmake bison
spack -d solve zlib
./share/spack/qa/validate_last_exit.ps1


@@ -55,7 +55,7 @@ jobs:
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
id: docker_meta


@@ -24,7 +24,7 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
@@ -83,17 +83,10 @@ jobs:
all-prechecks:
needs: [ prechecks ]
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- name: Success
run: |
if [ "${{ needs.prechecks.result }}" == "failure" ] || [ "${{ needs.prechecks.result }}" == "canceled" ]; then
echo "Unit tests failed."
exit 1
else
exit 0
fi
run: "true"
coverage:
needs: [ unit-tests, prechecks ]
@@ -101,19 +94,8 @@ jobs:
secrets: inherit
all:
needs: [ unit-tests, coverage, bootstrap ]
if: ${{ always() }}
needs: [ coverage, bootstrap ]
runs-on: ubuntu-latest
# See https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/accessing-contextual-information-about-workflow-runs#needs-context
steps:
- name: Status summary
run: |
if [ "${{ needs.unit-tests.result }}" == "failure" ] || [ "${{ needs.unit-tests.result }}" == "canceled" ]; then
echo "Unit tests failed."
exit 1
elif [ "${{ needs.bootstrap.result }}" == "failure" ] || [ "${{ needs.bootstrap.result }}" == "canceled" ]; then
echo "Bootstrap tests failed."
exit 1
else
exit 0
fi
- name: Success
run: "true"


@@ -8,8 +8,8 @@ jobs:
upload:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: '3.11'
cache: 'pip'


@@ -14,10 +14,10 @@ jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: 3.9
- name: Install Python packages


@@ -1,7 +1,7 @@
black==24.10.0
black==24.8.0
clingo==5.7.1
flake8==7.1.1
isort==5.13.2
mypy==1.8.0
types-six==1.16.21.20241105
types-six==1.16.21.20240513
vermin==1.6.0


@@ -40,10 +40,10 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -89,10 +89,10 @@ jobs:
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: '3.11'
- name: Install System packages
@@ -130,7 +130,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
- name: Setup repo and non-root user
run: |
git --version
@@ -149,10 +149,10 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: '3.13'
- name: Install System packages
@@ -170,11 +170,11 @@ jobs:
run: |
. share/spack/setup-env.sh
spack bootstrap disable spack-install
spack bootstrap disable github-actions-v0.4
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.6
spack bootstrap status
spack solve zlib
spack unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml lib/spack/spack/test/concretization/core.py
spack unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml lib/spack/spack/test/concretize.py
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
with:
name: coverage-clingo-cffi
@@ -188,10 +188,10 @@ jobs:
os: [macos-13, macos-14]
python-version: ["3.11"]
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -226,10 +226,10 @@ jobs:
powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
runs-on: windows-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: 3.9
- name: Install Python packages


@@ -18,8 +18,8 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: '3.11'
cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
with:
python-version: '3.11'
cache: 'pip'
@@ -70,7 +70,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
- name: Setup repo and non-root user
run: |
git --version
@@ -98,14 +98,14 @@ jobs:
# PR: use the base of the PR as the old commit
- name: Checkout PR base commit
if: github.event_name == 'pull_request'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
ref: ${{ github.event.pull_request.base.sha }}
path: old
# not a PR: use the previous commit as the old commit
- name: Checkout previous commit
if: github.event_name != 'pull_request'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
fetch-depth: 2
path: old
@@ -114,14 +114,14 @@ jobs:
run: git -C old reset --hard HEAD^
- name: Checkout new commit
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
path: new
- name: Install circular import checker
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
with:
repository: haampie/circular-import-fighter
ref: 9f60f51bc7134e0be73f27623f1b0357d1718427
ref: 555519c6fd5564fd2eb844e7b87e84f4d12602e2
path: circular-import-fighter
- name: Install dependencies
working-directory: circular-import-fighter


@@ -14,26 +14,3 @@ sphinx:
python:
install:
- requirements: lib/spack/docs/requirements.txt
search:
ranking:
spack.html: -10
spack.*.html: -10
llnl.html: -10
llnl.*.html: -10
_modules/*: -10
command_index.html: -9
basic_usage.html: 5
configuration.html: 5
config_yaml.html: 5
packages_yaml.html: 5
build_settings.html: 5
environments.html: 5
containers.html: 5
mirrors.html: 5
module_file_support.html: 5
repositories.html: 5
binary_caches.html: 5
chain.html: 5
pipelines.html: 5
packaging_guide.html: 5


@@ -1,11 +1,71 @@
@ECHO OFF
setlocal EnableDelayedExpansion
:: (c) 2021 Lawrence Livermore National Laboratory
:: To use this file independently of Spack's installer, execute this script in its directory, or add the
:: associated bin directory to your PATH. Invoke to launch Spack Shell.
::
:: source_dir/spack/bin/spack_cmd.bat
::
pushd %~dp0..
set SPACK_ROOT=%CD%
pushd %CD%\..
set spackinstdir=%CD%
popd
call "%~dp0..\share\spack\setup-env.bat"
pushd %SPACK_ROOT%
%comspec% /K
:: Check if Python is on the PATH
if not defined python_pf_ver (
(for /f "delims=" %%F in ('where python.exe') do (
set "python_pf_ver=%%F"
goto :found_python
) ) 2> NUL
)
:found_python
if not defined python_pf_ver (
:: If not, look for Python from the Spack installer
:get_builtin
(for /f "tokens=*" %%g in ('dir /b /a:d "!spackinstdir!\Python*"') do (
set "python_ver=%%g")) 2> NUL
if not defined python_ver (
echo Python was not found on your system.
echo Please install Python or add Python to your PATH.
) else (
set "py_path=!spackinstdir!\!python_ver!"
set "py_exe=!py_path!\python.exe"
)
goto :exitpoint
) else (
:: Python is already on the path
set "py_exe=!python_pf_ver!"
(for /F "tokens=* USEBACKQ" %%F in (
`"!py_exe!" --version`) do (set "output=%%F")) 2>NUL
if not "!output:Microsoft Store=!"=="!output!" goto :get_builtin
goto :exitpoint
)
:exitpoint
set "PATH=%SPACK_ROOT%\bin\;%PATH%"
if defined py_path (
set "PATH=%py_path%;%PATH%"
)
if defined py_exe (
"%py_exe%" "%SPACK_ROOT%\bin\haspywin.py"
)
set "EDITOR=notepad"
DOSKEY spacktivate=spack env activate $*
@echo **********************************************************************
@echo ** Spack Package Manager
@echo **********************************************************************
IF "%1"=="" GOTO CONTINUE
set
GOTO:EOF
:continue
set PROMPT=[spack] %PROMPT%
%comspec% /k


@@ -9,15 +9,15 @@ bootstrap:
# may not be able to bootstrap all the software that Spack needs,
# depending on its type.
sources:
- name: github-actions-v0.6
metadata: $spack/share/spack/bootstrap/github-actions-v0.6
- name: github-actions-v0.5
- name: 'github-actions-v0.5'
metadata: $spack/share/spack/bootstrap/github-actions-v0.5
- name: spack-install
- name: 'github-actions-v0.4'
metadata: $spack/share/spack/bootstrap/github-actions-v0.4
- name: 'spack-install'
metadata: $spack/share/spack/bootstrap/spack-install
trusted:
# By default we trust bootstrapping from sources and from binaries
# produced on Github via the workflow
github-actions-v0.6: true
github-actions-v0.5: true
github-actions-v0.4: true
spack-install: true


@@ -39,19 +39,11 @@ concretizer:
# Option to deal with possible duplicate nodes (i.e. different nodes from the same package) in the DAG.
duplicates:
# "none": allows a single node for any package in the DAG.
# "minimal": allows the duplication of 'build-tools' nodes only
# (e.g. py-setuptools, cmake etc.)
# "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
# "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
strategy: minimal
# Option to specify compatibility between operating systems for reuse of compilers and packages
# Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
# it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
# Option to specify compatiblity between operating systems for reuse of compilers and packages
# Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
# it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
# requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
os_compatible: {}
# Option to specify whether to support splicing. Splicing allows for
# the relinking of concrete package dependencies in order to better
# reuse already built packages with ABI compatible dependencies
splice:
explicit: []
automatic: false


@@ -40,9 +40,9 @@ packages:
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libc: [glibc, musl]
libgfortran: [gcc-runtime]
libgfortran: [ gcc-runtime ]
libglx: [mesa+glx]
libifcore: [intel-oneapi-runtime]
libifcore: [ intel-oneapi-runtime ]
libllvm: [llvm]
lua-lang: [lua, lua-luajit-openresty, lua-luajit]
luajit: [lua-luajit-openresty, lua-luajit]


@@ -1359,10 +1359,6 @@ For example, for the ``stackstart`` variant:
mpileaks stackstart==4 # variant will be propagated to dependencies
mpileaks stackstart=4 # only mpileaks will have this variant value
Spack also allows variants to be propagated from a package that does
not have that variant.
^^^^^^^^^^^^^^
Compiler Flags
^^^^^^^^^^^^^^


@@ -237,35 +237,3 @@ is optional -- by default, splices will be transitive.
``mpich/abcdef`` instead of ``mvapich2`` as the MPI provider. Spack
will warn the user in this case, but will not fail the
concretization.
.. _automatic_splicing:
^^^^^^^^^^^^^^^^^^
Automatic Splicing
^^^^^^^^^^^^^^^^^^
The Spack solver can be configured to do automatic splicing for
ABI-compatible packages. Automatic splices are enabled in the concretizer
config section
.. code-block:: yaml
concretizer:
splice:
automatic: True
Packages can include ABI-compatibility information using the
``can_splice`` directive. See :ref:`the packaging
guide<abi_compatibility>` for instructions on specifying ABI
compatibility using the ``can_splice`` directive.
.. note::
The ``can_splice`` directive is experimental and may be changed in
future versions.
When automatic splicing is enabled, the concretizer will combine any
number of ABI-compatible specs if possible to reuse installed packages
and packages available from binary caches. The end result of these
specs is equivalent to a series of transitive/intransitive splices,
but the series may be non-obvious.


@@ -214,14 +214,12 @@ def setup(sphinx):
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.StandardVersion"),
("py:class", "spack.spec.DependencySpec"),
("py:class", "spack.spec.ArchSpec"),
("py:class", "spack.spec.InstallStatus"),
("py:class", "spack.spec.SpecfileReaderBase"),
("py:class", "spack.install_test.Pb"),
("py:class", "spack.filesystem_view.SimpleFilesystemView"),
("py:class", "spack.traverse.EdgeAndDepth"),
("py:class", "archspec.cpu.microarchitecture.Microarchitecture"),
("py:class", "spack.compiler.CompilerCache"),
# TypeVar that is not handled correctly
("py:class", "llnl.util.lang.T"),
]


@@ -511,7 +511,6 @@ Spack understands over a dozen special variables. These are:
* ``$target_family``. The target family for the current host, as
detected by ArchSpec. E.g. ``x86_64`` or ``aarch64``.
* ``$date``: the current date in the format YYYY-MM-DD
* ``$spack_short_version``: the Spack version truncated to the first two components.
Note that, as with shell variables, you can write these as ``$varname``


@@ -184,7 +184,7 @@ Style Tests
Spack uses `Flake8 <http://flake8.pycqa.org/en/latest/>`_ to test for
`PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance and
`mypy <https://mypy.readthedocs.io/en/stable/>`_ for type checking. PEP 8 is
`mypy <https://mypy.readthedocs.io/en/stable/>` for type checking. PEP 8 is
a series of style guides for Python that provide suggestions for everything
from variable naming to indentation. In order to limit the number of PRs that
were mostly style changes, we decided to enforce PEP 8 conformance. Your PR


@@ -333,9 +333,13 @@ inserting them at different places in the spack code base. Whenever a hook
type triggers by way of a function call, we find all the hooks of that type,
and run them.
Spack defines hooks by way of a module in the ``lib/spack/spack/hooks`` directory.
This module has to be registered in ``__init__.py`` so that Spack is aware of it.
This section will cover the basic kind of hooks, and how to write them.
Spack defines hooks by way of a module at ``lib/spack/spack/hooks`` where we can define
types of hooks in the ``__init__.py``, and then python files in that folder
can use hook functions. The files are automatically parsed, so if you write
a new file for some integration (e.g., ``lib/spack/spack/hooks/myintegration.py``)
you can then write hook functions in that file that will be automatically detected,
and run whenever your hook is called. This section will cover the basic kind
of hooks, and how to write them.
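For orientation, here is a minimal sketch of such a hook module. The file name is hypothetical; ``post_install`` matches one of Spack's existing hook types, but the body is purely illustrative:

.. code-block:: python

   # lib/spack/spack/hooks/myintegration.py -- hypothetical example
   import llnl.util.tty as tty

   def post_install(spec, explicit=None):
       """Runs after each package install; ``spec`` is the installed Spec."""
       tty.msg(f"myintegration: {spec.name} installed to {spec.prefix}")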
^^^^^^^^^^^^^^
Types of Hooks


@@ -35,7 +35,7 @@ A build matrix showing which packages are working on which systems is shown belo
.. code-block:: console
apt update
apt install bzip2 ca-certificates file g++ gcc gfortran git gzip lsb-release patch python3 tar unzip xz-utils zstd
apt install build-essential ca-certificates coreutils curl environment-modules gfortran git gpg lsb-release python3 python3-distutils python3-venv unzip zip
.. tab-item:: RHEL
@@ -43,14 +43,14 @@ A build matrix showing which packages are working on which systems is shown belo
dnf install epel-release
dnf group install "Development Tools"
dnf install gcc-gfortran redhat-lsb-core python3 unzip
dnf install curl findutils gcc-gfortran gnupg2 hostname iproute redhat-lsb-core python3 python3-pip python3-setuptools unzip python3-boto3
.. tab-item:: macOS Brew
.. code-block:: console
brew update
brew install gcc git zip
brew install curl gcc git gnupg zip
------------
Installation


@@ -12,6 +12,10 @@
Spack
===================
.. epigraph::
`These are docs for the Spack package manager. For sphere packing, see` `pyspack <https://pyspack.readthedocs.io>`_.
Spack is a package management tool designed to support multiple
versions and configurations of software on a wide variety of platforms
and environments. It was designed for large supercomputing centers,


@@ -1267,7 +1267,7 @@ Git fetching supports the following parameters to ``version``:
This feature requires ``git`` to be version ``2.25.0`` or later but is useful for
large repositories that have separate portions that can be built independently.
If paths provided are directories then all the subdirectories and associated files
will also be cloned.
will also be cloned.
Only one of ``tag``, ``branch``, or ``commit`` can be used at a time.
@@ -1367,8 +1367,8 @@ Submodules
git-submodule``.
Sparse-Checkout
You can supply ``git_sparse_paths`` at the package or version level to utilize git's
sparse-checkout feature. This will only clone the paths that are specified in the
You can supply ``git_sparse_paths`` at the package or version level to utilize git's
sparse-checkout feature. This will only clone the paths that are specified in the
``git_sparse_paths`` attribute for the package along with the files in the top level directory.
This feature allows you to only clone what you need from a large repository.
Note that this is a newer feature in git and requires git ``2.25.0`` or greater.
@@ -2392,7 +2392,7 @@ by the ``--jobs`` option:
.. code-block:: python
:emphasize-lines: 7, 11
:linenos:
class Xios(Package):
...
def install(self, spec, prefix):
@@ -2503,14 +2503,15 @@ with. For example, suppose that in the ``libdwarf`` package you write:
depends_on("libelf@0.8")
Now ``libdwarf`` will require ``libelf`` in the range ``0.8``, which
includes patch versions ``0.8.1``, ``0.8.2``, etc. Apart from version
restrictions, you can also specify variants if this package requires
optional features of the dependency.
Now ``libdwarf`` will require ``libelf`` at *exactly* version ``0.8``.
You can also specify a requirement for a particular variant or for
specific compiler flags:
.. code-block:: python
depends_on("libelf@0.8 +parser +pic")
depends_on("libelf@0.8+debug")
depends_on("libelf debug=True")
depends_on("libelf cppflags='-fPIC'")
Both users *and* package authors can use the same spec syntax to refer
to different package configurations. Users use the spec syntax on the
@@ -2518,82 +2519,46 @@ command line to find installed packages or to install packages with
particular constraints, and package authors can use specs to describe
relationships between packages.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specifying backward and forward compatibility
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^
Version ranges
^^^^^^^^^^^^^^
Packages are often compatible with a range of versions of their
dependencies. This is typically referred to as backward and forward
compatibility. Spack allows you to specify this in the ``depends_on``
directive using version ranges.
**Backwards compatibility** means that the package requires at least a
certain version of its dependency:
Although some packages require a specific version for their dependencies,
most can be built with a range of versions. For example, if you are
writing a package for a legacy Python module that only works with Python
2.4 through 2.6, this would look like:
.. code-block:: python
depends_on("python@3.10:")
depends_on("python@2.4:2.6")
In this case, the package requires Python 3.10 or newer.
Commonly, packages drop support for older versions of a dependency as
they release new versions. In Spack you can conveniently add every
backward compatibility rule as a separate line:
Version ranges in Spack are *inclusive*, so ``2.4:2.6`` means any version
greater than or equal to ``2.4`` and up to and including any ``2.6.x``. If
you want to specify that a package works with any version of Python 3 (or
higher), this would look like:
.. code-block:: python
# backward compatibility with Python
depends_on("python@3.8:")
depends_on("python@3.9:", when="@1.2:")
depends_on("python@3.10:", when="@1.4:")
depends_on("python@3:")
This means that in general we need Python 3.8 or newer; from version
1.2 onwards we need Python 3.9 or newer; from version 1.4 onwards we
need Python 3.10 or newer. Notice that it's fine to have overlapping
ranges in the ``when`` clauses.
**Forward compatibility** means that the package requires at most a
certain version of its dependency. Forward compatibility rules are
necessary when there are breaking changes in the dependency that the
package cannot handle. In Spack we often add forward compatibility
bounds only at the time a new, breaking version of a dependency is
released. As with backward compatibility, it is typical to see a list
of forward compatibility bounds in a package file as separate lines:
Here we leave out the upper bound. If you want to say that a package
requires Python 2, you can similarly leave out the lower bound:
.. code-block:: python
# forward compatibility with Python
depends_on("python@:3.12", when="@:1.10")
depends_on("python@:3.13", when="@:1.12")
depends_on("python@:2")
Notice how the ``:`` now appears before the version number both in the
dependency and in the ``when`` clause. This tells Spack that in general
we need Python 3.13 or older up to version ``1.12.x``, and up to version
``1.10.x`` we need Python 3.12 or older. Said differently, forward compatibility
with Python 3.13 was added in version 1.11, while version 1.13 added forward
compatibility with Python 3.14.
Notice that we didn't use ``@:3``. Version ranges are *inclusive*, so
``@:3`` means "up to and including any 3.x version".
Notice that a version range ``@:3.12`` includes *any* patch version
number ``3.12.x``, which is often useful when specifying forward compatibility
bounds.
So far we have seen open-ended version ranges, which is by far the most
common use case. It is also possible to specify both a lower and an upper bound
on the version of a dependency, like this:
You can also simply write
.. code-block:: python
depends_on("python@3.10:3.12")
depends_on("python@2.7")
There is short syntax to specify that a package is compatible with say any
``3.x`` version:
.. code-block:: python
depends_on("python@3")
The above is equivalent to ``depends_on("python@3:3")``, which means at least
Python version 3 and at most any version ``3.x.y``.
to tell Spack that the package needs Python 2.7.x. This is equivalent to
``@2.7:2.7``.
In very rare cases, you may need to specify an exact version, for example
if you need to distinguish between ``3.2`` and ``3.2.1``:
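A minimal sketch, assuming Spack's ``@=`` syntax for pinning an exact version:

.. code-block:: python

   # assumes the ``@=`` exact-version syntax: matches 3.2 itself, not 3.2.1
   depends_on("python@=3.2")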
@@ -5420,7 +5385,7 @@ by build recipes. Examples of checking :ref:`variant settings <variants>` and
determine whether it needs to also set up build dependencies (see
:ref:`test-build-tests`).
The ``MyPackage`` package below provides two basic test examples:
The ``MyPackage`` package below provides two basic test examples:
``test_example`` and ``test_example2``. The first runs the installed
``example`` and ensures its output contains an expected string. The second
runs ``example2`` without checking output so is only concerned with confirming
@@ -5737,7 +5702,7 @@ subdirectory of the installation prefix. They are automatically copied to
the appropriate relative paths under the test stage directory prior to
executing stand-alone tests.
.. tip::
.. tip::
*Perform test-related conversions once when copying files.*
@@ -7113,46 +7078,6 @@ might write:
CXXFLAGS += -I$DWARF_PREFIX/include
CXXFLAGS += -L$DWARF_PREFIX/lib
.. _abi_compatibility:
----------------------------
Specifying ABI Compatibility
----------------------------
Packages can include ABI-compatibility information using the
``can_splice`` directive. For example, if ``Foo`` version 1.1 can
always replace version 1.0, then the package could have:
.. code-block:: python
can_splice("foo@1.0", when="@1.1")
For virtual packages, packages can also specify ABI-compatibility with
other packages providing the same virtual. For example, ``zlib-ng``
could specify:
.. code-block:: python
can_splice("zlib@1.3.1", when="@2.2+compat")
Some packages have ABI-compatibility that is dependent on matching
variant values, either for all variants or for some set of
ABI-relevant variants. In those cases, it is not necessary to specify
the full combinatorial explosion. The ``match_variants`` keyword can
cover all single-value variants.
.. code-block:: python
can_splice("foo@1.1", when="@1.2", match_variants=["bar"]) # any value for bar as long as they're the same
can_splice("foo@1.2", when="@1.3", match_variants="*") # any variant values if all single-value variants match
The concretizer will use ABI compatibility to determine automatic
splices when :ref:`automatic splicing<automatic_splicing>` is enabled.
.. note::
The ``can_splice`` directive is experimental, and may be replaced
by a higher-level interface in future versions of Spack.
.. _package_class_structure:


@@ -59,7 +59,7 @@ Functional Example
------------------
The simplest fully functional standalone example of a working pipeline can be
examined live at this example `project <https://gitlab.com/spack/pipeline-quickstart>`_
examined live at this example `project <https://gitlab.com/scott.wittenburg/spack-pipeline-demo>`_
on gitlab.com.
Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the
@@ -67,46 +67,39 @@ pipeline:
.. code-block:: yaml
stages: [ "generate", "build" ]
stages: [generate, build]
variables:
SPACK_REPOSITORY: "https://github.com/spack/spack.git"
SPACK_REF: "develop-2024-10-06"
SPACK_USER_CONFIG_PATH: ${CI_PROJECT_DIR}
SPACK_BACKTRACE: 1
SPACK_REPO: https://github.com/scottwittenburg/spack.git
SPACK_REF: pipelines-reproducible-builds
generate-pipeline:
tags:
- saas-linux-small-amd64
stage: generate
tags:
- docker
image:
name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01
script:
- git clone ${SPACK_REPOSITORY}
- cd spack && git checkout ${SPACK_REF} && cd ../
name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
entrypoint: [""]
before_script:
- git clone ${SPACK_REPO}
- pushd spack && git checkout ${SPACK_REF} && popd
- . "./spack/share/spack/setup-env.sh"
- spack --version
script:
- spack env activate --without-view .
- spack -d -v --color=always
ci generate
--check-index-only
- spack -d ci generate
--artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/cloud-ci-pipeline.yml"
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
artifacts:
paths:
- "${CI_PROJECT_DIR}/jobs_scratch_dir"
build-pipeline:
build-jobs:
stage: build
trigger:
include:
- artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
- artifact: "jobs_scratch_dir/pipeline.yml"
job: generate-pipeline
strategy: depend
needs:
- artifacts: True
job: generate-pipeline
The key thing to note above is that there are two jobs: The first job to run,
``generate-pipeline``, runs the ``spack ci generate`` command to generate a
@@ -121,93 +114,82 @@ And here's the spack environment built by the pipeline represented as a
spack:
view: false
concretizer:
unify: true
reuse: false
unify: false
definitions:
- pkgs:
- zlib
- bzip2 ~debug
- compiler:
- '%gcc'
- bzip2
- arch:
- '%gcc@7.5.0 arch=linux-ubuntu18.04-x86_64'
specs:
- matrix:
- - $pkgs
- - $compiler
- - $arch
mirrors: { "mirror": "s3://spack-public/mirror" }
ci:
target: gitlab
enable-artifacts-buildcache: True
rebuild-index: False
pipeline-gen:
- any-job:
tags:
- saas-linux-small-amd64
image:
name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01
before_script:
- git clone ${SPACK_REPOSITORY}
- cd spack && git checkout ${SPACK_REF} && cd ../
- . "./spack/share/spack/setup-env.sh"
- spack --version
- export SPACK_USER_CONFIG_PATH=${CI_PROJECT_DIR}
- spack config blame mirrors
- git clone ${SPACK_REPO}
- pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
- . "./spack/share/spack/setup-env.sh"
- build-job:
tags: [docker]
image:
name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
entrypoint: [""]
The elements of this file important to spack ci pipelines are described in more
detail below, but there are a couple of things to note about the above working
example:
.. note::
The use of ``reuse: false`` in spack environments used for pipelines is
almost always what you want, as without it your pipelines will not rebuild
packages even if package hashes have changed. This is due to the concretizer
strongly preferring known hashes when ``reuse: true``.
There is no ``script`` attribute specified here. The reason for this is that
Spack CI will automatically generate reasonable default scripts. More
detail on what is in these scripts can be found below.
The ``ci`` section in the above environment file contains the bare minimum
configuration required for ``spack ci generate`` to create a working pipeline.
The ``target: gitlab`` tells spack that the desired pipeline output is for
gitlab. However, this isn't strictly required, as currently gitlab is the
only possible output format for pipelines. The ``pipeline-gen`` section
contains the key information needed to specify attributes for the generated
jobs. Notice that it contains a list which has only a single element in
this case. In real pipelines it will almost certainly have more elements,
and in those cases, order is important: spack starts at the bottom of the
list and works upwards when applying attributes.
Also notice the ``before_script`` section. It is required when using any of the
default scripts to source the ``setup-env.sh`` script in order to inform
the default scripts where to find the ``spack`` executable.
But in this simple case, we use only the special key ``any-job`` to
indicate that spack should apply the specified attributes (``tags``, ``image``,
and ``before_script``) to any job it generates. This includes jobs for
building/pushing all packages, a ``rebuild-index`` job at the end of the
pipeline, as well as any ``noop`` jobs that might be needed by gitlab when
no rebuilds are required.
Normally ``enable-artifacts-buildcache`` is not recommended in production as it
results in large binary artifacts getting transferred back and forth between
gitlab and the runners. But in this example on gitlab.com where there is no
shared, persistent file system, and where no secrets are stored for giving
permission to write to an S3 bucket, ``enable-artifacts-buildcache`` is the only
way to propagate binaries from jobs to their dependents.
Something to note is that in this simple case, we rely on spack to
generate a reasonable script for the package build jobs (it just creates
a script that invokes ``spack ci rebuild``).
Also, it is usually a good idea to let the pipeline generate a final "rebuild the
buildcache index" job, so that subsequent pipeline generation can quickly determine
which specs are up to date and which need to be rebuilt (it's a good idea for other
reasons as well, but those are out of scope for this discussion). In this case we
have disabled it (using ``rebuild-index: False``) because the index would only be
generated in the artifacts mirror anyway, and consequently would not be available
during subsequent pipeline runs.
Another thing to note is the use of the ``SPACK_USER_CONFIG_PATH`` environment
variable in any generated jobs. The purpose of this is to make spack
aware of one final file in the example, the one that contains the mirror
configuration. This file, ``mirrors.yaml`` looks like this:
.. note::
With the addition of reproducible builds (#22887) a previously working
pipeline will require some changes:
.. code-block:: yaml
* In the build-jobs, the environment location changed.
This will typically show as a ``KeyError`` in the failing job. Be sure to
point to ``${SPACK_CONCRETE_ENV_DIR}``.
mirrors:
buildcache-destination:
url: oci://registry.gitlab.com/spack/pipeline-quickstart
binary: true
access_pair:
id_variable: CI_REGISTRY_USER
secret_variable: CI_REGISTRY_PASSWORD
* When using ``include`` in your environment, be sure to make the included
files available in the build jobs. This means adding those files to the
artifact directory. Those files will also be missing in the reproducibility
artifact.
Note the name of the mirror is ``buildcache-destination``, which is required
as of Spack 0.23 (see below for more information). The mirror url simply
points to the container registry associated with the project, while
``id_variable`` and ``secret_variable`` refer to environment variables
containing the access credentials for the mirror.
When spack builds packages for this example project, they will be pushed to
the project container registry, where they will be available for subsequent
jobs to install as dependencies, or for other pipelines to use to build runnable
container images.
* Because the location of the environment changed, including files with
relative path may have to be adapted to work both in the project context
(generation job) and in the concrete env dir context (build job).
-----------------------------------
Spack commands supporting pipelines
@@ -435,6 +417,15 @@ configuration with a ``script`` attribute. Specifying a signing job without a sc
does not create a signing job and the job configuration attributes will be ignored.
Signing jobs are always assigned the runner tags ``aws``, ``protected``, and ``notary``.
^^^^^^^^^^^^^^^^^
Cleanup (cleanup)
^^^^^^^^^^^^^^^^^
When using ``temporary-storage-url-prefix`` the cleanup job will destroy the mirror
created for the associated Gitlab pipeline. Cleanup jobs do not allow modifying the
script, but do expect that the spack command is in the path and require a
``before_script`` to be specified that sources the ``setup-env.sh`` script.
.. _noop_jobs:
^^^^^^^^^^^^
@@ -750,6 +741,15 @@ environment/stack file, and in that case no bootstrapping will be done (only the
specs will be staged for building) and the runners will be expected to already
have all needed compilers installed and configured for spack to use.
^^^^^^^^^^^^^^^^^^^
Pipeline Buildcache
^^^^^^^^^^^^^^^^^^^
The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``).
^^^^^^^^^^^^^^^^
Broken Specs URL
^^^^^^^^^^^^^^^^


@@ -1,13 +1,13 @@
sphinx==8.1.3
sphinx==7.4.7
sphinxcontrib-programoutput==0.17
sphinx_design==0.6.1
sphinx-rtd-theme==3.0.1
python-levenshtein==0.26.1
docutils==0.21.2
python-levenshtein==0.26.0
docutils==0.20.1
pygments==2.18.0
urllib3==2.2.3
pytest==8.3.3
isort==5.13.2
black==24.10.0
black==24.8.0
flake8==7.1.1
mypy==1.11.1

lib/spack/env/cc (vendored)

@@ -101,9 +101,10 @@ setsep() {
esac
}
# prepend LISTNAME ELEMENT
# prepend LISTNAME ELEMENT [SEP]
#
# Prepend ELEMENT to the list stored in the variable LISTNAME.
# Prepend ELEMENT to the list stored in the variable LISTNAME,
# assuming the list is separated by SEP.
# Handles empty lists and single-element lists.
prepend() {
varname="$1"
@@ -237,36 +238,6 @@ esac
}
"
# path_list functions. Path_lists have 3 parts: spack_store_<list>, <list> and system_<list>,
# which are used to prioritize paths when assembling the final command line.
# init_path_lists LISTNAME
# Set <LISTNAME>, spack_store_<LISTNAME>, and system_<LISTNAME> to "".
init_path_lists() {
eval "spack_store_$1=\"\""
eval "$1=\"\""
eval "system_$1=\"\""
}
# assign_path_lists LISTNAME1 LISTNAME2
# Copy contents of LISTNAME2 into LISTNAME1, for each path_list prefix.
assign_path_lists() {
eval "spack_store_$1=\"\${spack_store_$2}\""
eval "$1=\"\${$2}\""
eval "system_$1=\"\${system_$2}\""
}
# append_path_lists LISTNAME ELT
# Append the provided ELT to the appropriate list, based on the result of path_order().
append_path_lists() {
path_order "$2"
case $? in
0) eval "append spack_store_$1 \"\$2\"" ;;
1) eval "append $1 \"\$2\"" ;;
2) eval "append system_$1 \"\$2\"" ;;
esac
}
# Check if optional parameters are defined
# If we aren't asking for debug flags, don't add them
if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then
@@ -499,7 +470,12 @@ input_command="$*"
parse_Wl() {
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
append_path_lists return_rpath_dirs_list "$1"
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
wl_expect_rpath=no
else
case "$1" in
@@ -508,14 +484,24 @@ parse_Wl() {
if [ -z "$arg" ]; then
shift; continue
fi
append_path_lists return_rpath_dirs_list "$arg"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
--rpath=*)
arg="${1#--rpath=}"
if [ -z "$arg" ]; then
shift; continue
fi
append_path_lists return_rpath_dirs_list "$arg"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
-rpath|--rpath)
wl_expect_rpath=yes
@@ -523,7 +509,8 @@ parse_Wl() {
"$dtags_to_strip")
;;
-Wl)
# Nested -Wl,-Wl means we're in NAG compiler territory. We don't support it.
# Nested -Wl,-Wl means we're in NAG compiler territory, we don't support
# it.
return 1
;;
*)
@@ -542,10 +529,21 @@ categorize_arguments() {
return_other_args_list=""
return_isystem_was_used=""
init_path_lists return_isystem_include_dirs_list
init_path_lists return_include_dirs_list
init_path_lists return_lib_dirs_list
init_path_lists return_rpath_dirs_list
return_isystem_spack_store_include_dirs_list=""
return_isystem_system_include_dirs_list=""
return_isystem_include_dirs_list=""
return_spack_store_include_dirs_list=""
return_system_include_dirs_list=""
return_include_dirs_list=""
return_spack_store_lib_dirs_list=""
return_system_lib_dirs_list=""
return_lib_dirs_list=""
return_spack_store_rpath_dirs_list=""
return_system_rpath_dirs_list=""
return_rpath_dirs_list=""
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
@@ -611,17 +609,32 @@ categorize_arguments() {
arg="${1#-isystem}"
return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_isystem_include_dirs_list "$arg"
path_order "$arg"
case $? in
0) append return_isystem_spack_store_include_dirs_list "$arg" ;;
1) append return_isystem_include_dirs_list "$arg" ;;
2) append return_isystem_system_include_dirs_list "$arg" ;;
esac
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_include_dirs_list "$arg"
path_order "$arg"
case $? in
0) append return_spack_store_include_dirs_list "$arg" ;;
1) append return_include_dirs_list "$arg" ;;
2) append return_system_include_dirs_list "$arg" ;;
esac
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_lib_dirs_list "$arg"
path_order "$arg"
case $? in
0) append return_spack_store_lib_dirs_list "$arg" ;;
1) append return_lib_dirs_list "$arg" ;;
2) append return_system_lib_dirs_list "$arg" ;;
esac
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
@@ -654,17 +667,32 @@ categorize_arguments() {
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
append_path_lists return_rpath_dirs_list "$1"
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
--rpath=*)
arg="${1#--rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
@@ -681,36 +709,7 @@ categorize_arguments() {
"$dtags_to_strip")
;;
*)
# if mode is not ld, we can just add to other args
if [ "$mode" != "ld" ]; then
append return_other_args_list "$1"
shift
continue
fi
# if we're in linker mode, we need to parse raw RPATH args
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
--rpath=*)
arg="${1#--rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
-rpath|--rpath)
if [ $# -eq 1 ]; then
# -rpath without value: let the linker raise an error.
append return_other_args_list "$1"
break
fi
shift
append_path_lists return_rpath_dirs_list "$1"
;;
*)
append return_other_args_list "$1"
;;
esac
append return_other_args_list "$1"
;;
esac
shift
@@ -732,10 +731,21 @@ categorize_arguments() {
categorize_arguments "$@"
assign_path_lists isystem_include_dirs_list return_isystem_include_dirs_list
assign_path_lists include_dirs_list return_include_dirs_list
assign_path_lists lib_dirs_list return_lib_dirs_list
assign_path_lists rpath_dirs_list return_rpath_dirs_list
spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
include_dirs_list="$return_include_dirs_list"
spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list"
@@ -811,10 +821,21 @@ IFS="$lsep"
categorize_arguments $spack_flags_list
unset IFS
assign_path_lists spack_flags_isystem_include_dirs_list return_isystem_include_dirs_list
assign_path_lists spack_flags_include_dirs_list return_include_dirs_list
assign_path_lists spack_flags_lib_dirs_list return_lib_dirs_list
assign_path_lists spack_flags_rpath_dirs_list return_rpath_dirs_list
spack_flags_isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list"
@@ -873,7 +894,7 @@ esac
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
extend spack_store_isystem_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
else
extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
@@ -889,63 +910,64 @@ args_list="$flags_list"
# Include search paths partitioned by (in store, non-system, system)
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_store_spack_flags_include_dirs_list -I
extend args_list spack_flags_spack_store_include_dirs_list -I
extend args_list spack_store_include_dirs_list -I
extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I
extend args_list spack_store_spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_store_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"
extend args_list system_spack_flags_include_dirs_list -I
extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I
extend args_list system_spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list system_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
# Library search paths partitioned by (in store, non-system, system)
extend args_list spack_store_spack_flags_lib_dirs_list "-L"
extend args_list spack_flags_spack_store_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"
extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"
extend args_list system_spack_flags_lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
# RPATHs arguments
rpath_prefix=""
case "$mode" in
ccld)
if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add"
fi
rpath_prefix="$rpath"
extend args_list spack_flags_spack_store_rpath_dirs_list "$rpath"
extend args_list spack_store_rpath_dirs_list "$rpath"
extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath"
;;
ld)
if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add"
fi
rpath_prefix="-rpath${lsep}"
extend args_list spack_flags_spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}"
;;
esac
# if mode is ccld or ld, extend RPATH lists with the prefix determined above
if [ -n "$rpath_prefix" ]; then
extend args_list spack_store_spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list spack_store_rpath_dirs_list "$rpath_prefix"
extend args_list spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list rpath_dirs_list "$rpath_prefix"
extend args_list system_spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list system_rpath_dirs_list "$rpath_prefix"
fi
# Other arguments from the input command
extend args_list other_args_list
extend args_list spack_flags_other_args_list


@@ -20,23 +20,11 @@
import tempfile
from contextlib import contextmanager
from itertools import accumulate
from typing import (
Callable,
Deque,
Dict,
Iterable,
List,
Match,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from typing import Callable, Iterable, List, Match, Optional, Tuple, Union
import llnl.util.symlink
from llnl.util import tty
from llnl.util.lang import dedupe, fnmatch_translate_multiple, memoized
from llnl.util.lang import dedupe, memoized
from llnl.util.symlink import islink, readlink, resolve_link_target_relative_to_the_link, symlink
from ..path import path_to_os_path, system_path_filter
@@ -97,8 +85,6 @@
"visit_directory_tree",
]
Path = Union[str, pathlib.Path]
if sys.version_info < (3, 7, 4):
# monkeypatch shutil.copystat to fix PermissionError when copying read-only
# files on Lustre when using Python < 3.7.4
@@ -1687,203 +1673,105 @@ def find_first(root: str, files: Union[Iterable[str], str], bfs_depth: int = 2)
return FindFirstFile(root, *files, bfs_depth=bfs_depth).find()
def find(
root: Union[Path, Sequence[Path]],
files: Union[str, Sequence[str]],
recursive: bool = True,
max_depth: Optional[int] = None,
) -> List[str]:
"""Finds all files matching the patterns from ``files`` starting from ``root``. This function
returns a deterministic result for the same input and directory structure when run multiple
times. Symlinked directories are followed, and unique directories are searched only once. Each
matching file is returned only once at lowest depth in case multiple paths exist due to
symlinked directories.
def find(root, files, recursive=True):
"""Search for ``files`` starting from the ``root`` directory.
Like GNU/BSD find but written entirely in Python.
Examples:
.. code-block:: console
$ find /usr -name python
is equivalent to:
>>> find('/usr', 'python')
.. code-block:: console
$ find /usr/local/bin -maxdepth 1 -name python
is equivalent to:
>>> find('/usr/local/bin', 'python', recursive=False)
Accepts any glob characters accepted by fnmatch:
========== ====================================
Pattern Meaning
========== ====================================
``*`` matches one or more characters
``*`` matches everything
``?`` matches any single character
``[seq]`` matches any character in ``seq``
``[!seq]`` matches any character not in ``seq``
========== ====================================
Examples:
>>> find("/usr", "*.txt", recursive=True, max_depth=2)
finds all files with the extension ``.txt`` in the directory ``/usr`` and subdirectories up to
depth 2.
>>> find(["/usr", "/var"], ["*.txt", "*.log"], recursive=True)
finds all files with the extension ``.txt`` or ``.log`` in the directories ``/usr`` and
``/var`` at any depth.
>>> find("/usr", "GL/*.h", recursive=True)
finds all header files in a directory GL at any depth in the directory ``/usr``.
Parameters:
root: One or more root directories to start searching from
files: One or more filename patterns to search for
recursive: if False, search only the roots; if True, descend from the roots. Defaults to True.
max_depth: if set, don't search below this depth. Cannot be set if recursive is False
root (str): The root directory to start searching from
files (str or collections.abc.Sequence): Library name(s) to search for
recursive (bool): if False search only root folder,
if True descends top-down from the root. Defaults to True.
Returns a list of absolute, matching file paths.
Returns:
list: The files that have been found
"""
if isinstance(root, (str, pathlib.Path)):
root = [root]
elif not isinstance(root, collections.abc.Sequence):
raise TypeError(f"'root' arg must be a path or a sequence of paths, not '{type(root)}']")
if isinstance(files, str):
files = [files]
elif not isinstance(files, collections.abc.Sequence):
raise TypeError(f"'files' arg must be str or a sequence of str, not '{type(files)}']")
# If recursive is false, max_depth can only be None or 0
if max_depth and not recursive:
raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")
if recursive:
tty.debug(f"Find (recursive): {root} {str(files)}")
result = _find_recursive(root, files)
else:
tty.debug(f"Find (not recursive): {root} {str(files)}")
result = _find_non_recursive(root, files)
tty.debug(f"Find (max depth = {max_depth}): {root} {files}")
if not recursive:
max_depth = 0
elif max_depth is None:
max_depth = sys.maxsize
result = _find_max_depth(root, files, max_depth)
tty.debug(f"Find complete: {root} {files}")
tty.debug(f"Find complete: {root} {str(files)}")
return result
def _log_file_access_issue(e: OSError, path: str) -> None:
errno_name = errno.errorcode.get(e.errno, "UNKNOWN")
tty.debug(f"find must skip {path}: {errno_name} {e}")
@system_path_filter
def _find_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict. The idea is that
# we want to poke the filesystem as little as possible, but still maintain
# stability in the order of the answer. Thus we are recording each library
# found in a key, and reconstructing the stable order later.
found_files = collections.defaultdict(list)
# Make the path absolute to have os.walk also return an absolute path
root = os.path.abspath(root)
for path, _, list_files in os.walk(root):
for search_file in search_files:
matches = glob.glob(os.path.join(path, search_file))
matches = [os.path.join(path, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
def _file_id(s: os.stat_result) -> Tuple[int, int]:
# Note: on windows, st_ino is the file index and st_dev is the volume serial number. See
# https://github.com/python/cpython/blob/3.9/Python/fileutils.c
return (s.st_ino, s.st_dev)
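A standalone sketch of why (inode, device) works as a dedup key: two hard links to the same file compare equal under it even though their paths differ (the temp files here are only for illustration):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    a = os.path.join(tmp, "a")
    b = os.path.join(tmp, "b")
    open(a, "w").close()
    os.link(a, b)  # hard link; requires OS/filesystem support

    def file_id(path):
        s = os.stat(path)
        return (s.st_ino, s.st_dev)

    assert file_id(a) == file_id(b)  # same file, two names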
@system_path_filter
def _find_non_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict, as os.listdir
# can return files in any order (does not preserve stability)
found_files = collections.defaultdict(list)
# Make the path absolute to have absolute path returned
root = os.path.abspath(root)
def _dedupe_files(paths: List[str]) -> List[str]:
"""Deduplicate files by inode and device, dropping files that cannot be accessed."""
unique_files: List[str] = []
# tuple of (inode, device) for each file without following symlinks
visited: Set[Tuple[int, int]] = set()
for path in paths:
try:
stat_info = os.lstat(path)
except OSError as e:
_log_file_access_issue(e, path)
continue
file_id = _file_id(stat_info)
if file_id not in visited:
unique_files.append(path)
visited.add(file_id)
return unique_files
for search_file in search_files:
matches = glob.glob(os.path.join(root, search_file))
matches = [os.path.join(root, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
def _find_max_depth(
roots: Sequence[Path], globs: Sequence[str], max_depth: int = sys.maxsize
) -> List[str]:
"""See ``find`` for the public API."""
# We optimize for the common case of simple filename only patterns: a single, combined regex
# is used. For complex patterns that include path components, we use a slower glob call from
# every directory we visit within max_depth.
filename_only_patterns = {
f"pattern_{i}": os.path.normcase(x) for i, x in enumerate(globs) if "/" not in x
}
complex_patterns = {f"pattern_{i}": x for i, x in enumerate(globs) if "/" in x}
regex = re.compile(fnmatch_translate_multiple(filename_only_patterns))
# Ordered dictionary that keeps track of what pattern found which files
matched_paths: Dict[str, List[str]] = {f"pattern_{i}": [] for i, _ in enumerate(globs)}
# Ensure returned paths are always absolute
roots = [os.path.abspath(r) for r in roots]
# Breadth-first search queue. Each element is a tuple of (depth, dir)
dir_queue: Deque[Tuple[int, str]] = collections.deque()
# Set of visited directories. Each element is a tuple of (inode, device)
visited_dirs: Set[Tuple[int, int]] = set()
for root in roots:
try:
stat_root = os.stat(root)
except OSError as e:
_log_file_access_issue(e, root)
continue
dir_id = _file_id(stat_root)
if dir_id not in visited_dirs:
dir_queue.appendleft((0, root))
visited_dirs.add(dir_id)
while dir_queue:
depth, curr_dir = dir_queue.pop()
try:
dir_iter = os.scandir(curr_dir)
except OSError as e:
_log_file_access_issue(e, curr_dir)
continue
# Use glob.glob for complex patterns.
for pattern_name, pattern in complex_patterns.items():
matched_paths[pattern_name].extend(
path for path in glob.glob(os.path.join(curr_dir, pattern))
)
# List of subdirectories by path and (inode, device) tuple
subdirs: List[Tuple[str, Tuple[int, int]]] = []
with dir_iter:
for dir_entry in dir_iter:
# Match filename only patterns
if filename_only_patterns:
m = regex.match(os.path.normcase(dir_entry.name))
if m:
for pattern_name in filename_only_patterns:
if m.group(pattern_name):
matched_paths[pattern_name].append(dir_entry.path)
break
# Collect subdirectories
if depth >= max_depth:
continue
try:
if not dir_entry.is_dir(follow_symlinks=True):
continue
if sys.platform == "win32":
# Note: st_ino/st_dev on DirEntry.stat are not set on Windows, so we have
# to call os.stat
stat_info = os.stat(dir_entry.path, follow_symlinks=True)
else:
stat_info = dir_entry.stat(follow_symlinks=True)
except OSError as e:
# Possible permission issue, or a symlink that cannot be resolved (ELOOP).
_log_file_access_issue(e, dir_entry.path)
continue
subdirs.append((dir_entry.path, _file_id(stat_info)))
# Enqueue subdirectories in a deterministic order
if subdirs:
subdirs.sort(key=lambda s: os.path.basename(s[0]))
for subdir, subdir_id in subdirs:
if subdir_id not in visited_dirs:
dir_queue.appendleft((depth + 1, subdir))
visited_dirs.add(subdir_id)
# Sort the matched paths for deterministic output
for paths in matched_paths.values():
paths.sort()
all_matching_paths = [path for paths in matched_paths.values() for path in paths]
# We only dedupe files if we have any complex patterns, since only they can match the same file
# multiple times
return _dedupe_files(all_matching_paths) if complex_patterns else all_matching_paths
return answer
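A toy illustration of the pattern split described in _find_max_depth above (a sketch only; the os.path.normcase call is omitted for brevity): filename-only globs feed the combined regex, while globs with a path component are kept for per-directory glob calls.

globs = ["*.h", "GL/*.h"]
filename_only_patterns = {f"pattern_{i}": g for i, g in enumerate(globs) if "/" not in g}
complex_patterns = {f"pattern_{i}": g for i, g in enumerate(globs) if "/" in g}
assert filename_only_patterns == {"pattern_0": "*.h"}
assert complex_patterns == {"pattern_1": "GL/*.h"}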
# Utilities for libraries and headers
@@ -2322,9 +2210,7 @@ def find_system_libraries(libraries, shared=True):
return libraries_found
def find_libraries(
libraries, root, shared=True, recursive=False, runtime=True, max_depth: Optional[int] = None
):
def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
"""Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by fnmatch:
@@ -2345,8 +2231,6 @@ def find_libraries(
otherwise for static. Defaults to True.
recursive (bool): if False search only root folder,
if True descends top-down from the root. Defaults to False.
max_depth (int): if set, don't search below this depth. Cannot be set
if recursive is False
runtime (bool): Windows only option, no-op elsewhere. If true,
search for runtime shared libs (.DLL), otherwise, search
for .Lib files. If shared is false, this has no meaning.
@@ -2355,7 +2239,6 @@ def find_libraries(
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, str):
libraries = [libraries]
elif not isinstance(libraries, collections.abc.Sequence):
@@ -2388,10 +2271,8 @@ def find_libraries(
libraries = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes]
if not recursive:
if max_depth:
raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")
# If not recursive, look for the libraries directly in root
return LibraryList(find(root, libraries, recursive=False))
return LibraryList(find(root, libraries, False))
# To speedup the search for external packages configured e.g. in /usr,
# perform first non-recursive search in root/lib then in root/lib64 and
@@ -2409,7 +2290,7 @@ def find_libraries(
if found_libs:
break
else:
found_libs = find(root, libraries, recursive=True, max_depth=max_depth)
found_libs = find(root, libraries, True)
return LibraryList(found_libs)
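A toy expansion mirroring the suffix handling in find_libraries above (the suffix list here is a simplified assumption; the real one is platform-dependent):

libraries = ["libz*"]
suffixes = ["so", "a"]
patterns = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes]
assert patterns == ["libz*.so", "libz*.a"]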

View File

@@ -5,17 +5,14 @@
import collections.abc
import contextlib
import fnmatch
import functools
import itertools
import os
import re
import sys
import traceback
import typing
import warnings
from datetime import datetime, timedelta
from typing import Callable, Dict, Iterable, List, Tuple, TypeVar
from typing import Callable, Iterable, List, Tuple, TypeVar
# Ignore emacs backups when listing modules
ignore_modules = r"^\.#|~$"
@@ -861,19 +858,6 @@ def elide_list(line_list: List[str], max_num: int = 10) -> List[str]:
return line_list
if sys.version_info >= (3, 9):
PatternStr = re.Pattern[str]
else:
PatternStr = typing.Pattern[str]
def fnmatch_translate_multiple(named_patterns: Dict[str, str]) -> str:
"""Similar to ``fnmatch.translate``, but takes an ordered dictionary where keys are pattern
names, and values are filename patterns. The output is a regex that matches any of the
patterns in order, and named capture groups are used to identify which pattern matched."""
return "|".join(f"(?P<{n}>{fnmatch.translate(p)})" for n, p in named_patterns.items())
@contextlib.contextmanager
def nullcontext(*args, **kwargs):
"""Empty context manager.
@@ -886,6 +870,15 @@ class UnhashableArguments(TypeError):
"""Raise when an @memoized function receives unhashable arg or kwarg values."""
def enum(**kwargs):
"""Return an enum-like class.
Args:
**kwargs: explicit dictionary of enums
"""
return type("Enum", (object,), kwargs)
T = TypeVar("T")
@@ -921,21 +914,6 @@ def ensure_last(lst, *elements):
lst.append(lst.pop(lst.index(elt)))
class Const:
"""Class level constant, raises when trying to set the attribute"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def __get__(self, instance, owner):
return self.value
def __set__(self, instance, value):
raise TypeError(f"Const value does not support assignment [value={self.value}]")
class TypedMutableSequence(collections.abc.MutableSequence):
"""Base class that behaves like a list, just with a different type.
@@ -1040,42 +1018,3 @@ def __init__(self, callback):
def __get__(self, instance, owner):
return self.callback(owner)
class DeprecatedProperty:
"""Data descriptor to error or warn when a deprecated property is accessed.
Derived classes must define a factory method to return an adaptor for the deprecated
property, if the descriptor is not set to error.
"""
__slots__ = ["name"]
#: 0 - Nothing
#: 1 - Warning
#: 2 - Error
error_lvl = 0
def __init__(self, name: str) -> None:
self.name = name
def __get__(self, instance, owner):
if instance is None:
return self
if self.error_lvl == 1:
warnings.warn(
f"accessing the '{self.name}' property of '{instance}', which is deprecated"
)
elif self.error_lvl == 2:
raise AttributeError(f"cannot access the '{self.name}' attribute of '{instance}'")
return self.factory(instance, owner)
def __set__(self, instance, value):
raise TypeError(
f"the deprecated property '{self.name}' of '{instance}' does not support assignment"
)
def factory(self, instance, owner):
raise NotImplementedError("must be implemented by derived classes")

View File

@@ -263,9 +263,7 @@ def match_to_ansi(match):
f"Incomplete color format: '{match.group(0)}' in '{match.string}'"
)
color_number = colors.get(color_code, "")
semi = ";" if color_number else ""
ansi_code = _escape(f"{styles[style]}{semi}{color_number}", color, enclose, zsh)
ansi_code = _escape(f"{styles[style]};{colors.get(color_code, '')}", color, enclose, zsh)
if text:
return f"{ansi_code}{text}{_escape(0, color, enclose, zsh)}"
else:

View File

@@ -10,6 +10,7 @@
import errno
import io
import multiprocessing
import multiprocessing.connection
import os
import re
import select
@@ -18,10 +19,9 @@
import threading
import traceback
from contextlib import contextmanager
from multiprocessing.connection import Connection
from threading import Thread
from types import ModuleType
from typing import Callable, Optional
from typing import Optional
import llnl.util.tty as tty
@@ -345,6 +345,49 @@ def close(self):
self.file.close()
class MultiProcessFd:
"""Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
def __init__(self, fd):
self._connection = None
self._fd = None
if sys.version_info >= (3, 8):
self._connection = multiprocessing.connection.Connection(fd)
else:
self._fd = fd
@property
def fd(self):
if self._connection:
return self._connection._handle
else:
return self._fd
def close(self):
if self._connection:
self._connection.close()
else:
os.close(self._fd)
def close_connection_and_file(multiprocess_fd, file):
# MultiProcessFd is intended to transmit an FD to a child process,
# where the FD is then opened into a Python file object (using fdopen).
# In >= 3.8, MultiProcessFd encapsulates a
# multiprocessing.connection.Connection; Connection closes the FD
# when it is deleted, and prints a warning about duplicate closure if
# it is not explicitly closed. In < 3.8, MultiProcessFd encapsulates a
# simple FD, and closing that FD appears to conflict with closing the
# file object. Therefore this function needs to choose whether to
# close the file or the Connection.
if sys.version_info >= (3, 8):
multiprocess_fd.close()
else:
file.close()
@contextmanager
def replace_environment(env):
"""Replace the current environment (`os.environ`) with `env`.
@@ -502,20 +545,22 @@ def __enter__(self):
# forcing debug output.
self._saved_debug = tty._debug
# Pipe for redirecting output to logger
read_fd, self.write_fd = multiprocessing.Pipe(duplex=False)
# OS-level pipe for redirecting output to logger
read_fd, write_fd = os.pipe()
# Pipe for communication back from the daemon
read_multiprocess_fd = MultiProcessFd(read_fd)
# Multiprocessing pipe for communication back from the daemon
# Currently only used to save echo value between uses
self.parent_pipe, child_pipe = multiprocessing.Pipe(duplex=False)
self.parent_pipe, child_pipe = multiprocessing.Pipe()
# Sets a daemon that writes to file what it reads from a pipe
try:
# need to pass this b/c multiprocessing closes stdin in child.
input_fd = None
input_multiprocess_fd = None
try:
if sys.stdin.isatty():
input_fd = Connection(os.dup(sys.stdin.fileno()))
input_multiprocess_fd = MultiProcessFd(os.dup(sys.stdin.fileno()))
except BaseException:
# just don't forward input if this fails
pass
@@ -524,9 +569,9 @@ def __enter__(self):
self.process = multiprocessing.Process(
target=_writer_daemon,
args=(
input_fd,
read_fd,
self.write_fd,
input_multiprocess_fd,
read_multiprocess_fd,
write_fd,
self.echo,
self.log_file,
child_pipe,
@@ -537,9 +582,9 @@ def __enter__(self):
self.process.start()
finally:
if input_fd:
input_fd.close()
read_fd.close()
if input_multiprocess_fd:
input_multiprocess_fd.close()
read_multiprocess_fd.close()
# Flush immediately before redirecting so that anything buffered
# goes to the original stream
@@ -557,9 +602,9 @@ def __enter__(self):
self._saved_stderr = os.dup(sys.stderr.fileno())
# redirect to the pipe we created above
os.dup2(self.write_fd.fileno(), sys.stdout.fileno())
os.dup2(self.write_fd.fileno(), sys.stderr.fileno())
self.write_fd.close()
os.dup2(write_fd, sys.stdout.fileno())
os.dup2(write_fd, sys.stderr.fileno())
os.close(write_fd)
else:
# Handle I/O the Python way. This won't redirect lower-level
@@ -572,7 +617,7 @@ def __enter__(self):
self._saved_stderr = sys.stderr
# create a file object for the pipe; redirect to it.
pipe_fd_out = os.fdopen(self.write_fd.fileno(), "w", closefd=False)
pipe_fd_out = os.fdopen(write_fd, "w")
sys.stdout = pipe_fd_out
sys.stderr = pipe_fd_out
@@ -608,7 +653,6 @@ def __exit__(self, exc_type, exc_val, exc_tb):
else:
sys.stdout = self._saved_stdout
sys.stderr = self._saved_stderr
self.write_fd.close()
# print log contents in parent if needed.
if self.log_file.write_in_parent:
@@ -822,14 +866,14 @@ def force_echo(self):
def _writer_daemon(
stdin_fd: Optional[Connection],
read_fd: Connection,
write_fd: Connection,
echo: bool,
log_file_wrapper: FileWrapper,
control_fd: Connection,
filter_fn: Optional[Callable[[str], str]],
) -> None:
stdin_multiprocess_fd,
read_multiprocess_fd,
write_fd,
echo,
log_file_wrapper,
control_pipe,
filter_fn,
):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both
@@ -866,37 +910,43 @@ def _writer_daemon(
``StringIO`` in the parent. This is mainly for testing.
Arguments:
stdin_fd: optional input from the terminal
read_fd: pipe for reading from parent's redirected stdout
echo: initial echo setting -- controlled by user and preserved across multiple writer
daemons
log_file_wrapper: file to log all output
control_fd: connection on which to send control information back to the parent
filter_fn: optional function to filter each line of output
stdin_multiprocess_fd (int): input from the terminal
read_multiprocess_fd (int): pipe for reading from parent's redirected
stdout
echo (bool): initial echo setting -- controlled by user and
preserved across multiple writer daemons
log_file_wrapper (FileWrapper): file to log all output
control_pipe (Pipe): multiprocessing pipe on which to send control
information to the parent
filter_fn (callable, optional): function to filter each line of output
"""
# This process depends on closing all instances of write_pipe to terminate the reading loop
write_fd.close()
# If this process was forked, then it will inherit file descriptors from
# the parent process. This process depends on closing all instances of
# write_fd to terminate the reading loop, so we close the file descriptor
# here. Forking is the process spawning method everywhere except Mac OS
# for Python >= 3.8 and on Windows
if sys.version_info < (3, 8) or sys.platform != "darwin":
os.close(write_fd)
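A minimal sketch of the pipe semantics this relies on: a reader only sees EOF once every copy of the write end is closed, which is why the daemon closes the copy it inherited.

import os

r, w = os.pipe()
w_inherited = os.dup(w)  # stands in for the copy a forked child would inherit
os.write(w, b"x")
os.close(w)
assert os.read(r, 1) == b"x"  # buffered data is still delivered
os.close(w_inherited)  # only now does the pipe report EOF
assert os.read(r, 1) == b""
os.close(r)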
# 1. Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O.
# 2. Python 3.x before 3.7 does not open with UTF-8 encoding by default
# 3. closefd=False because Connection has "ownership"
read_file = os.fdopen(read_fd.fileno(), "r", 1, encoding="utf-8", closefd=False)
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
if stdin_fd:
stdin_file = os.fdopen(stdin_fd.fileno(), closefd=False)
if stdin_multiprocess_fd:
stdin = os.fdopen(stdin_multiprocess_fd.fd)
else:
stdin_file = None
stdin = None
# list of streams to select from
istreams = [read_file, stdin_file] if stdin_file else [read_file]
istreams = [in_pipe, stdin] if stdin else [in_pipe]
force_echo = False # parent can force echo for certain output
log_file = log_file_wrapper.unwrap()
try:
with keyboard_input(stdin_file) as kb:
with keyboard_input(stdin) as kb:
while True:
# fix the terminal settings if we recently came to
# the foreground
@@ -909,12 +959,12 @@ def _writer_daemon(
# Allow user to toggle echo with 'v' key.
# Currently ignores other chars.
# only read stdin if we're in the foreground
if stdin_file and stdin_file in rlist and not _is_background_tty(stdin_file):
if stdin in rlist and not _is_background_tty(stdin):
# it's possible to be backgrounded between the above
# check and the read, so we ignore SIGTTIN here.
with ignore_signal(signal.SIGTTIN):
try:
if stdin_file.read(1) == "v":
if stdin.read(1) == "v":
echo = not echo
except IOError as e:
# If SIGTTIN is ignored, the system gives EIO
@@ -923,13 +973,13 @@ def _writer_daemon(
if e.errno != errno.EIO:
raise
if read_file in rlist:
if in_pipe in rlist:
line_count = 0
try:
while line_count < 100:
# Handle output from the calling process.
try:
line = _retry(read_file.readline)()
line = _retry(in_pipe.readline)()
except UnicodeDecodeError:
# installs like --test=root gpgme produce non-UTF8 logs
line = "<line lost: output was not encoded as UTF-8>\n"
@@ -958,7 +1008,7 @@ def _writer_daemon(
if xoff in controls:
force_echo = False
if not _input_available(read_file):
if not _input_available(in_pipe):
break
finally:
if line_count > 0:
@@ -973,14 +1023,14 @@ def _writer_daemon(
finally:
# send written data back to parent if we used a StringIO
if isinstance(log_file, io.StringIO):
control_fd.send(log_file.getvalue())
control_pipe.send(log_file.getvalue())
log_file_wrapper.close()
read_fd.close()
if stdin_fd:
stdin_fd.close()
close_connection_and_file(read_multiprocess_fd, in_pipe)
if stdin_multiprocess_fd:
close_connection_and_file(stdin_multiprocess_fd, stdin)
# send echo value back to the parent so it can be preserved.
control_fd.send(echo)
control_pipe.send(echo)
def _retry(function):

View File

@@ -11,7 +11,7 @@
import spack.util.git
#: PEP440 canonical <major>.<minor>.<micro>.<devN> string
__version__ = "1.0.0.dev0"
__version__ = "0.23.0.dev0"
spack_version = __version__
@@ -69,15 +69,4 @@ def get_version() -> str:
return spack_version
def get_short_version() -> str:
"""Short Spack version."""
return f"{spack_version_info[0]}.{spack_version_info[1]}"
__all__ = [
"spack_version_info",
"spack_version",
"get_version",
"get_spack_commit",
"get_short_version",
]
__all__ = ["spack_version_info", "spack_version", "get_version", "get_spack_commit"]

View File

@@ -714,16 +714,17 @@ def _ensure_env_methods_are_ported_to_builders(pkgs, error_cls):
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
# values are either ConditionalValue objects or the values themselves
# values are either Value objects (for conditional values) or the values themselves
build_system_names = set(
v.value if isinstance(v, spack.variant.ConditionalValue) else v
v.value if isinstance(v, spack.variant.Value) else v
for _, variant in pkg_cls.variant_definitions("build_system")
for v in variant.values
)
builder_cls_names = [spack.builder.BUILDER_CLS[x].__name__ for x in build_system_names]
module = pkg_cls.module
has_builders_in_package_py = any(
spack.builder.get_builder_class(pkg_cls, name) for name in builder_cls_names
getattr(module, name, False) for name in builder_cls_names
)
if not has_builders_in_package_py:
continue
@@ -805,7 +806,7 @@ def _uses_deprecated_globals(pkgs, error_cls):
file = spack.repo.PATH.filename_for_package_name(pkg_name)
tree = ast.parse(open(file).read())
visitor = DeprecatedMagicGlobals(("std_cmake_args", "std_meson_args", "std_pip_args"))
visitor = DeprecatedMagicGlobals(("std_cmake_args",))
visitor.visit(tree)
if visitor.references_to_globals:
errors.append(

View File

@@ -252,7 +252,7 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):
spec_list = [
s
for s in db.query_local(installed=any)
for s in db.query_local(installed=any, in_buildcache=any)
if s.external or db.query_local_by_spec_hash(s.dag_hash()).in_buildcache
]
@@ -1182,9 +1182,6 @@ def __init__(self, mirror: spack.mirror.Mirror, force: bool, update_index: bool)
self.tmpdir: str
self.executor: concurrent.futures.Executor
# Verify if the mirror meets the requirements to push
self.mirror.ensure_mirror_usable("push")
def __enter__(self):
self._tmpdir = tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root())
self._executor = spack.util.parallel.make_concurrent_executor()
@@ -2565,13 +2562,7 @@ def _ensure_common_prefix(tar: tarfile.TarFile) -> str:
return pkg_prefix
def install_root_node(
spec: spack.spec.Spec,
unsigned=False,
force: bool = False,
sha256: Optional[str] = None,
allow_missing: bool = False,
) -> None:
def install_root_node(spec, unsigned=False, force=False, sha256=None):
"""Install the root node of a concrete spec from a buildcache.
Checking the sha256 sum of a node before installation is usually needed only
@@ -2580,10 +2571,11 @@ def install_root_node(
Args:
spec: spec to be installed (note that only the root node will be installed)
unsigned: if True allows installing unsigned binaries
force: force installation if the spec is already present in the local store
sha256: optional sha256 of the binary package, to be checked before installation
allow_missing: when true, allows installing a node with missing dependencies
unsigned (bool): if True allows installing unsigned binaries
force (bool): force installation if the spec is already present in the
local store
sha256 (str): optional sha256 of the binary package, to be checked
before installation
"""
# Early termination
if spec.external or spec.virtual:
@@ -2621,7 +2613,7 @@ def install_root_node(
spec, spack.store.STORE.layout.spec_file_path(spec)
)
spack.hooks.post_install(spec, False)
spack.store.STORE.db.add(spec, allow_missing=allow_missing)
spack.store.STORE.db.add(spec)
def install_single_spec(spec, unsigned=False, force=False):

View File

@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Common basic functions used through the spack.bootstrap package"""
import fnmatch
import glob
import importlib
import os.path
import re
@@ -61,19 +60,10 @@ def _try_import_from_store(
python, *_ = candidate_spec.dependencies("python-venv")
else:
python, *_ = candidate_spec.dependencies("python")
# if python is installed, ask it for the layout
if python.installed:
module_paths = [
os.path.join(candidate_spec.prefix, python.package.purelib),
os.path.join(candidate_spec.prefix, python.package.platlib),
]
# otherwise search for the site-packages directory
# (clingo from binaries with truncated python-venv runtime)
else:
module_paths = glob.glob(
os.path.join(candidate_spec.prefix, "lib", "python*", "site-packages")
)
module_paths = [
os.path.join(candidate_spec.prefix, python.package.purelib),
os.path.join(candidate_spec.prefix, python.package.platlib),
]
path_before = list(sys.path)
# NOTE: try module_paths first and last, last allows an existing version in path

View File

@@ -37,7 +37,6 @@
import spack.binary_distribution
import spack.config
import spack.detection
import spack.mirror
import spack.platforms
import spack.spec
import spack.store
@@ -45,6 +44,7 @@
import spack.util.executable
import spack.util.path
import spack.util.spack_yaml
import spack.util.url
import spack.version
from spack.installer import PackageInstaller
@@ -91,7 +91,12 @@ def __init__(self, conf: ConfigDictionary) -> None:
self.metadata_dir = spack.util.path.canonicalize_path(conf["metadata"])
# Promote (relative) paths to file urls
self.url = spack.mirror.Mirror(conf["info"]["url"]).fetch_url
url = conf["info"]["url"]
if spack.util.url.is_path_instead_of_url(url):
if not os.path.isabs(url):
url = os.path.join(self.metadata_dir, url)
url = spack.util.url.path_to_file_url(url)
self.url = url
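A rough, POSIX-only sketch of that promotion (paths and names are hypothetical): a relative entry is anchored at the metadata dir and becomes a file:// URL.

import posixpath

metadata_dir = "/opt/spack/bootstrap"
url = "../binaries"
promoted = "file://" + posixpath.normpath(posixpath.join(metadata_dir, url))
assert promoted == "file:///opt/spack/binaries"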
@property
def mirror_scope(self) -> spack.config.InternalConfigScope:
@@ -170,15 +175,7 @@ def _install_by_hash(
query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
for match in spack.store.find([f"/{pkg_hash}"], multiple=False, query_fn=query):
spack.binary_distribution.install_root_node(
# allow_missing is true since, when bootstrapping clingo, we truncate runtime
# deps such as gcc-runtime, because we link libstdc++ statically, and the
# remaining runtime deps are loaded by the Python interpreter. This just
# silences warnings about missing dependencies.
match,
unsigned=True,
force=True,
sha256=pkg_sha256,
allow_missing=True,
match, unsigned=True, force=True, sha256=pkg_sha256
)
def _install_and_test(
@@ -602,10 +599,7 @@ def bootstrapping_sources(scope: Optional[str] = None):
current = copy.copy(entry)
metadata_dir = spack.util.path.canonicalize_path(entry["metadata"])
metadata_yaml = os.path.join(metadata_dir, METADATA_YAML_FILENAME)
try:
with open(metadata_yaml, encoding="utf-8") as stream:
current.update(spack.util.spack_yaml.load(stream))
list_of_sources.append(current)
except OSError:
pass
with open(metadata_yaml, encoding="utf-8") as stream:
current.update(spack.util.spack_yaml.load(stream))
list_of_sources.append(current)
return list_of_sources

View File

@@ -44,7 +44,6 @@
from collections import defaultdict
from enum import Flag, auto
from itertools import chain
from multiprocessing.connection import Connection
from typing import Callable, Dict, List, Optional, Set, Tuple
import archspec.cpu
@@ -55,6 +54,7 @@
from llnl.util.lang import dedupe, stable_partition
from llnl.util.symlink import symlink
from llnl.util.tty.color import cescape, colorize
from llnl.util.tty.log import MultiProcessFd
import spack.build_systems._checks
import spack.build_systems.cmake
@@ -1061,8 +1061,8 @@ def set_all_package_py_globals(self):
pkg.setup_dependent_package(dependent_module, spec)
dependent_module.propagate_changes_to_mro()
pkg = self.specs[0].package
if self.context == Context.BUILD:
pkg = self.specs[0].package
module = ModuleChangePropagator(pkg)
# std_cmake_args is not sufficiently static to be defined
# in set_package_py_globals and is deprecated so its handled
@@ -1143,10 +1143,10 @@ def _setup_pkg_and_run(
serialized_pkg: "spack.subprocess_context.PackageInstallContext",
function: Callable,
kwargs: Dict,
write_pipe: Connection,
input_pipe: Optional[Connection],
jsfd1: Optional[Connection],
jsfd2: Optional[Connection],
write_pipe: multiprocessing.connection.Connection,
input_multiprocess_fd: Optional[MultiProcessFd],
jsfd1: Optional[MultiProcessFd],
jsfd2: Optional[MultiProcessFd],
):
"""Main entry point in the child process for Spack builds.
@@ -1188,12 +1188,13 @@ def _setup_pkg_and_run(
context: str = kwargs.get("context", "build")
try:
# We are in the child process. Python sets sys.stdin to open(os.devnull) to prevent our
# process and its parent from simultaneously reading from the original stdin. But, we
# assume that the parent process is not going to read from it till we are done with the
# child, so we undo Python's precaution. closefd=False since Connection has ownership.
if input_pipe is not None:
sys.stdin = os.fdopen(input_pipe.fileno(), closefd=False)
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_multiprocess_fd is not None:
sys.stdin = os.fdopen(input_multiprocess_fd.fd)
pkg = serialized_pkg.restore()
@@ -1262,8 +1263,8 @@ def _setup_pkg_and_run(
finally:
write_pipe.close()
if input_pipe is not None:
input_pipe.close()
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
def start_build_process(pkg, function, kwargs):
@@ -1290,9 +1291,23 @@ def child_fun():
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
This uses `multiprocessing.Process` to create the child process. The
mechanism used to create the process differs on different operating
systems and for different versions of Python. In some cases "fork"
is used (i.e. the "fork" system call) and in some cases it starts an
entirely new Python interpreter process (in the docs this is referred
to as the "spawn" start method). Breaking it down by OS:
- Linux always uses fork.
- Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after.
- Windows always uses the "spawn" start method.
For more information on `multiprocessing` child process creation
mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
"""
read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
input_fd = None
input_multiprocess_fd = None
jobserver_fd1 = None
jobserver_fd2 = None
@@ -1301,13 +1316,14 @@ def child_fun():
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.platform != "win32" and sys.stdin.isatty() and hasattr(sys.stdin, "fileno"):
input_fd = Connection(os.dup(sys.stdin.fileno()))
input_fd = os.dup(sys.stdin.fileno())
input_multiprocess_fd = MultiProcessFd(input_fd)
mflags = os.environ.get("MAKEFLAGS", False)
if mflags:
m = re.search(r"--jobserver-[^=]*=(\d),(\d)", mflags)
if m:
jobserver_fd1 = Connection(int(m.group(1)))
jobserver_fd2 = Connection(int(m.group(2)))
jobserver_fd1 = MultiProcessFd(int(m.group(1)))
jobserver_fd2 = MultiProcessFd(int(m.group(2)))
p = multiprocessing.Process(
target=_setup_pkg_and_run,
@@ -1316,7 +1332,7 @@ def child_fun():
function,
kwargs,
write_pipe,
input_fd,
input_multiprocess_fd,
jobserver_fd1,
jobserver_fd2,
),
@@ -1336,8 +1352,8 @@ def child_fun():
finally:
# Close the input stream in the parent process
if input_fd is not None:
input_fd.close()
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
def exitcode_msg(p):
typ = "exit" if p.exitcode >= 0 else "signal"

View File

@@ -12,7 +12,6 @@
import spack.error
import spack.multimethod
import spack.repo
#: Builder classes, as registered by the "builder" decorator
BUILDER_CLS = {}
@@ -75,14 +74,6 @@ def __call__(self, spec, prefix):
return self.phase_fn(self.builder.pkg, spec, prefix)
def get_builder_class(pkg, name: str) -> Optional[type]:
"""Return the builder class if a package module defines it."""
cls = getattr(pkg.module, name, None)
if cls and cls.__module__.startswith(spack.repo.ROOT_PYTHON_NAMESPACE):
return cls
return None
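A toy re-creation of this lookup (the module name and namespace prefix are hypothetical stand-ins for a real package module):

import types

ROOT_PYTHON_NAMESPACE = "spack.pkg"  # assumed value for the sketch
pkg_module = types.ModuleType("spack.pkg.builtin.zlib")

class CMakeBuilder:
    pass

CMakeBuilder.__module__ = "spack.pkg.builtin.zlib"  # pretend it lives in the repo
pkg_module.CMakeBuilder = CMakeBuilder

def get_builder_class_sketch(module, name):
    cls = getattr(module, name, None)
    if cls and cls.__module__.startswith(ROOT_PYTHON_NAMESPACE):
        return cls
    return None

assert get_builder_class_sketch(pkg_module, "CMakeBuilder") is CMakeBuilder
assert get_builder_class_sketch(pkg_module, "AutotoolsBuilder") is None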
def _create(pkg):
"""Return a new builder object for the package object being passed as argument.
@@ -108,10 +99,9 @@ class hierarchy (look at AspellDictPackage for an example of that)
package_buildsystem = buildsystem_name(pkg)
default_builder_cls = BUILDER_CLS[package_buildsystem]
builder_cls_name = default_builder_cls.__name__
builder_class = get_builder_class(pkg, builder_cls_name)
if builder_class:
return builder_class(pkg)
builder_cls = getattr(pkg.module, builder_cls_name, None)
if builder_cls:
return builder_cls(pkg)
# Specialized version of a given buildsystem can subclass some
# base classes and specialize certain phases or methods or attributes.

View File

@@ -5,6 +5,7 @@
"""Caches used by Spack to store data"""
import os
from typing import Union
import llnl.util.lang
from llnl.util.filesystem import mkdirp
@@ -31,8 +32,12 @@ def _misc_cache():
return spack.util.file_cache.FileCache(path)
FileCacheType = Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton]
#: Spack's cache for small data
MISC_CACHE: spack.util.file_cache.FileCache = llnl.util.lang.Singleton(_misc_cache) # type: ignore
MISC_CACHE: Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_misc_cache)
)
def fetch_cache_location():
@@ -69,4 +74,6 @@ def store(self, fetcher, relative_dest):
#: Spack's local cache for downloaded source archives
FETCH_CACHE: spack.fetch_strategy.FsCache = llnl.util.lang.Singleton(_fetch_cache) # type: ignore
FETCH_CACHE: Union[spack.fetch_strategy.FsCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_fetch_cache)
)

View File

@@ -34,7 +34,7 @@
import spack.binary_distribution as bindist
import spack.concretize
import spack.config as cfg
import spack.error
import spack.environment as ev
import spack.main
import spack.mirror
import spack.paths
@@ -95,6 +95,8 @@ def dispatch_open(fullurl, data=None, timeout=None, verify_ssl=True):
TEMP_STORAGE_MIRROR_NAME = "ci_temporary_mirror"
SPACK_RESERVED_TAGS = ["public", "protected", "notary"]
# TODO: Remove this in Spack 0.23
SHARED_PR_MIRROR_URL = "s3://spack-binaries-prs/shared_pr_mirror"
JOB_NAME_FORMAT = (
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{ arch=architecture}"
)
@@ -199,11 +201,11 @@ def _remove_satisfied_deps(deps, satisfied_list):
return nodes, edges, stages
def _print_staging_summary(spec_labels, stages, rebuild_decisions):
def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions):
if not stages:
return
mirrors = spack.mirror.MirrorCollection(binary=True)
mirrors = spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True)
tty.msg("Checked the following mirrors for binaries:")
for m in mirrors.values():
tty.msg(f" {m.fetch_url}")
@@ -250,14 +252,21 @@ def _spec_matches(spec, match_string):
return spec.intersects(match_string)
def _format_job_needs(dep_jobs, build_group, prune_dag, rebuild_decisions):
def _format_job_needs(
dep_jobs, build_group, prune_dag, rebuild_decisions, enable_artifacts_buildcache
):
needs_list = []
for dep_job in dep_jobs:
dep_spec_key = _spec_ci_label(dep_job)
rebuild = rebuild_decisions[dep_spec_key].rebuild
if not prune_dag or rebuild:
needs_list.append({"job": get_job_name(dep_job, build_group), "artifacts": False})
needs_list.append(
{
"job": get_job_name(dep_job, build_group),
"artifacts": enable_artifacts_buildcache,
}
)
return needs_list
@@ -401,6 +410,12 @@ def __init__(self, ci_config, spec_labels, stages):
self.ir = {
"jobs": {},
"temporary-storage-url-prefix": self.ci_config.get(
"temporary-storage-url-prefix", None
),
"enable-artifacts-buildcache": self.ci_config.get(
"enable-artifacts-buildcache", False
),
"rebuild-index": self.ci_config.get("rebuild-index", True),
"broken-specs-url": self.ci_config.get("broken-specs-url", None),
"broken-tests-packages": self.ci_config.get("broken-tests-packages", []),
@@ -683,13 +698,14 @@ def generate_gitlab_ci_yaml(
prune_dag=False,
check_index_only=False,
artifacts_root=None,
remote_mirror_override=None,
):
"""Generate a gitlab yaml file to run a dynamic child pipeline from
the spec matrix in the active environment.
Arguments:
env (spack.environment.Environment): Activated environment object
which must contain a ci section describing how to map
which must contain a gitlab-ci section describing how to map
specs to runners
print_summary (bool): Should we print a summary of all the jobs in
the stages in which they were placed.
@@ -704,21 +720,39 @@ def generate_gitlab_ci_yaml(
artifacts_root (str): Path where artifacts like logs, environment
files (spack.yaml, spack.lock), etc should be written. GitLab
requires this to be within the project directory.
remote_mirror_override (str): Typically only needed when one spack.yaml
is used to populate several mirrors with binaries, based on some
criteria. Spack protected pipelines populate different mirrors based
on branch name, facilitated by this option. DEPRECATED
"""
with spack.concretize.disable_compiler_existence_check():
with env.write_transaction():
env.concretize()
env.write()
yaml_root = env.manifest[ev.TOP_LEVEL_KEY]
# Get the joined "ci" config with all of the current scopes resolved
ci_config = cfg.get("ci")
config_deprecated = False
if not ci_config:
raise SpackCIError("Environment does not have a `ci` configuration")
tty.warn("Environment does not have `ci` a configuration")
gitlabci_config = yaml_root.get("gitlab-ci")
if not gitlabci_config:
tty.die("Environment yaml does not have `gitlab-ci` config section. Cannot recover.")
tty.warn(
"The `gitlab-ci` configuration is deprecated in favor of `ci`.\n",
"To update run \n\t$ spack env update /path/to/ci/spack.yaml",
)
translate_deprecated_config(gitlabci_config)
ci_config = gitlabci_config
config_deprecated = True
# Default target is gitlab...and only target is gitlab
if not ci_config.get("target", "gitlab") == "gitlab":
raise SpackCIError('Spack CI module only generates target "gitlab"')
tty.die('Spack CI module only generates target "gitlab"')
cdash_config = cfg.get("cdash")
cdash_handler = CDashHandler(cdash_config) if "build-group" in cdash_config else None
@@ -779,6 +813,12 @@ def generate_gitlab_ci_yaml(
spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE", None)
copy_only_pipeline = spack_pipeline_type == "spack_copy_only"
if copy_only_pipeline and config_deprecated:
tty.warn(
"SPACK_PIPELINE_TYPE=spack_copy_only is not supported when using\n",
"deprecated ci configuration, a no-op pipeline will be generated\n",
"instead.",
)
def ensure_expected_target_path(path):
"""Returns passed paths with all Windows path separators exchanged
@@ -797,16 +837,38 @@ def ensure_expected_target_path(path):
return path
pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
deprecated_mirror_config = False
buildcache_destination = None
if "buildcache-destination" not in pipeline_mirrors:
raise SpackCIError("spack ci generate requires a mirror named 'buildcache-destination'")
if "buildcache-destination" in pipeline_mirrors:
if remote_mirror_override:
tty.die(
"Using the deprecated --buildcache-destination cli option and "
"having a mirror named 'buildcache-destination' at the same time "
"is not allowed"
)
buildcache_destination = pipeline_mirrors["buildcache-destination"]
else:
deprecated_mirror_config = True
# TODO: This will be an error in Spack 0.23
buildcache_destination = pipeline_mirrors["buildcache-destination"]
# TODO: Remove this block in spack 0.23
remote_mirror_url = None
if deprecated_mirror_config:
if "mirrors" not in yaml_root or len(yaml_root["mirrors"].values()) < 1:
tty.die("spack ci generate requires an env containing a mirror")
ci_mirrors = yaml_root["mirrors"]
mirror_urls = [url for url in ci_mirrors.values()]
remote_mirror_url = mirror_urls[0]
spack_buildcache_copy = os.environ.get("SPACK_COPY_BUILDCACHE", None)
if spack_buildcache_copy:
buildcache_copies = {}
buildcache_copy_src_prefix = buildcache_destination.fetch_url
buildcache_copy_src_prefix = (
buildcache_destination.fetch_url
if buildcache_destination
else remote_mirror_override or remote_mirror_url
)
buildcache_copy_dest_prefix = spack_buildcache_copy
# Check for a list of "known broken" specs that we should not bother
@@ -816,10 +878,55 @@ def ensure_expected_target_path(path):
if "broken-specs-url" in ci_config:
broken_specs_url = ci_config["broken-specs-url"]
enable_artifacts_buildcache = False
if "enable-artifacts-buildcache" in ci_config:
tty.warn("Support for enable-artifacts-buildcache will be removed in Spack 0.23")
enable_artifacts_buildcache = ci_config["enable-artifacts-buildcache"]
rebuild_index_enabled = True
if "rebuild-index" in ci_config and ci_config["rebuild-index"] is False:
rebuild_index_enabled = False
temp_storage_url_prefix = None
if "temporary-storage-url-prefix" in ci_config:
tty.warn("Support for temporary-storage-url-prefix will be removed in Spack 0.23")
temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
# If a remote mirror override (alternate buildcache destination) was
# specified, add it here in case it has already built hashes we might
# generate.
# TODO: Remove this block in Spack 0.23
mirrors_to_check = None
if deprecated_mirror_config and remote_mirror_override:
if spack_pipeline_type == "spack_protected_branch":
# Overriding the main mirror in this case might result
# in skipping jobs on a release pipeline because specs are
# up to date in develop. Eventually we want to notice and take
# advantage of this by scheduling a job to copy the spec from
# develop to the release, but until we have that, this makes
# sure we schedule a rebuild job if the spec isn't already in
# the override mirror.
mirrors_to_check = {"override": remote_mirror_override}
# If we have a remote override and we want generate pipeline using
# --check-index-only, then the override mirror needs to be added to
# the configured mirrors when bindist.update() is run, or else we
# won't fetch its index and include in our local cache.
spack.mirror.add(
spack.mirror.Mirror(remote_mirror_override, name="ci_pr_mirror"),
cfg.default_modify_scope(),
)
# TODO: Remove this block in Spack 0.23
shared_pr_mirror = None
if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
stack_name = os.environ.get("SPACK_CI_STACK_NAME", "")
shared_pr_mirror = url_util.join(SHARED_PR_MIRROR_URL, stack_name)
spack.mirror.add(
spack.mirror.Mirror(shared_pr_mirror, name="ci_shared_pr_mirror"),
cfg.default_modify_scope(),
)
pipeline_artifacts_dir = artifacts_root
if not pipeline_artifacts_dir:
proj_dir = os.environ.get("CI_PROJECT_DIR", os.getcwd())
@@ -828,8 +935,9 @@ def ensure_expected_target_path(path):
pipeline_artifacts_dir = os.path.abspath(pipeline_artifacts_dir)
concrete_env_dir = os.path.join(pipeline_artifacts_dir, "concrete_environment")
# Copy the environment manifest file into the concrete environment directory,
# along with the spack.lock file.
# Now that we've added the mirrors we know about, they should be properly
# reflected in the environment manifest file, so copy that into the
# concrete environment directory, along with the spack.lock file.
if not os.path.exists(concrete_env_dir):
os.makedirs(concrete_env_dir)
shutil.copyfile(env.manifest_path, os.path.join(concrete_env_dir, "spack.yaml"))
@@ -854,12 +962,18 @@ def ensure_expected_target_path(path):
env_includes.extend(include_scopes)
env_yaml_root["spack"]["include"] = [ensure_expected_target_path(i) for i in env_includes]
if "gitlab-ci" in env_yaml_root["spack"] and "ci" not in env_yaml_root["spack"]:
env_yaml_root["spack"]["ci"] = env_yaml_root["spack"].pop("gitlab-ci")
translate_deprecated_config(env_yaml_root["spack"]["ci"])
with open(os.path.join(concrete_env_dir, "spack.yaml"), "w") as fd:
fd.write(syaml.dump_config(env_yaml_root, default_flow_style=False))
job_log_dir = os.path.join(pipeline_artifacts_dir, "logs")
job_repro_dir = os.path.join(pipeline_artifacts_dir, "reproduction")
job_test_dir = os.path.join(pipeline_artifacts_dir, "tests")
# TODO: Remove this line in Spack 0.23
local_mirror_dir = os.path.join(pipeline_artifacts_dir, "mirror")
user_artifacts_dir = os.path.join(pipeline_artifacts_dir, "user_data")
# We communicate relative paths to the downstream jobs to avoid issues in
@@ -873,6 +987,8 @@ def ensure_expected_target_path(path):
rel_job_log_dir = os.path.relpath(job_log_dir, ci_project_dir)
rel_job_repro_dir = os.path.relpath(job_repro_dir, ci_project_dir)
rel_job_test_dir = os.path.relpath(job_test_dir, ci_project_dir)
# TODO: Remove this line in Spack 0.23
rel_local_mirror_dir = os.path.join(local_mirror_dir, ci_project_dir)
rel_user_artifacts_dir = os.path.relpath(user_artifacts_dir, ci_project_dir)
# Speed up staging by first fetching binary indices from all mirrors
@@ -934,7 +1050,7 @@ def ensure_expected_target_path(path):
continue
up_to_date_mirrors = bindist.get_mirrors_for_spec(
spec=release_spec, index_only=check_index_only
spec=release_spec, mirrors_to_check=mirrors_to_check, index_only=check_index_only
)
spec_record.rebuild = not up_to_date_mirrors
@@ -978,14 +1094,25 @@ def main_script_replacements(cmd):
job_object["needs"] = []
if spec_label in dependencies:
# In this case, "needs" is only used for scheduling
# purposes, so we only get the direct dependencies.
dep_jobs = []
for dep_label in dependencies[spec_label]:
dep_jobs.append(spec_labels[dep_label])
if enable_artifacts_buildcache:
# Get dependencies transitively, so they're all
# available in the artifacts buildcache.
dep_jobs = [d for d in release_spec.traverse(deptype="all", root=False)]
else:
# In this case, "needs" is only used for scheduling
# purposes, so we only get the direct dependencies.
dep_jobs = []
for dep_label in dependencies[spec_label]:
dep_jobs.append(spec_labels[dep_label])
job_object["needs"].extend(
_format_job_needs(dep_jobs, build_group, prune_dag, rebuild_decisions)
_format_job_needs(
dep_jobs,
build_group,
prune_dag,
rebuild_decisions,
enable_artifacts_buildcache,
)
)
rebuild_spec = spec_record.rebuild
@@ -1067,6 +1194,19 @@ def main_script_replacements(cmd):
},
)
# TODO: Remove this block in Spack 0.23
if enable_artifacts_buildcache:
bc_root = os.path.join(local_mirror_dir, "build_cache")
job_object["artifacts"]["paths"].extend(
[
os.path.join(bc_root, p)
for p in [
bindist.tarball_name(release_spec, ".spec.json"),
bindist.tarball_directory_name(release_spec),
]
]
)
job_object["stage"] = stage_name
job_object["retry"] = {"max": 2, "when": JOB_RETRY_CONDITIONS}
job_object["interruptible"] = True
@@ -1081,7 +1221,15 @@ def main_script_replacements(cmd):
job_id += 1
if print_summary:
_print_staging_summary(spec_labels, stages, rebuild_decisions)
_print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions)
# Clean up remote mirror override if enabled
# TODO: Remove this block in Spack 0.23
if deprecated_mirror_config:
if remote_mirror_override:
spack.mirror.remove("ci_pr_mirror", cfg.default_modify_scope())
if spack_pipeline_type == "spack_pull_request":
spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
tty.debug(f"{job_id} build jobs generated in {stage_id} stages")
@@ -1103,7 +1251,7 @@ def main_script_replacements(cmd):
"when": ["runner_system_failure", "stuck_or_timeout_failure", "script_failure"],
}
if copy_only_pipeline:
if copy_only_pipeline and not config_deprecated:
stage_names.append("copy")
sync_job = copy.deepcopy(spack_ci_ir["jobs"]["copy"]["attributes"])
sync_job["stage"] = "copy"
@@ -1113,12 +1261,17 @@ def main_script_replacements(cmd):
if "variables" not in sync_job:
sync_job["variables"] = {}
sync_job["variables"]["SPACK_COPY_ONLY_DESTINATION"] = buildcache_destination.fetch_url
sync_job["variables"]["SPACK_COPY_ONLY_DESTINATION"] = (
buildcache_destination.fetch_url
if buildcache_destination
else remote_mirror_override or remote_mirror_url
)
if "buildcache-source" not in pipeline_mirrors:
raise SpackCIError("Copy-only pipelines require a mirror named 'buildcache-source'")
buildcache_source = pipeline_mirrors["buildcache-source"].fetch_url
if "buildcache-source" in pipeline_mirrors:
buildcache_source = pipeline_mirrors["buildcache-source"].fetch_url
else:
# TODO: Remove this condition in Spack 0.23
buildcache_source = os.environ.get("SPACK_SOURCE_MIRROR", None)
sync_job["variables"]["SPACK_BUILDCACHE_SOURCE"] = buildcache_source
sync_job["dependencies"] = []
@@ -1126,6 +1279,27 @@ def main_script_replacements(cmd):
job_id += 1
if job_id > 0:
# TODO: Remove this block in Spack 0.23
if temp_storage_url_prefix:
# There were some rebuild jobs scheduled, so we will need to
# schedule a job to clean up the temporary storage location
# associated with this pipeline.
stage_names.append("cleanup-temp-storage")
cleanup_job = copy.deepcopy(spack_ci_ir["jobs"]["cleanup"]["attributes"])
cleanup_job["stage"] = "cleanup-temp-storage"
cleanup_job["when"] = "always"
cleanup_job["retry"] = service_job_retries
cleanup_job["interruptible"] = True
cleanup_job["script"] = _unpack_script(
cleanup_job["script"],
op=lambda cmd: cmd.replace("mirror_prefix", temp_storage_url_prefix),
)
cleanup_job["dependencies"] = []
output_object["cleanup"] = cleanup_job
if (
"script" in spack_ci_ir["jobs"]["signing"]["attributes"]
and spack_pipeline_type == "spack_protected_branch"
@@ -1142,9 +1316,11 @@ def main_script_replacements(cmd):
signing_job["interruptible"] = True
if "variables" not in signing_job:
signing_job["variables"] = {}
signing_job["variables"][
"SPACK_BUILDCACHE_DESTINATION"
] = buildcache_destination.push_url
signing_job["variables"]["SPACK_BUILDCACHE_DESTINATION"] = (
buildcache_destination.push_url # need the s3 url for aws s3 sync
if buildcache_destination
else remote_mirror_override or remote_mirror_url
)
signing_job["dependencies"] = []
output_object["sign-pkgs"] = signing_job
@@ -1155,7 +1331,9 @@ def main_script_replacements(cmd):
final_job = spack_ci_ir["jobs"]["reindex"]["attributes"]
final_job["stage"] = "stage-rebuild-index"
target_mirror = buildcache_destination.push_url
target_mirror = remote_mirror_override or remote_mirror_url
if buildcache_destination:
target_mirror = buildcache_destination.push_url
final_job["script"] = _unpack_script(
final_job["script"],
op=lambda cmd: cmd.replace("{index_target_mirror}", target_mirror),
@@ -1181,11 +1359,17 @@ def main_script_replacements(cmd):
"SPACK_CONCRETE_ENV_DIR": rel_concrete_env_dir,
"SPACK_VERSION": spack_version,
"SPACK_CHECKOUT_VERSION": version_to_clone,
# TODO: Remove this line in Spack 0.23
"SPACK_REMOTE_MIRROR_URL": remote_mirror_url,
"SPACK_JOB_LOG_DIR": rel_job_log_dir,
"SPACK_JOB_REPRO_DIR": rel_job_repro_dir,
"SPACK_JOB_TEST_DIR": rel_job_test_dir,
# TODO: Remove this line in Spack 0.23
"SPACK_LOCAL_MIRROR_DIR": rel_local_mirror_dir,
"SPACK_PIPELINE_TYPE": str(spack_pipeline_type),
"SPACK_CI_STACK_NAME": os.environ.get("SPACK_CI_STACK_NAME", "None"),
# TODO: Remove this line in Spack 0.23
"SPACK_CI_SHARED_PR_MIRROR_URL": shared_pr_mirror or "None",
"SPACK_REBUILD_CHECK_UP_TO_DATE": str(prune_dag),
"SPACK_REBUILD_EVERYTHING": str(rebuild_everything),
"SPACK_REQUIRE_SIGNING": os.environ.get("SPACK_REQUIRE_SIGNING", "False"),
@@ -1194,6 +1378,10 @@ def main_script_replacements(cmd):
for item, val in output_vars.items():
output_vars[item] = ensure_expected_target_path(val)
# TODO: Remove this block in Spack 0.23
if deprecated_mirror_config and remote_mirror_override:
(output_object["variables"]["SPACK_REMOTE_MIRROR_OVERRIDE"]) = remote_mirror_override
spack_stack_name = os.environ.get("SPACK_CI_STACK_NAME", None)
if spack_stack_name:
output_object["variables"]["SPACK_CI_STACK_NAME"] = spack_stack_name
@@ -1220,8 +1408,15 @@ def main_script_replacements(cmd):
noop_job["retry"] = 0
noop_job["allow_failure"] = True
tty.debug("No specs to rebuild, generating no-op job")
output_object = {"no-specs-to-rebuild": noop_job}
if copy_only_pipeline and config_deprecated:
tty.debug("Generating no-op job as copy-only is unsupported here.")
noop_job["script"] = [
'echo "copy-only pipelines are not supported with deprecated ci configs"'
]
output_object = {"unsupported-copy": noop_job}
else:
tty.debug("No specs to rebuild, generating no-op job")
output_object = {"no-specs-to-rebuild": noop_job}
# Ensure the child pipeline always runs
output_object["workflow"] = {"rules": [{"when": "always"}]}
@@ -2259,6 +2454,83 @@ def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optiona
reporter.test_skipped_report(report_dir, spec, reason)
class SpackCIError(spack.error.SpackError):
def __init__(self, msg):
super().__init__(msg)
def translate_deprecated_config(config):
# Remove all deprecated keys from config
mappings = config.pop("mappings", [])
match_behavior = config.pop("match_behavior", "first")
build_job = {}
if "image" in config:
build_job["image"] = config.pop("image")
if "tags" in config:
build_job["tags"] = config.pop("tags")
if "variables" in config:
build_job["variables"] = config.pop("variables")
# Scripts always override in old CI
if "before_script" in config:
build_job["before_script:"] = config.pop("before_script")
if "script" in config:
build_job["script:"] = config.pop("script")
if "after_script" in config:
build_job["after_script:"] = config.pop("after_script")
signing_job = None
if "signing-job-attributes" in config:
signing_job = {"signing-job": config.pop("signing-job-attributes")}
service_job_attributes = None
if "service-job-attributes" in config:
service_job_attributes = config.pop("service-job-attributes")
# If this config already has pipeline-gen, do nothing more
if "pipeline-gen" in config:
return True if mappings or build_job or signing_job or service_job_attributes else False
config["target"] = "gitlab"
config["pipeline-gen"] = []
pipeline_gen = config["pipeline-gen"]
# Build Job
submapping = []
for section in mappings:
submapping_section = {"match": section["match"]}
if "runner-attributes" in section:
remapped_attributes = {}
if match_behavior == "first":
for key, value in section["runner-attributes"].items():
# Scripts always override in old CI
if key == "script":
remapped_attributes["script:"] = value
elif key == "before_script":
remapped_attributes["before_script:"] = value
elif key == "after_script":
remapped_attributes["after_script:"] = value
else:
remapped_attributes[key] = value
else:
# Handle "merge" behavior be allowing scripts to merge in submapping section
remapped_attributes = section["runner-attributes"]
submapping_section["build-job"] = remapped_attributes
if "remove-attributes" in section:
# Old format only allowed tags in this section, so no extra checks are needed
submapping_section["build-job-remove"] = section["remove-attributes"]
submapping.append(submapping_section)
pipeline_gen.append({"submapping": submapping, "match_behavior": match_behavior})
if build_job:
pipeline_gen.append({"build-job": build_job})
# Signing Job
if signing_job:
pipeline_gen.append(signing_job)
# Service Jobs
if service_job_attributes:
pipeline_gen.append({"reindex-job": service_job_attributes})
pipeline_gen.append({"noop-job": service_job_attributes})
pipeline_gen.append({"cleanup-job": service_job_attributes})
return True
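As a rough usage sketch of translate_deprecated_config (the input keys follow the deprecated schema handled above; the image name and match values are made up):

# Hypothetical input: a legacy ci config with "mappings" and a top-level image.
legacy = {
    "mappings": [{"match": ["zlib"], "runner-attributes": {"tags": ["small"]}}],
    "image": "ghcr.io/example/builder:latest",
}
changed = translate_deprecated_config(legacy)
# changed is True; legacy now carries "target": "gitlab" and a "pipeline-gen"
# list holding a submapping entry plus {"build-job": {"image": ...}}.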

View File

@@ -8,7 +8,6 @@
import os
import re
import sys
from collections import Counter
from typing import List, Union
import llnl.string
@@ -18,7 +17,6 @@
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize
import spack.concretize
import spack.config # breaks a cycle.
import spack.environment as ev
import spack.error
@@ -175,66 +173,10 @@ def parse_specs(
arg_string = " ".join([quote_kvp(arg) for arg in args])
specs = spack.parser.parse(arg_string)
if not concretize:
return specs
to_concretize = [(s, None) for s in specs]
return _concretize_spec_pairs(to_concretize, tests=tests)
def _concretize_spec_pairs(to_concretize, tests=False):
"""Helper method that concretizes abstract specs from a list of abstract,concrete pairs.
Any spec with a concrete spec associated with it will concretize to that spec. Any spec
with ``None`` for its concrete spec will be newly concretized. This method respects unification
rules from config."""
unify = spack.config.get("concretizer:unify", False)
# Special case for concretizing a single spec
if len(to_concretize) == 1:
abstract, concrete = to_concretize[0]
return [concrete or abstract.concretized()]
# Special case if every spec is either concrete or has an abstract hash
if all(
concrete or abstract.concrete or abstract.abstract_hash
for abstract, concrete in to_concretize
):
# Get all the concrete specs
ret = [
concrete or (abstract if abstract.concrete else abstract.lookup_hash())
for abstract, concrete in to_concretize
]
# If unify: true, check that specs don't conflict
# Since all concrete, "when_possible" is not relevant
if unify is True: # True, "when_possible", False are possible values
runtimes = spack.repo.PATH.packages_with_tags("runtime")
specs_per_name = Counter(
spec.name
for spec in traverse.traverse_nodes(
ret, deptype=("link", "run"), key=traverse.by_dag_hash
)
if spec.name not in runtimes # runtimes are allowed multiple times
)
conflicts = sorted(name for name, count in specs_per_name.items() if count > 1)
if conflicts:
raise spack.error.SpecError(
"Specs conflict and `concretizer:unify` is configured true.",
f" specs depend on multiple versions of {', '.join(conflicts)}",
)
return ret
# Standard case
concretize_method = spack.concretize.concretize_separately # unify: false
if unify is True:
concretize_method = spack.concretize.concretize_together
elif unify == "when_possible":
concretize_method = spack.concretize.concretize_together_when_possible
concretized = concretize_method(to_concretize, tests=tests)
return [concrete for _, concrete in concretized]
for spec in specs:
if concretize:
spec.concretize(tests=tests)
return specs
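A hedged usage sketch of _concretize_spec_pairs defined above (the spec name is illustrative):

# Hypothetical call: a single (abstract, None) pair takes the fast path and is
# simply concretized; with several pairs, concretizer:unify from config picks
# separate, together, or when_possible concretization.
pairs = [(spack.spec.Spec("zlib"), None)]  # None => concretize this spec
concrete_specs = _concretize_spec_pairs(pairs, tests=False)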
def matching_spec_from_env(spec):
@@ -250,22 +192,6 @@ def matching_spec_from_env(spec):
return spec.concretized()
def matching_specs_from_env(specs):
"""
Same as ``matching_spec_from_env`` but respects spec unification rules.
For each spec, if there is a matching spec in the environment it is used. If no
matching spec is found, this will return the given spec but concretized in the
context of the active environment and other given specs, with unification rules applied.
"""
env = ev.active_environment()
spec_pairs = [(spec, env.matching_spec(spec) if env else None) for spec in specs]
additional_concrete_specs = (
[(concrete, concrete) for _, concrete in env.concretized_specs()] if env else []
)
return _concretize_spec_pairs(spec_pairs + additional_concrete_specs)[: len(spec_pairs)]
def disambiguate_spec(spec, env, local=False, installed=True, first=False):
"""Given a spec, figure out which installed package it refers to.
@@ -583,18 +509,6 @@ def __init__(self, name):
super().__init__("{0} is not a permissible Spack command name.".format(name))
class MultipleSpecsMatch(Exception):
"""Raised when multiple specs match a constraint, in a context where
this is not allowed.
"""
class NoSpecMatches(Exception):
"""Raised when no spec matches a constraint, in a context where
this is not allowed.
"""
########################################
# argparse types for argument validation
########################################

View File

@@ -19,23 +19,12 @@
def setup_parser(subparser):
# DEPRECATED: equivalent to --generic --target
subparser.add_argument(
"-g",
"--generic-target",
action="store_true",
help="show the best generic target (deprecated)",
"-g", "--generic-target", action="store_true", help="show the best generic target"
)
subparser.add_argument(
"--known-targets", action="store_true", help="show a list of all known targets and exit"
)
target_type = subparser.add_mutually_exclusive_group()
target_type.add_argument(
"--family", action="store_true", help="print generic ISA (x86_64, aarch64, ppc64le, ...)"
)
target_type.add_argument(
"--generic", action="store_true", help="print feature level (x86_64_v3, armv8.4a, ...)"
)
parts = subparser.add_mutually_exclusive_group()
parts2 = subparser.add_mutually_exclusive_group()
parts.add_argument(
@@ -91,7 +80,6 @@ def display_target_group(header, target_group):
def arch(parser, args):
if args.generic_target:
# TODO: add deprecation warning in 0.24
print(archspec.cpu.host().generic)
return
@@ -108,10 +96,6 @@ def arch(parser, args):
host_platform = spack.platforms.host()
host_os = host_platform.operating_system(os_args)
host_target = host_platform.target(target_args)
if args.family:
host_target = host_target.family
elif args.generic:
host_target = host_target.generic
architecture = spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target)))
if args.platform:

View File

@@ -62,6 +62,13 @@ def setup_parser(subparser):
"path to the file where generated jobs file should be written. "
"default is .gitlab-ci.yml in the root of the repository",
)
generate.add_argument(
"--copy-to",
default=None,
help="path to additional directory for job files\n\n"
"this option provides an absolute path to a directory where the generated "
"jobs yaml file should be copied. default is not to copy",
)
generate.add_argument(
"--optimize",
action="store_true",
@@ -76,6 +83,12 @@ def setup_parser(subparser):
default=False,
help="(DEPRECATED) disable DAG scheduling (use 'plain' dependencies)",
)
generate.add_argument(
"--buildcache-destination",
default=None,
help="override the mirror configured in the environment\n\n"
"allows for pushing binaries from the generated pipeline to a different location",
)
prune_group = generate.add_mutually_exclusive_group()
prune_group.add_argument(
"--prune-dag",
@@ -201,10 +214,20 @@ def ci_generate(args):
env = spack.cmd.require_active_env(cmd_name="ci generate")
if args.copy_to:
tty.warn("The flag --copy-to is deprecated and will be removed in Spack 0.23")
if args.buildcache_destination:
tty.warn(
"The flag --buildcache-destination is deprecated and will be removed in Spack 0.23"
)
output_file = args.output_file
copy_yaml_to = args.copy_to
prune_dag = args.prune_dag
index_only = args.index_only
artifacts_root = args.artifacts_root
buildcache_destination = args.buildcache_destination
if not output_file:
output_file = os.path.abspath(".gitlab-ci.yml")
@@ -222,8 +245,15 @@ def ci_generate(args):
prune_dag=prune_dag,
check_index_only=index_only,
artifacts_root=artifacts_root,
remote_mirror_override=buildcache_destination,
)
if copy_yaml_to:
copy_to_dir = os.path.dirname(copy_yaml_to)
if not os.path.exists(copy_to_dir):
os.makedirs(copy_to_dir)
shutil.copyfile(output_file, copy_yaml_to)
def ci_reindex(args):
"""rebuild the buildcache index for the remote mirror
@@ -268,13 +298,22 @@ def ci_rebuild(args):
job_log_dir = os.environ.get("SPACK_JOB_LOG_DIR")
job_test_dir = os.environ.get("SPACK_JOB_TEST_DIR")
repro_dir = os.environ.get("SPACK_JOB_REPRO_DIR")
# TODO: Remove this in Spack 0.23
local_mirror_dir = os.environ.get("SPACK_LOCAL_MIRROR_DIR")
concrete_env_dir = os.environ.get("SPACK_CONCRETE_ENV_DIR")
ci_pipeline_id = os.environ.get("CI_PIPELINE_ID")
ci_job_name = os.environ.get("CI_JOB_NAME")
signing_key = os.environ.get("SPACK_SIGNING_KEY")
job_spec_pkg_name = os.environ.get("SPACK_JOB_SPEC_PKG_NAME")
job_spec_dag_hash = os.environ.get("SPACK_JOB_SPEC_DAG_HASH")
spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE")
# TODO: Remove this in Spack 0.23
remote_mirror_override = os.environ.get("SPACK_REMOTE_MIRROR_OVERRIDE")
# TODO: Remove this in Spack 0.23
remote_mirror_url = os.environ.get("SPACK_REMOTE_MIRROR_URL")
spack_ci_stack_name = os.environ.get("SPACK_CI_STACK_NAME")
# TODO: Remove this in Spack 0.23
shared_pr_mirror_url = os.environ.get("SPACK_CI_SHARED_PR_MIRROR_URL")
rebuild_everything = os.environ.get("SPACK_REBUILD_EVERYTHING")
require_signing = os.environ.get("SPACK_REQUIRE_SIGNING")
@@ -294,10 +333,12 @@ def ci_rebuild(args):
job_log_dir = os.path.join(ci_project_dir, job_log_dir)
job_test_dir = os.path.join(ci_project_dir, job_test_dir)
repro_dir = os.path.join(ci_project_dir, repro_dir)
local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)
# Debug print some of the key environment variables we should have received
tty.debug("pipeline_artifacts_dir = {0}".format(pipeline_artifacts_dir))
tty.debug("remote_mirror_url = {0}".format(remote_mirror_url))
tty.debug("job_spec_pkg_name = {0}".format(job_spec_pkg_name))
# Query the environment manifest to find out whether we're reporting to a
@@ -329,11 +370,51 @@ def ci_rebuild(args):
full_rebuild = True if rebuild_everything and rebuild_everything.lower() == "true" else False
pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
deprecated_mirror_config = False
buildcache_destination = None
if "buildcache-destination" not in pipeline_mirrors:
tty.die("spack ci rebuild requires a mirror named 'buildcache-destination")
if "buildcache-destination" in pipeline_mirrors:
buildcache_destination = pipeline_mirrors["buildcache-destination"]
else:
deprecated_mirror_config = True
# TODO: This will be an error in Spack 0.23
buildcache_destination = pipeline_mirrors["buildcache-destination"]
# If no override url exists, then just push binary package to the
# normal remote mirror url.
# TODO: Remove in Spack 0.23
buildcache_mirror_url = remote_mirror_override or remote_mirror_url
if buildcache_destination:
buildcache_mirror_url = buildcache_destination.push_url
# Figure out what our temporary storage mirror is: the artifacts
# buildcache or temporary-storage-url-prefix? In some cases we need to
# force one of them, or pipelines might have no way to propagate build
# artifacts from upstream to downstream jobs.
# TODO: Remove this in Spack 0.23
pipeline_mirror_url = None
# TODO: Remove this in Spack 0.23
temp_storage_url_prefix = None
if "temporary-storage-url-prefix" in ci_config:
temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
pipeline_mirror_url = url_util.join(temp_storage_url_prefix, ci_pipeline_id)
# TODO: Remove this in Spack 0.23
enable_artifacts_mirror = False
if "enable-artifacts-buildcache" in ci_config:
enable_artifacts_mirror = ci_config["enable-artifacts-buildcache"]
if enable_artifacts_mirror or (
spack_is_pr_pipeline and not enable_artifacts_mirror and not temp_storage_url_prefix
):
# If you explicitly enabled the artifacts buildcache feature, or
# if this is a PR pipeline but you did not enable either of the
# per-pipeline temporary storage features, we force the use of
# artifacts buildcache. Otherwise jobs will not have binary
# dependencies from previous stages available since we do not
# allow pushing binaries to the remote mirror during PR pipelines.
enable_artifacts_mirror = True
pipeline_mirror_url = url_util.path_to_file_url(local_mirror_dir)
mirror_msg = "artifact buildcache enabled, mirror url: {0}".format(pipeline_mirror_url)
tty.debug(mirror_msg)
# Get the concrete spec to be built by this job.
try:
@@ -408,7 +489,48 @@ def ci_rebuild(args):
fd.write(spack_info.encode("utf8"))
fd.write(b"\n")
matches = None if full_rebuild else bindist.get_mirrors_for_spec(job_spec, index_only=False)
pipeline_mirrors = []
# If we decided there should be a temporary storage mechanism, add that
# mirror now so it's used when we check for a hash match already
# built for this spec.
# TODO: Remove this block in Spack 0.23
if pipeline_mirror_url:
mirror = spack.mirror.Mirror(pipeline_mirror_url, name=spack_ci.TEMP_STORAGE_MIRROR_NAME)
spack.mirror.add(mirror, cfg.default_modify_scope())
pipeline_mirrors.append(pipeline_mirror_url)
# Check configured mirrors for a built spec with a matching hash
# TODO: Remove this block in Spack 0.23
mirrors_to_check = None
if remote_mirror_override:
if spack_pipeline_type == "spack_protected_branch":
# Passing "mirrors_to_check" below means we *only* look in the override
# mirror to see if we should skip building, which is what we want.
mirrors_to_check = {"override": remote_mirror_override}
# Adding this mirror to the list of configured mirrors means dependencies
# could be installed from either the override mirror or any other configured
# mirror (e.g. remote_mirror_url which is defined in the environment or
# pipeline_mirror_url), which is also what we want.
spack.mirror.add(
spack.mirror.Mirror(remote_mirror_override, name="mirror_override"),
cfg.default_modify_scope(),
)
pipeline_mirrors.append(remote_mirror_override)
# TODO: Remove this in Spack 0.23
if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
if shared_pr_mirror_url != "None":
pipeline_mirrors.append(shared_pr_mirror_url)
matches = (
None
if full_rebuild
else bindist.get_mirrors_for_spec(
job_spec, mirrors_to_check=mirrors_to_check, index_only=False
)
)
if matches:
# Got a hash match on at least one configured mirror. All
@@ -420,10 +542,25 @@ def ci_rebuild(args):
tty.msg("No need to rebuild {0}, found hash match at: ".format(job_spec_pkg_name))
for match in matches:
tty.msg(" {0}".format(match["mirror_url"]))
# TODO: Remove this block in Spack 0.23
if enable_artifacts_mirror:
matching_mirror = matches[0]["mirror_url"]
build_cache_dir = os.path.join(local_mirror_dir, "build_cache")
tty.debug("Getting {0} buildcache from {1}".format(job_spec_pkg_name, matching_mirror))
tty.debug("Downloading to {0}".format(build_cache_dir))
bindist.download_single_spec(job_spec, build_cache_dir, mirror_url=matching_mirror)
# Now we are done and successful
return 0
# Before beginning the install, if this is a "rebuild everything" pipeline, we
# only want to keep the mirror being used by the current pipeline as its binary
# package destination. This ensures that when we rebuild everything, we only
# consume binary dependencies built in this pipeline.
# TODO: Remove this in Spack 0.23
if deprecated_mirror_config and full_rebuild:
spack_ci.remove_other_mirrors(pipeline_mirrors, cfg.default_modify_scope())
# No hash match anywhere means we need to rebuild spec
# Start with spack arguments
@@ -544,11 +681,17 @@ def ci_rebuild(args):
cdash_handler.copy_test_results(reports_dir, job_test_dir)
if install_exit_code == 0:
# If the install succeeded, push it to the buildcache destination. Failure to push
# If the install succeeded, push it to one or more mirrors. Failure to push to any mirror
# will result in a non-zero exit code. Pushing is best-effort.
mirror_urls = [buildcache_mirror_url]
# TODO: Remove this block in Spack 0.23
if pipeline_mirror_url:
mirror_urls.append(pipeline_mirror_url)
for result in spack_ci.create_buildcache(
input_spec=job_spec,
destination_mirror_urls=[buildcache_destination.push_url],
destination_mirror_urls=mirror_urls,
sign_binaries=spack_ci.can_sign_binaries(),
):
if not result.success:

View File

@@ -105,8 +105,7 @@ def clean(parser, args):
# Then do the cleaning falling through the cases
if args.specs:
specs = spack.cmd.parse_specs(args.specs, concretize=False)
specs = spack.cmd.matching_specs_from_env(specs)
specs = list(spack.cmd.matching_spec_from_env(x) for x in specs)
for spec in specs:
msg = "Cleaning build stage [{0}]"
tty.msg(msg.format(spec.short_spec))

View File

@@ -581,51 +581,23 @@ def add_concretizer_args(subparser):
def add_connection_args(subparser, add_help):
def add_argument_string_or_variable(parser, arg: str, *, deprecate_str: bool = True, **kwargs):
group = parser.add_mutually_exclusive_group()
group.add_argument(arg, **kwargs)
# Update help string
if "help" in kwargs:
kwargs["help"] = "environment variable containing " + kwargs["help"]
group.add_argument(arg + "-variable", **kwargs)
s3_connection_parser = subparser.add_argument_group("S3 Connection")
add_argument_string_or_variable(
s3_connection_parser,
"--s3-access-key-id",
help="ID string to use to connect to this S3 mirror",
subparser.add_argument(
"--s3-access-key-id", help="ID string to use to connect to this S3 mirror"
)
add_argument_string_or_variable(
s3_connection_parser,
"--s3-access-key-secret",
help="secret string to use to connect to this S3 mirror",
subparser.add_argument(
"--s3-access-key-secret", help="secret string to use to connect to this S3 mirror"
)
add_argument_string_or_variable(
s3_connection_parser,
"--s3-access-token",
help="access token to use to connect to this S3 mirror",
subparser.add_argument(
"--s3-access-token", help="access token to use to connect to this S3 mirror"
)
s3_connection_parser.add_argument(
subparser.add_argument(
"--s3-profile", help="S3 profile name to use to connect to this S3 mirror", default=None
)
s3_connection_parser.add_argument(
subparser.add_argument(
"--s3-endpoint-url", help="endpoint URL to use to connect to this S3 mirror"
)
oci_connection_parser = subparser.add_argument_group("OCI Connection")
add_argument_string_or_variable(
oci_connection_parser,
"--oci-username",
deprecate_str=False,
help="username to use to connect to this OCI mirror",
)
add_argument_string_or_variable(
oci_connection_parser,
"--oci-password",
help="password to use to connect to this OCI mirror",
)
subparser.add_argument("--oci-username", help="username to use to connect to this OCI mirror")
subparser.add_argument("--oci-password", help="password to use to connect to this OCI mirror")
def use_buildcache(cli_arg_value):
@@ -688,32 +660,34 @@ def mirror_name_or_url(m):
# accidentally to a dir in the current working directory.
# If there's a \ or / in the name, it's interpreted as a path or url.
if "/" in m or "\\" in m or m in (".", ".."):
if "/" in m or "\\" in m:
return spack.mirror.Mirror(m)
# Otherwise, the named mirror is required to exist.
try:
return spack.mirror.require_mirror_name(m)
except ValueError as e:
raise argparse.ArgumentTypeError(f"{e}. Did you mean {os.path.join('.', m)}?") from e
raise argparse.ArgumentTypeError(
str(e) + ". Did you mean {}?".format(os.path.join(".", m))
)
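Roughly how mirror_name_or_url resolves its argument (a sketch; the values are illustrative):

# "s3://bucket/mirror"  -> Mirror built from the URL ("/" present)
# "./local-mirror"      -> Mirror built from the path ("/" present)
# "my-mirror"           -> must be a configured mirror name; otherwise an
#                          argparse error suggesting "./my-mirror"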
def mirror_url(url):
try:
return spack.mirror.Mirror.from_url(url)
except ValueError as e:
raise argparse.ArgumentTypeError(str(e)) from e
raise argparse.ArgumentTypeError(str(e))
def mirror_directory(path):
try:
return spack.mirror.Mirror.from_local_path(path)
except ValueError as e:
raise argparse.ArgumentTypeError(str(e)) from e
raise argparse.ArgumentTypeError(str(e))
def mirror_name(name):
try:
return spack.mirror.require_mirror_name(name)
except ValueError as e:
raise argparse.ArgumentTypeError(str(e)) from e
raise argparse.ArgumentTypeError(str(e))

View File

@@ -99,5 +99,5 @@ def deconcretize(parser, args):
" Use `spack deconcretize --all` to deconcretize ALL specs.",
)
specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
deconcretize_specs(args, specs)

View File

@@ -10,12 +10,11 @@
import sys
import tempfile
from pathlib import Path
from typing import List, Optional, Set
from typing import List, Optional
import llnl.string as string
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.symlink import islink, symlink
from llnl.util.tty.colify import colify
from llnl.util.tty.color import cescape, colorize
@@ -51,8 +50,6 @@
"update",
"revert",
"depfile",
"track",
"untrack",
]
@@ -60,41 +57,35 @@
# env create
#
def env_create_setup_parser(subparser):
"""create a new environment
create a new environment or, optionally, copy an existing environment
a manifest file results in a new abstract environment while a lock file
creates a new concrete environment
"""
subparser.add_argument(
"env_name", metavar="env", help="name or directory of the new environment"
)
"""create a new environment"""
subparser.add_argument("env_name", metavar="env", help="name or directory of environment")
subparser.add_argument(
"-d", "--dir", action="store_true", help="create an environment in a specific directory"
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy envfile's relative develop paths verbatim",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
view_opts = subparser.add_mutually_exclusive_group()
view_opts.add_argument(
"--without-view", action="store_true", help="do not maintain a view for this environment"
)
view_opts.add_argument(
"--with-view", help="maintain view at WITH_VIEW (vs. environment's directory)"
"--with-view",
help="specify that this environment should maintain a view at the"
" specified path (by default the view is maintained in the"
" environment directory)",
)
subparser.add_argument(
"envfile",
nargs="?",
default=None,
help="manifest or lock file (ends with '.json' or '.lock')",
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)
subparser.add_argument(
"--include-concrete",
action="append",
help="copy concrete specs from INCLUDE_CONCRETE's environment",
"--include-concrete", action="append", help="name of old environment to copy specs from"
)
@@ -182,7 +173,7 @@ def _env_create(
# env activate
#
def env_activate_setup_parser(subparser):
"""set the active environment"""
"""set the current environment"""
shells = subparser.add_mutually_exclusive_group()
shells.add_argument(
"--sh",
@@ -222,14 +213,14 @@ def env_activate_setup_parser(subparser):
view_options = subparser.add_mutually_exclusive_group()
view_options.add_argument(
"-v",
"--with-view",
"-v",
metavar="name",
help="set runtime environment variables for the named view",
help="set runtime environment variables for specific view",
)
view_options.add_argument(
"-V",
"--without-view",
"-V",
action="store_true",
help="do not set runtime environment variables for any view",
)
@@ -239,14 +230,14 @@ def env_activate_setup_parser(subparser):
"--prompt",
action="store_true",
default=False,
help="add the active environment to the command line prompt",
help="decorate the command line prompt when activating",
)
subparser.add_argument(
"--temp",
action="store_true",
default=False,
help="create and activate in a temporary directory",
help="create and activate an environment in a temporary directory",
)
subparser.add_argument(
"--create",
@@ -258,12 +249,13 @@ def env_activate_setup_parser(subparser):
"--envfile",
nargs="?",
default=None,
help="manifest or lock file (ends with '.json' or '.lock')",
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy envfile's relative develop paths verbatim when create",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
subparser.add_argument(
"-d",
@@ -277,7 +269,10 @@ def env_activate_setup_parser(subparser):
dest="env_name",
nargs="?",
default=None,
help=("name or directory of the environment being activated"),
help=(
"name of managed environment or directory of the independent env"
" (when using --dir/-d) to activate"
),
)
@@ -390,7 +385,7 @@ def env_activate(args):
# env deactivate
#
def env_deactivate_setup_parser(subparser):
"""deactivate the active environment"""
"""deactivate any active environment in the shell"""
shells = subparser.add_mutually_exclusive_group()
shells.add_argument(
"--sh",
@@ -449,253 +444,104 @@ def env_deactivate(args):
sys.stdout.write(cmds)
#
# env track
#
def env_track_setup_parser(subparser):
"""track an environment from a directory in Spack"""
subparser.add_argument("-n", "--name", help="custom environment name")
subparser.add_argument("dir", help="path to environment")
arguments.add_common_arguments(subparser, ["yes_to_all"])
def env_track(args):
src_path = os.path.abspath(args.dir)
if not ev.is_env_dir(src_path):
tty.die("Cannot track environment. Path doesn't contain an environment")
if args.name:
name = args.name
else:
name = os.path.basename(src_path)
try:
dst_path = ev.environment_dir_from_name(name, exists_ok=False)
except ev.SpackEnvironmentError:
tty.die(
f"An environment named {name} already exists. Set a name with:"
"\n\n"
f" spack env track --name NAME {src_path}\n"
)
symlink(src_path, dst_path)
tty.msg(f"Tracking environment in {src_path}")
tty.msg(
"You can now activate this environment with the following command:\n\n"
f" spack env activate {name}\n"
)
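Assumed CLI usage matching the parser above (a sketch, with a hypothetical path and name):

# spack env track --name myenv /path/to/existing/env   # symlink it into Spack
# spack env activate myenv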
#
# env remove & untrack helpers
#
def filter_managed_env_names(env_names: Set[str]) -> Set[str]:
tracked_env_names = {e for e in env_names if islink(ev.environment_dir_from_name(e))}
managed_env_names = env_names - set(tracked_env_names)
num_managed_envs = len(managed_env_names)
managed_envs_str = " ".join(managed_env_names)
if num_managed_envs >= 2:
tty.error(
f"The following are not tracked environments. "
"To remove them completely run,"
"\n\n"
f" spack env rm {managed_envs_str}\n"
)
elif num_managed_envs > 0:
tty.error(
f"'{managed_envs_str}' is not a tracked env. "
"To remove it completely run,"
"\n\n"
f" spack env rm {managed_envs_str}\n"
)
return tracked_env_names
def get_valid_envs(env_names: Set[str]) -> Set[ev.Environment]:
valid_envs = set()
for env_name in env_names:
try:
env = ev.read(env_name)
valid_envs.add(env)
except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
pass
return valid_envs
def _env_untrack_or_remove(
env_names: List[str], remove: bool = False, force: bool = False, yes_to_all: bool = False
):
all_env_names = set(ev.all_environment_names())
known_env_names = set(env_names).intersection(all_env_names)
unknown_env_names = set(env_names) - known_env_names
# print error for unknown environments
for env_name in unknown_env_names:
tty.error(f"Environment '{env_name}' does not exist")
# if only untracking is allowed, restrict removal to environments
# that are symlinks (tracked); managed environments raise errors
if not remove:
env_names_to_remove = filter_managed_env_names(known_env_names)
else:
env_names_to_remove = known_env_names
# initialize all environments with valid spack.yaml configs
all_valid_envs = get_valid_envs(all_env_names)
# build a task list of environments and bad env names to remove
envs_to_remove = [e for e in all_valid_envs if e.name in env_names_to_remove]
bad_env_names_to_remove = env_names_to_remove - {e.name for e in envs_to_remove}
for remove_env in envs_to_remove:
for env in all_valid_envs:
# don't compare an environment with itself
if env.name == remove_env.name:
continue
# check whether an environment is included in another
if remove_env.path in env.included_concrete_envs:
msg = f"Environment '{remove_env.name}' is used by environment '{env.name}'"
if force:
tty.warn(msg)
else:
tty.error(msg)
envs_to_remove.remove(remove_env)
# ask the user if they really want to remove the known environments
# force should behave the same as yes_to_all here, following the semantics of rm
if not (yes_to_all or force) and (envs_to_remove or bad_env_names_to_remove):
environments = string.plural(len(env_names_to_remove), "environment", show_n=False)
envs = string.comma_and(list(env_names_to_remove))
answer = tty.get_yes_or_no(
f"Really {'remove' if remove else 'untrack'} {environments} {envs}?", default=False
)
if not answer:
tty.die("Will not remove any environments")
# keep track of the environments we remove so we can set the exit code later
removed_env_names = []
for env in envs_to_remove:
name = env.name
if not force and env.active:
tty.error(
f"Environment '{name}' can't be "
f"{'removed' if remove else 'untracked'} while activated."
)
continue
# Get path to check if environment is a tracked / symlinked environment
if islink(env.path):
real_env_path = os.path.realpath(env.path)
os.unlink(env.path)
tty.msg(
f"Sucessfully untracked environment '{name}', "
"but it can still be found at:\n\n"
f" {real_env_path}\n"
)
else:
env.destroy()
tty.msg(f"Successfully removed environment '{name}'")
removed_env_names.append(env.name)
for bad_env_name in bad_env_names_to_remove:
shutil.rmtree(
spack.environment.environment.environment_dir_from_name(bad_env_name, exists_ok=True)
)
tty.msg(f"Successfully removed environment '{bad_env_name}'")
removed_env_names.append(bad_env_name)
# Following the design of Unix rm, we should exit with a status of 1
# any time we cannot delete every environment the user asks for.
# However, we should still process all the environments we know about
# and delete them instead of failing on the first unknown environment.
if len(removed_env_names) < len(known_env_names):
sys.exit(1)
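A behavioral sketch of the helper above (the environment names are hypothetical):

# _env_untrack_or_remove(["demo", "missing"], remove=False, yes_to_all=True)
# prints an error for "missing", unlinks "demo" if it is a symlinked (tracked)
# environment, and finally exits 1 because not every requested environment
# could be processed.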
#
# env untrack
#
def env_untrack_setup_parser(subparser):
"""track an environment from a directory in Spack"""
subparser.add_argument("env", nargs="+", help="tracked environment name")
subparser.add_argument(
"-f", "--force", action="store_true", help="force unlink even when environment is active"
)
arguments.add_common_arguments(subparser, ["yes_to_all"])
def env_untrack(args):
_env_untrack_or_remove(
env_names=args.env, force=args.force, yes_to_all=args.yes_to_all, remove=False
)
#
# env remove
#
def env_remove_setup_parser(subparser):
"""remove managed environment(s)
remove existing environment(s) managed by Spack
directory environments and manifests embedded in repositories must be
removed manually
"""
subparser.add_argument(
"rm_env", metavar="env", nargs="+", help="name(s) of the environment(s) being removed"
)
"""remove an existing environment"""
subparser.add_argument("rm_env", metavar="env", nargs="+", help="environment(s) to remove")
arguments.add_common_arguments(subparser, ["yes_to_all"])
subparser.add_argument(
"-f",
"--force",
action="store_true",
help="force removal even when included in other environment(s)",
help="remove the environment even if it is included in another environment",
)
def env_remove(args):
"""remove existing environment(s)"""
_env_untrack_or_remove(
env_names=args.rm_env, remove=True, force=args.force, yes_to_all=args.yes_to_all
)
"""Remove a *named* environment.
This removes an environment managed by Spack. Directory environments
and manifests embedded in repositories should be removed manually.
"""
remove_envs = []
valid_envs = []
bad_envs = []
for env_name in ev.all_environment_names():
try:
env = ev.read(env_name)
valid_envs.append(env)
if env_name in args.rm_env:
remove_envs.append(env)
except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
if env_name in args.rm_env:
bad_envs.append(env_name)
# Check if remove_env is included from another env before trying to remove
for env in valid_envs:
for remove_env in remove_envs:
# don't check if environment is included to itself
if env.name == remove_env.name:
continue
if remove_env.path in env.included_concrete_envs:
msg = f'Environment "{remove_env.name}" is being used by environment "{env.name}"'
if args.force:
tty.warn(msg)
else:
tty.die(msg)
if not args.yes_to_all:
environments = string.plural(len(args.rm_env), "environment", show_n=False)
envs = string.comma_and(args.rm_env)
answer = tty.get_yes_or_no(f"Really remove {environments} {envs}?", default=False)
if not answer:
tty.die("Will not remove any environments")
for env in remove_envs:
name = env.name
if env.active:
tty.die(f"Environment {name} can't be removed while activated.")
env.destroy()
tty.msg(f"Successfully removed environment '{name}'")
for bad_env_name in bad_envs:
shutil.rmtree(
spack.environment.environment.environment_dir_from_name(bad_env_name, exists_ok=True)
)
tty.msg(f"Successfully removed environment '{bad_env_name}'")
#
# env rename
#
def env_rename_setup_parser(subparser):
"""rename an existing environment
rename a managed environment or move an independent/directory environment
the operation cannot be performed to or from an active environment
"""
"""rename an existing environment"""
subparser.add_argument(
"mv_from", metavar="from", help="current name or directory of the environment"
"mv_from", metavar="from", help="name (or path) of existing environment"
)
subparser.add_argument(
"mv_to", metavar="to", help="new name (or path) for existing environment"
)
subparser.add_argument("mv_to", metavar="to", help="new name or directory for the environment")
subparser.add_argument(
"-d",
"--dir",
action="store_true",
help="positional arguments are environment directory paths",
help="the specified arguments correspond to directory paths",
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
help="force renaming even if overwriting an existing environment",
"-f", "--force", action="store_true", help="allow overwriting of an existing environment"
)
def env_rename(args):
"""rename or move an existing environment"""
"""Rename an environment.
This renames a managed environment or moves an independent environment.
"""
# Directory option has been specified
if args.dir:
@@ -744,7 +590,7 @@ def env_rename(args):
# env list
#
def env_list_setup_parser(subparser):
"""list all managed environments"""
"""list managed environments"""
def env_list(args):
@@ -780,14 +626,13 @@ def actions():
# env view
#
def env_view_setup_parser(subparser):
"""manage the environment's view
provide the path when enabling a view with a non-default path
"""
"""manage a view associated with the environment"""
subparser.add_argument(
"action", choices=ViewAction.actions(), help="action to take for the environment's view"
)
subparser.add_argument("view_path", nargs="?", help="view's non-default path when enabling it")
subparser.add_argument(
"view_path", nargs="?", help="when enabling a view, optionally set the path manually"
)
def env_view(args):
@@ -815,7 +660,7 @@ def env_view(args):
# env status
#
def env_status_setup_parser(subparser):
"""print active environment status"""
"""print whether there is an active environment"""
def env_status(args):
@@ -875,22 +720,14 @@ def env_loads(args):
def env_update_setup_parser(subparser):
"""update the environment manifest to the latest schema format
update the environment to the latest schema format, which may not be
readable by older versions of spack
a backup copy of the manifest is retained in case there is a need to revert
this operation
"""
"""update environments to the latest format"""
subparser.add_argument(
metavar="env", dest="update_env", help="name or directory of the environment"
metavar="env", dest="update_env", help="name or directory of the environment to activate"
)
spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all"])
def env_update(args):
"""update the manifest to the latest format"""
manifest_file = ev.manifest_file(args.update_env)
backup_file = manifest_file + ".bkp"
@@ -920,22 +757,14 @@ def env_update(args):
def env_revert_setup_parser(subparser):
"""restore the environment manifest to its previous format
revert the environment's manifest to the schema format from its last
'spack env update'
the current manifest will be overwritten by the backup copy and the backup
copy will be removed
"""
"""restore environments to their state before update"""
subparser.add_argument(
metavar="env", dest="revert_env", help="name or directory of the environment"
metavar="env", dest="revert_env", help="name or directory of the environment to activate"
)
spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all"])
def env_revert(args):
"""restore the environment manifest to its previous format"""
manifest_file = ev.manifest_file(args.revert_env)
backup_file = manifest_file + ".bkp"
@@ -967,19 +796,15 @@ def env_revert(args):
def env_depfile_setup_parser(subparser):
"""generate a depfile to exploit parallel builds across specs
requires the active environment to be concrete
"""
"""generate a depfile from the concrete environment specs"""
subparser.add_argument(
"--make-prefix",
"--make-target-prefix",
default=None,
metavar="TARGET",
help="prefix Makefile targets/variables with <TARGET>/<name>,\n"
"which can be an empty string (--make-prefix '')\n"
"defaults to the absolute path of the environment's makedeps\n"
"environment metadata dir\n",
help="prefix Makefile targets (and variables) with <TARGET>/<name>\n\nby default "
"the absolute path to the directory makedeps under the environment metadata dir is "
"used. can be set to an empty string --make-prefix ''",
)
subparser.add_argument(
"--make-disable-jobserver",
@@ -994,8 +819,8 @@ def env_depfile_setup_parser(subparser):
type=arguments.use_buildcache,
default="package:auto,dependencies:auto",
metavar="[{auto,only,never},][package:{auto,only,never},][dependencies:{auto,only,never}]",
help="use `only` to prune redundant build dependencies\n"
"option is also passed to generated spack install commands",
help="when using `only`, redundant build dependencies are pruned from the DAG\n\n"
"this flag is passed on to the generated spack install commands",
)
subparser.add_argument(
"-o",
@@ -1009,14 +834,14 @@ def env_depfile_setup_parser(subparser):
"--generator",
default="make",
choices=("make",),
help="specify the depfile type (only supports `make`)",
help="specify the depfile type\n\ncurrently only make is supported",
)
subparser.add_argument(
metavar="specs",
dest="specs",
nargs=argparse.REMAINDER,
default=None,
help="limit the generated file to matching specs",
help="generate a depfile only for matching specs in the environment",
)
@@ -1085,12 +910,7 @@ def setup_parser(subparser):
setup_parser_cmd_name = "env_%s_setup_parser" % name
setup_parser_cmd = globals()[setup_parser_cmd_name]
subsubparser = sp.add_parser(
name,
aliases=aliases,
description=setup_parser_cmd.__doc__,
help=spack.cmd.first_line(setup_parser_cmd.__doc__),
)
subsubparser = sp.add_parser(name, aliases=aliases, help=setup_parser_cmd.__doc__)
setup_parser_cmd(subsubparser)

View File

@@ -174,17 +174,17 @@ def query_arguments(args):
if (args.missing or args.only_missing) and not args.only_deprecated:
installed.append(InstallStatuses.MISSING)
predicate_fn = None
known = any
if args.unknown:
predicate_fn = lambda x: not spack.repo.PATH.exists(x.spec.name)
known = False
explicit = None
explicit = any
if args.explicit:
explicit = True
if args.implicit:
explicit = False
q_args = {"installed": installed, "predicate_fn": predicate_fn, "explicit": explicit}
q_args = {"installed": installed, "known": known, "explicit": explicit}
install_tree = args.install_tree
upstreams = spack.config.get("upstreams", {})
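A sketch of the query arguments assembled above (shape follows the new predicate_fn code path; values are illustrative):

# q_args = {
#     "installed": [InstallStatuses.INSTALLED, InstallStatuses.MISSING],
#     "predicate_fn": None,   # or a callable filtering database records
#     "explicit": True,       # True/False/None per --explicit/--implicit
# }
# results = spack.store.STORE.db.query(**q_args)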
@@ -222,9 +222,11 @@ def decorator(spec, fmt):
def display_env(env, args, decorator, results):
"""Display extra find output when running in an environment.
In an environment, `spack find` outputs a preliminary section
showing the root specs of the environment (this is in addition
to the section listing out specs matching the query parameters).
Find in an environment outputs 2 or 3 sections:
1. Root specs
2. Concretized roots (if asked for with -c)
3. Installed specs
"""
tty.msg("In environment %s" % env.name)
@@ -297,56 +299,6 @@ def root_decorator(spec, string):
print()
def _find_query(args, env):
q_args = query_arguments(args)
concretized_but_not_installed = list()
if env:
all_env_specs = env.all_specs()
if args.constraint:
init_specs = cmd.parse_specs(args.constraint)
env_specs = env.all_matching_specs(*init_specs)
else:
env_specs = all_env_specs
spec_hashes = set(x.dag_hash() for x in env_specs)
specs_meeting_q_args = set(spack.store.STORE.db.query(hashes=spec_hashes, **q_args))
results = list()
with spack.store.STORE.db.read_transaction():
for spec in env_specs:
if not spec.installed:
concretized_but_not_installed.append(spec)
if spec in specs_meeting_q_args:
results.append(spec)
else:
results = args.specs(**q_args)
# use groups by default except with format.
if args.groups is None:
args.groups = not args.format
# Exit early with an error code if no package matches the constraint
if concretized_but_not_installed and args.show_concretized:
pass
elif results:
pass
elif args.constraint:
raise cmd.NoSpecMatches()
# If tags have been specified on the command line, filter by tags
if args.tags:
packages_with_tags = spack.repo.PATH.packages_with_tags(*args.tags)
results = [x for x in results if x.name in packages_with_tags]
concretized_but_not_installed = [
x for x in concretized_but_not_installed if x.name in packages_with_tags
]
if args.loaded:
results = cmd.filter_loaded_specs(results)
return results, concretized_but_not_installed
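The helper's contract, as a sketch inferred from the body above:

# _find_query returns two lists:
#   results                        -> installed specs matching the query
#   concretized_but_not_installed  -> env specs concretized but not installed
# and raises cmd.NoSpecMatches when a constraint matched nothing.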
def find(parser, args):
env = ev.active_environment()
@@ -355,12 +307,34 @@ def find(parser, args):
if not env and args.show_concretized:
tty.die("-c / --show-concretized requires an active environment")
try:
results, concretized_but_not_installed = _find_query(args, env)
except cmd.NoSpecMatches:
# Note: this uses args.constraint vs. args.constraint_specs because
# the latter only exists if you call args.specs()
tty.die(f"No package matches the query: {' '.join(args.constraint)}")
if env:
if args.constraint:
init_specs = spack.cmd.parse_specs(args.constraint)
results = env.all_matching_specs(*init_specs)
else:
results = env.all_specs()
else:
q_args = query_arguments(args)
results = args.specs(**q_args)
decorator = make_env_decorator(env) if env else lambda s, f: f
# use groups by default except with format.
if args.groups is None:
args.groups = not args.format
# Exit early with an error code if no package matches the constraint
if not results and args.constraint:
constraint_str = " ".join(str(s) for s in args.constraint_specs)
tty.die(f"No package matches the query: {constraint_str}")
# If tags have been specified on the command line, filter by tags
if args.tags:
packages_with_tags = spack.repo.PATH.packages_with_tags(*args.tags)
results = [x for x in results if x.name in packages_with_tags]
if args.loaded:
results = spack.cmd.filter_loaded_specs(results)
if args.install_status or args.show_concretized:
status_fn = spack.spec.Spec.install_status
@@ -371,16 +345,14 @@ def find(parser, args):
if args.json:
cmd.display_specs_as_json(results, deps=args.deps)
else:
decorator = make_env_decorator(env) if env else lambda s, f: f
if not args.format:
if env:
display_env(env, args, decorator, results)
if not args.only_roots:
display_results = list(results)
if args.show_concretized:
display_results += concretized_but_not_installed
display_results = results
if not args.show_concretized:
display_results = list(x for x in results if x.installed)
cmd.display_specs(
display_results, args, decorator=decorator, all_headers=True, status_fn=status_fn
)
@@ -398,9 +370,13 @@ def find(parser, args):
concretized_suffix += " (show with `spack find -c`)"
pkg_type = "loaded" if args.loaded else "installed"
cmd.print_how_many_pkgs(results, pkg_type, suffix=installed_suffix)
spack.cmd.print_how_many_pkgs(
list(x for x in results if x.installed), pkg_type, suffix=installed_suffix
)
if env:
cmd.print_how_many_pkgs(
concretized_but_not_installed, "concretized", suffix=concretized_suffix
spack.cmd.print_how_many_pkgs(
list(x for x in results if not x.installed),
"concretized",
suffix=concretized_suffix,
)

View File

@@ -80,8 +80,8 @@ def find_matching_specs(specs, allow_multiple_matches=False):
has_errors = True
# No installed package matches the query
if len(matching) == 0 and spec is not None:
tty.die(f"{spec} does not match any installed packages.")
if len(matching) == 0 and spec is not any:
tty.die("{0} does not match any installed packages.".format(spec))
specs_from_cli.extend(matching)
@@ -98,9 +98,8 @@ def do_mark(specs, explicit):
specs (list): list of specs to be marked
explicit (bool): whether to mark specs as explicitly installed
"""
with spack.store.STORE.db.write_transaction():
for spec in specs:
spack.store.STORE.db.mark(spec, "explicit", explicit)
for spec in specs:
spack.store.STORE.db.update_explicit(spec, explicit)
def mark_specs(args, specs):
@@ -117,6 +116,6 @@ def mark(parser, args):
" Use `spack mark --all` to mark ALL packages.",
)
# [None] here handles the --all case by forcing all specs to be returned
specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
# [any] here handles the --all case by forcing all specs to be returned
specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
mark_specs(args, specs)

View File

@@ -231,133 +231,31 @@ def setup_parser(subparser):
)
def _configure_access_pair(
args, id_tok, id_variable_tok, secret_tok, secret_variable_tok, default=None
):
"""Configure the access_pair options"""
# Check if any of the arguments are set to update this access_pair.
# If none are set, then skip computing the new access pair
args_id = getattr(args, id_tok)
args_id_variable = getattr(args, id_variable_tok)
args_secret = getattr(args, secret_tok)
args_secret_variable = getattr(args, secret_variable_tok)
if not any([args_id, args_id_variable, args_secret, args_secret_variable]):
return None
def _default_value(id_):
if isinstance(default, list):
return default[0] if id_ == "id" else default[1]
elif isinstance(default, dict):
return default.get(id_)
else:
return None
def _default_variable(id_):
if isinstance(default, dict):
return default.get(id_ + "_variable")
else:
return None
id_ = None
id_variable = None
secret = None
secret_variable = None
# Get the value/default value if the argument of the inverse
if not args_id_variable:
id_ = getattr(args, id_tok) or _default_value("id")
if not args_id:
id_variable = getattr(args, id_variable_tok) or _default_variable("id")
if not args_secret_variable:
secret = getattr(args, secret_tok) or _default_value("secret")
if not args_secret:
secret_variable = getattr(args, secret_variable_tok) or _default_variable("secret")
if (id_ or id_variable) and (secret or secret_variable):
if secret:
if not id_:
raise SpackError("Cannot add mirror with a variable id and text secret")
return [id_, secret]
else:
return dict(
[
(("id", id_) if id_ else ("id_variable", id_variable)),
("secret_variable", secret_variable),
]
)
else:
if any([id_, id_variable, secret, secret_variable]):
id_arg_tok = id_tok.replace("_", "-")
secret_arg_tok = secret_tok.replace("_", "-")
tty.warn(
"Expected both parts of the access pair to be specified. "
f"(i.e. --{id_arg_tok} and --{secret_arg_tok})"
)
return None
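The possible return shapes of _configure_access_pair, as a sketch (names follow the parser flags above; the id and variable names are made up):

# ["AKIAXXXX", "plaintext-secret"]                       # id + secret as text
# {"id": "AKIAXXXX", "secret_variable": "S3_SECRET"}     # secret from env var
# {"id_variable": "S3_ID", "secret_variable": "S3_SECRET"}
# None                                                   # nothing configured
# (a text secret with a variable id raises SpackError, per the check above)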
def mirror_add(args):
"""add a mirror to Spack"""
if (
args.s3_access_key_id
or args.s3_access_key_secret
or args.s3_access_token
or args.s3_access_key_id_variable
or args.s3_access_key_secret_variable
or args.s3_access_token_variable
or args.s3_profile
or args.s3_endpoint_url
or args.type
or args.oci_username
or args.oci_password
or args.oci_username_variable
or args.oci_password_variable
or args.autopush
or args.signed is not None
):
connection = {"url": args.url}
# S3 Connection
if args.s3_access_key_secret:
tty.warn(
"Configuring mirror secrets as plain text with --s3-access-key-secret is "
"deprecated. Use --s3-access-key-secret-variable instead"
)
if args.oci_password:
tty.warn(
"Configuring mirror secrets as plain text with --oci-password is deprecated. "
"Use --oci-password-variable instead"
)
access_pair = _configure_access_pair(
args,
"s3_access_key_id",
"s3_access_key_id_variable",
"s3_access_key_secret",
"s3_access_key_secret_variable",
)
if access_pair:
connection["access_pair"] = access_pair
if args.s3_access_key_id and args.s3_access_key_secret:
connection["access_pair"] = [args.s3_access_key_id, args.s3_access_key_secret]
if args.s3_access_token:
connection["access_token"] = args.s3_access_token
elif args.s3_access_token_variable:
connection["access_token_variable"] = args.s3_access_token_variable
if args.s3_profile:
connection["profile"] = args.s3_profile
if args.s3_endpoint_url:
connection["endpoint_url"] = args.s3_endpoint_url
# OCI Connection
access_pair = _configure_access_pair(
args, "oci_username", "oci_username_variable", "oci_password", "oci_password_variable"
)
if access_pair:
connection["access_pair"] = access_pair
if args.oci_username and args.oci_password:
connection["access_pair"] = [args.oci_username, args.oci_password]
if args.type:
connection["binary"] = "binary" in args.type
connection["source"] = "source" in args.type
@@ -387,35 +285,16 @@ def _configure_mirror(args):
changes = {}
if args.url:
changes["url"] = args.url
default_access_pair = entry._get_value("access_pair", direction or "fetch")
# TODO: Init access_pair args with the fetch/push/base values in the current mirror state
access_pair = _configure_access_pair(
args,
"s3_access_key_id",
"s3_access_key_id_variable",
"s3_access_key_secret",
"s3_access_key_secret_variable",
default=default_access_pair,
)
if access_pair:
changes["access_pair"] = access_pair
if args.s3_access_key_id and args.s3_access_key_secret:
changes["access_pair"] = [args.s3_access_key_id, args.s3_access_key_secret]
if args.s3_access_token:
changes["access_token"] = args.s3_access_token
if args.s3_profile:
changes["profile"] = args.s3_profile
if args.s3_endpoint_url:
changes["endpoint_url"] = args.s3_endpoint_url
access_pair = _configure_access_pair(
args,
"oci_username",
"oci_username_variable",
"oci_password",
"oci_password_variable",
default=default_access_pair,
)
if access_pair:
changes["access_pair"] = access_pair
if args.oci_username and args.oci_password:
changes["access_pair"] = [args.oci_username, args.oci_password]
if getattr(args, "signed", None) is not None:
changes["signed"] = args.signed
if getattr(args, "autopush", None) is not None:

View File

@@ -19,7 +19,6 @@
import spack.modules
import spack.modules.common
import spack.repo
from spack.cmd import MultipleSpecsMatch, NoSpecMatches
from spack.cmd.common import arguments
description = "manipulate module files"
@@ -92,6 +91,18 @@ def add_loads_arguments(subparser):
arguments.add_common_arguments(subparser, ["recurse_dependencies"])
class MultipleSpecsMatch(Exception):
"""Raised when multiple specs match a constraint, in a context where
this is not allowed.
"""
class NoSpecMatches(Exception):
"""Raised when no spec matches a constraint, in a context where
this is not allowed.
"""
def one_spec_or_raise(specs):
"""Ensures exactly one spec has been selected, or raises the appropriate
exception.
@@ -367,10 +378,7 @@ def refresh(module_type, specs, args):
def modules_cmd(parser, args, module_type, callbacks=callbacks):
# Qualifiers to be used when querying the db for specs
constraint_qualifiers = {
"refresh": {
"installed": True,
"predicate_fn": lambda x: spack.repo.PATH.exists(x.spec.name),
}
"refresh": {"installed": True, "known": lambda x: not spack.repo.PATH.exists(x)}
}
query_args = constraint_qualifiers.get(args.subparser_name, {})

View File

@@ -33,9 +33,8 @@ def patch(parser, args):
spack.config.set("config:checksum", False, scope="command_line")
specs = spack.cmd.parse_specs(args.specs, concretize=False)
specs = spack.cmd.matching_specs_from_env(specs)
for spec in specs:
_patch(spec.package)
_patch(spack.cmd.matching_spec_from_env(spec).package)
def _patch_env(env: ev.Environment):

View File

@@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import re
import sys
@@ -11,12 +12,13 @@
import spack
import spack.cmd
import spack.cmd.spec
import spack.cmd.common.arguments
import spack.config
import spack.environment
import spack.hash_types as ht
import spack.solver.asp as asp
import spack.spec
from spack.cmd.common import arguments
description = "concretize a specs using an ASP solver"
section = "developer"
@@ -39,6 +41,42 @@ def setup_parser(subparser):
" solutions models found by asp program\n"
" all all of the above",
)
# Below are arguments w.r.t. spec display (like spack spec)
arguments.add_common_arguments(subparser, ["long", "very_long", "namespaces"])
install_status_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(install_status_group, ["install_status", "no_install_status"])
subparser.add_argument(
"-y",
"--yaml",
action="store_const",
dest="format",
default=None,
const="yaml",
help="print concrete spec as yaml",
)
subparser.add_argument(
"-j",
"--json",
action="store_const",
dest="format",
default=None,
const="json",
help="print concrete spec as json",
)
subparser.add_argument(
"-c",
"--cover",
action="store",
default="nodes",
choices=["nodes", "edges", "paths"],
help="how extensively to traverse the DAG (default: nodes)",
)
subparser.add_argument(
"-t", "--types", action="store_true", default=False, help="show dependency types"
)
subparser.add_argument(
"--timers",
action="store_true",
@@ -48,8 +86,9 @@ def setup_parser(subparser):
subparser.add_argument(
"--stats", action="store_true", default=False, help="print out statistics from clingo"
)
subparser.add_argument("specs", nargs=argparse.REMAINDER, help="specs of packages")
spack.cmd.spec.setup_parser(subparser)
spack.cmd.common.arguments.add_concretizer_args(subparser)
def _process_result(result, show, required_format, kwargs):
@@ -125,12 +164,11 @@ def solve(parser, args):
# If we have an active environment, pick the specs from there
env = spack.environment.active_environment()
if args.specs:
specs = spack.cmd.parse_specs(args.specs)
elif env:
specs = list(env.user_specs)
else:
tty.die("spack solve requires at least one spec or an active environment")
if env and args.specs:
msg = "cannot give explicit specs when an environment is active"
raise RuntimeError(msg)
specs = list(env.user_specs) if env else spack.cmd.parse_specs(args.specs)
solver = asp.Solver()
output = sys.stdout if "asp" in show else None

View File

@@ -96,25 +96,26 @@ def spec(parser, args):
if args.install_status:
tree_context = spack.store.STORE.db.read_transaction
env = ev.active_environment()
# Use command line specified specs, otherwise try to use environment specs.
if args.specs:
input_specs = spack.cmd.parse_specs(args.specs)
concretized_specs = spack.cmd.parse_specs(args.specs, concretize=True)
specs = list(zip(input_specs, concretized_specs))
elif env:
env.concretize()
specs = env.concretized_specs()
else:
env = ev.active_environment()
if env:
env.concretize()
specs = env.concretized_specs()
if not args.format:
# environments are printed together in a combined tree() invocation,
# except when using --yaml or --json, which we print spec by spec below.
tree_kwargs["key"] = spack.traverse.by_dag_hash
tree_kwargs["hashes"] = args.long or args.very_long
print(spack.spec.tree([concrete for _, concrete in specs], **tree_kwargs))
return
else:
tty.die("spack spec requires at least one spec or an active environment")
if not args.format:
tree_kwargs["key"] = spack.traverse.by_dag_hash
tree_kwargs["hashes"] = args.long or args.very_long
print(spack.spec.tree([concrete for _, concrete in specs], **tree_kwargs))
return
else:
tty.die("spack spec requires at least one spec or an active environment")
for input, output in specs:
# With --yaml or --json, just print the raw specs to output

View File

@@ -47,8 +47,8 @@ def stage(parser, args):
if len(specs) > 1 and custom_path:
tty.die("`--path` requires a single spec, but multiple were provided")
specs = spack.cmd.matching_specs_from_env(specs)
for spec in specs:
spec = spack.cmd.matching_spec_from_env(spec)
pkg = spec.package
if custom_path:

View File

@@ -165,7 +165,7 @@ def test_run(args):
if args.fail_fast:
spack.config.set("config:fail_fast", True, scope="command_line")
explicit = args.explicit or None
explicit = args.explicit or any
explicit_str = "explicitly " if args.explicit else ""
# Get specs to test

View File

@@ -90,7 +90,6 @@ def find_matching_specs(
env: optional active environment
specs: list of specs to be matched against installed packages
allow_multiple_matches: if True multiple matches are admitted
origin: origin of the spec
Return:
list: list of specs
@@ -99,7 +98,7 @@ def find_matching_specs(
hashes = env.all_hashes() if env else None
# List of specs that match expressions given via command line
specs_from_cli: List["spack.spec.Spec"] = []
specs_from_cli = []
has_errors = False
for spec in specs:
install_query = [InstallStatuses.INSTALLED, InstallStatuses.DEPRECATED]
@@ -117,7 +116,7 @@ def find_matching_specs(
has_errors = True
# No installed package matches the query
if len(matching) == 0 and spec is not None:
if len(matching) == 0 and spec is not any:
if env:
pkg_type = "packages in environment '%s'" % env.name
else:
@@ -214,7 +213,7 @@ def get_uninstall_list(args, specs: List[spack.spec.Spec], env: Optional[ev.Envi
# Gets the list of installed specs that match the ones given via cli
# args.all takes care of the case where '-a' is given in the cli
matching_specs = find_matching_specs(env, specs, args.all, origin=args.origin)
matching_specs = find_matching_specs(env, specs, args.all)
dependent_specs = installed_dependents(matching_specs)
all_uninstall_specs = matching_specs + dependent_specs if args.dependents else matching_specs
other_dependent_envs = dependent_environments(all_uninstall_specs, current_env=env)
@@ -302,6 +301,6 @@ def uninstall(parser, args):
" Use `spack uninstall --all` to uninstall ALL packages.",
)
# [None] here handles the --all case by forcing all specs to be returned
specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
# [any] here handles the --all case by forcing all specs to be returned
specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
uninstall_specs(args, specs)

View File

@@ -4,23 +4,20 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import contextlib
import hashlib
import itertools
import json
import os
import platform
import re
import shutil
import sys
import tempfile
from typing import Dict, List, Optional, Sequence
from typing import List, Optional, Sequence
import llnl.path
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.filesystem import path_contains_subdirectory, paths_containing_libs
import spack.caches
import spack.error
import spack.schema.environment
import spack.spec
@@ -29,7 +26,6 @@
import spack.util.module_cmd
import spack.version
from spack.util.environment import filter_system_paths
from spack.util.file_cache import FileCache
__all__ = ["Compiler"]
@@ -38,7 +34,7 @@
@llnl.util.lang.memoized
def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()) -> str:
def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
"""Invokes the compiler at a given path passing a single
version argument and returns the output.
@@ -61,7 +57,7 @@ def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()) -
return output
def get_compiler_version_output(compiler_path, *args, **kwargs) -> str:
def get_compiler_version_output(compiler_path, *args, **kwargs):
"""Wrapper for _get_compiler_version_output()."""
# This ensures that we memoize compiler output by *absolute path*,
# not just executable name. If we don't do this, and the path changes
@@ -279,7 +275,7 @@ def __init__(
operating_system,
target,
paths,
modules: Optional[List[str]] = None,
modules=None,
alias=None,
environment=None,
extra_rpaths=None,
@@ -294,7 +290,6 @@ def __init__(
self.environment = environment or {}
self.extra_rpaths = extra_rpaths or []
self.enable_implicit_rpaths = enable_implicit_rpaths
self.cache = COMPILER_CACHE
self.cc = paths[0]
self.cxx = paths[1]
@@ -395,11 +390,15 @@ def real_version(self):
E.g. C++11 flag checks.
"""
real_version_str = self.cache.get(self).real_version
if not real_version_str or real_version_str == "unknown":
return self.version
return spack.version.StandardVersion.from_string(real_version_str)
if not self._real_version:
try:
real_version = spack.version.Version(self.get_real_version())
if real_version == spack.version.Version("unknown"):
return self.version
self._real_version = real_version
except spack.util.executable.ProcessError:
self._real_version = self.version
return self._real_version
def implicit_rpaths(self) -> List[str]:
if self.enable_implicit_rpaths is False:
@@ -428,11 +427,6 @@ def default_dynamic_linker(self) -> Optional[str]:
@property
def default_libc(self) -> Optional["spack.spec.Spec"]:
"""Determine libc targeted by the compiler from link line"""
# technically this should be testing the target platform of the compiler, but we don't have
# that, so stick to host platform for now.
if sys.platform in ("darwin", "win32"):
return None
dynamic_linker = self.default_dynamic_linker
if not dynamic_linker:
@@ -451,23 +445,19 @@ def required_libs(self):
@property
def compiler_verbose_output(self) -> Optional[str]:
"""Verbose output from compiling a dummy C source file. Output is cached."""
return self.cache.get(self).c_compiler_output
if not hasattr(self, "_compile_c_source_output"):
self._compile_c_source_output = self._compile_dummy_c_source()
return self._compile_c_source_output
def _compile_dummy_c_source(self) -> Optional[str]:
if self.cc:
cc = self.cc
ext = "c"
else:
cc = self.cxx
ext = "cc"
cc = self.cc if self.cc else self.cxx
if not cc or not self.verbose_flag:
return None
try:
tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
fout = os.path.join(tmpdir, "output")
fin = os.path.join(tmpdir, f"main.{ext}")
fin = os.path.join(tmpdir, "main.c")
with open(fin, "w") as csource:
csource.write(
@@ -569,7 +559,7 @@ def fc_pic_flag(self):
# Note: This is not a class method. The class methods are used to detect
# compilers on PATH based systems, and do not set up the run environment of
# the compiler. This method can be called on `module` based systems as well
def get_real_version(self) -> str:
def get_real_version(self):
"""Query the compiler for its version.
This is the "real" compiler version, regardless of what is in the
@@ -579,17 +569,14 @@ def get_real_version(self) -> str:
modifications) to enable the compiler to run properly on any platform.
"""
cc = spack.util.executable.Executable(self.cc)
try:
with self.compiler_environment():
output = cc(
self.version_argument,
output=str,
error=str,
ignore_errors=tuple(self.ignore_version_errors),
)
return self.extract_version_from_output(output)
except spack.util.executable.ProcessError:
return "unknown"
with self.compiler_environment():
output = cc(
self.version_argument,
output=str,
error=str,
ignore_errors=tuple(self.ignore_version_errors),
)
return self.extract_version_from_output(output)
@property
def prefix(self):
@@ -616,7 +603,7 @@ def default_version(cls, cc):
@classmethod
@llnl.util.lang.memoized
def extract_version_from_output(cls, output: str) -> str:
def extract_version_from_output(cls, output):
"""Extracts the version from compiler's output."""
match = re.search(cls.version_regex, output)
return match.group(1) if match else "unknown"
@@ -745,106 +732,3 @@ def __init__(self, compiler, feature, flag_name, ver_string=None):
)
+ " implement the {0} property and submit a pull request or issue.".format(flag_name),
)
class CompilerCacheEntry:
"""Deserialized cache entry for a compiler"""
__slots__ = ["c_compiler_output", "real_version"]
def __init__(self, c_compiler_output: Optional[str], real_version: str):
self.c_compiler_output = c_compiler_output
self.real_version = real_version
@classmethod
def from_dict(cls, data: Dict[str, Optional[str]]):
if not isinstance(data, dict):
raise ValueError(f"Invalid {cls.__name__} data")
c_compiler_output = data.get("c_compiler_output")
real_version = data.get("real_version")
if not isinstance(real_version, str) or not isinstance(
c_compiler_output, (str, type(None))
):
raise ValueError(f"Invalid {cls.__name__} data")
return cls(c_compiler_output, real_version)
class CompilerCache:
"""Base class for compiler output cache. Default implementation does not cache anything."""
def value(self, compiler: Compiler) -> Dict[str, Optional[str]]:
return {
"c_compiler_output": compiler._compile_dummy_c_source(),
"real_version": compiler.get_real_version(),
}
def get(self, compiler: Compiler) -> CompilerCacheEntry:
return CompilerCacheEntry.from_dict(self.value(compiler))
class FileCompilerCache(CompilerCache):
"""Cache for compiler output, which is used to determine implicit link paths, the default libc
version, and the compiler version."""
name = os.path.join("compilers", "compilers.json")
def __init__(self, cache: "FileCache") -> None:
self.cache = cache
self.cache.init_entry(self.name)
self._data: Dict[str, Dict[str, Optional[str]]] = {}
def _get_entry(self, key: str) -> Optional[CompilerCacheEntry]:
try:
return CompilerCacheEntry.from_dict(self._data[key])
except ValueError:
del self._data[key]
except KeyError:
pass
return None
def get(self, compiler: Compiler) -> CompilerCacheEntry:
# Cache hit
try:
with self.cache.read_transaction(self.name) as f:
assert f is not None
self._data = json.loads(f.read())
assert isinstance(self._data, dict)
except (json.JSONDecodeError, AssertionError):
self._data = {}
key = self._key(compiler)
value = self._get_entry(key)
if value is not None:
return value
# Cache miss
with self.cache.write_transaction(self.name) as (old, new):
try:
assert old is not None
self._data = json.loads(old.read())
assert isinstance(self._data, dict)
except (json.JSONDecodeError, AssertionError):
self._data = {}
# Use cache entry that may have been created by another process in the meantime.
entry = self._get_entry(key)
# Finally compute the cache entry
if entry is None:
self._data[key] = self.value(compiler)
entry = CompilerCacheEntry.from_dict(self._data[key])
new.write(json.dumps(self._data, separators=(",", ":")))
return entry
def _key(self, compiler: Compiler) -> str:
as_bytes = json.dumps(compiler.to_dict(), separators=(",", ":")).encode("utf-8")
return hashlib.sha256(as_bytes).hexdigest()
def _make_compiler_cache():
return FileCompilerCache(spack.caches.MISC_CACHE)
COMPILER_CACHE: CompilerCache = llnl.util.lang.Singleton(_make_compiler_cache) # type: ignore
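A minimal, self-contained sketch of the keying and validation scheme introduced above: entries are keyed by a sha256 of the compiler's JSON representation and type-checked before use. The compiler dictionary below is a made-up stand-in for ``Compiler.to_dict()``.
import hashlib
import json
# Hypothetical stand-in for Compiler.to_dict(); only the keying matters here
compiler_dict = {"spec": "gcc@12.3.0", "paths": {"cc": "/usr/bin/gcc"}}
as_bytes = json.dumps(compiler_dict, separators=(",", ":")).encode("utf-8")
key = hashlib.sha256(as_bytes).hexdigest()
# Entries are validated before use, mirroring CompilerCacheEntry.from_dict
data = {"c_compiler_output": None, "real_version": "12.3.0"}
assert isinstance(data["real_version"], str)
assert isinstance(data["c_compiler_output"], (str, type(None)))
print(key[:7], data["real_version"])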

View File

@@ -116,5 +116,5 @@ def fflags(self):
def _handle_default_flag_addtions(self):
# This is a known issue for AOCC 3.0 see:
# https://developer.amd.com/wp-content/resources/AOCC-3.0-Install-Guide.pdf
if self.version.satisfies(ver("3.0.0")):
if self.real_version.satisfies(ver("3.0.0")):
return "-Wno-unused-command-line-argument " "-mllvm -eliminate-similar-expr=false"

View File

@@ -124,8 +124,9 @@ def setup_custom_environment(self, pkg, env):
# Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
# Always pass flags to disable deprecation warnings, since these warnings can
# confuse tools that parse the output of compiler commands (e.g. version checks).
if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")

View File

@@ -151,14 +151,11 @@ def setup_custom_environment(self, pkg, env):
# Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
# Always pass flags to disable deprecation warnings, since these warnings can
# confuse tools that parse the output of compiler commands (e.g. version checks).
# This is really only needed for Fortran, since oneapi@ should be using either
# icx+icpx+ifx or icx+icpx+ifort. But to be on the safe side (some users may
# want to try to swap icpx against icpc, for example), and since the Intel LLVM
# compilers accept these diag-disable flags, we apply them for all compilers.
if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")
# 2024 release bumped the libsycl version because of an ABI

View File

@@ -2,20 +2,14 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""High-level functions to concretize list of specs"""
import sys
import time
"""
(DEPRECATED) Used to contain the code for the original concretizer
"""
from contextlib import contextmanager
from typing import Iterable, Optional, Sequence, Tuple, Union
from itertools import chain
import llnl.util.tty as tty
import spack.compilers
import spack.config
import spack.error
import spack.repo
import spack.util.parallel
from spack.spec import ArchSpec, CompilerSpec, Spec
CHECK_COMPILER_EXISTENCE = True
@@ -36,167 +30,67 @@ def enable_compiler_existence_check():
CHECK_COMPILER_EXISTENCE = saved
SpecPair = Tuple[Spec, Spec]
SpecLike = Union[Spec, str]
TestsType = Union[bool, Iterable[str]]
def find_spec(spec, condition, default=None):
"""Searches the dag from spec in an intelligent order and looks
for a spec that matches a condition"""
# First search parents, then search children
deptype = ("build", "link")
dagiter = chain(
spec.traverse(direction="parents", deptype=deptype, root=False),
spec.traverse(direction="children", deptype=deptype, root=False),
)
visited = set()
for relative in dagiter:
if condition(relative):
return relative
visited.add(id(relative))
# Then search all other relatives in the DAG *except* spec
for relative in spec.root.traverse(deptype="all"):
if relative is spec:
continue
if id(relative) in visited:
continue
if condition(relative):
return relative
# Finally search spec itself.
if condition(spec):
return spec
return default # Nothing matched the condition; return default.
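A hedged usage sketch of ``find_spec`` as restored above; it assumes a working Spack installation in which ``hdf5`` concretizes, and only illustrates the search order (parents, then children, then the rest of the DAG, then the spec itself).
import spack.concretize
from spack.spec import Spec
root = Spec("hdf5").concretized()
# zlib is a transitive dependency of hdf5, so the DAG-wide pass finds it
dep = spack.concretize.find_spec(root, lambda s: s.name == "zlib", default=None)
print(dep)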
def concretize_specs_together(
abstract_specs: Sequence[SpecLike], tests: TestsType = False
) -> Sequence[Spec]:
def concretize_specs_together(*abstract_specs, **kwargs):
"""Given a number of specs as input, tries to concretize them together.
Args:
abstract_specs: abstract specs to be concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
tests (bool or list or set): False to run no tests, True to test
all packages, or a list of package names to run tests for just those packages
*abstract_specs: abstract specs to be concretized, given either
as Specs or strings
Returns:
List of concretized specs
"""
import spack.solver.asp
allow_deprecated = spack.config.get("config:deprecated", False)
solver = spack.solver.asp.Solver()
result = solver.solve(abstract_specs, tests=tests, allow_deprecated=allow_deprecated)
result = solver.solve(
abstract_specs, tests=kwargs.get("tests", False), allow_deprecated=allow_deprecated
)
return [s.copy() for s in result.specs]
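A hedged sketch of calling the rewritten function through its new keyword signature (a single list instead of ``*abstract_specs``); assumes a working Spack installation.
import spack.concretize
concrete = spack.concretize.concretize_specs_together(["zlib", "cmake"], tests=False)
for spec in concrete:
    print(spec.format("{name}{@version}"))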
def concretize_together(
spec_list: Sequence[SpecPair], tests: TestsType = False
) -> Sequence[SpecPair]:
"""Given a number of specs as input, tries to concretize them together.
Args:
spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
already concrete spec or None if not yet concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
"""
to_concretize = [concrete if concrete else abstract for abstract, concrete in spec_list]
abstract_specs = [abstract for abstract, _ in spec_list]
concrete_specs = concretize_specs_together(to_concretize, tests=tests)
return list(zip(abstract_specs, concrete_specs))
def concretize_together_when_possible(
spec_list: Sequence[SpecPair], tests: TestsType = False
) -> Sequence[SpecPair]:
"""Given a number of specs as input, tries to concretize them together to the extent possible.
See documentation for ``unify: when_possible`` concretization for the precise definition of
"to the extent possible".
Args:
spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
already concrete spec or None if not yet concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
"""
import spack.solver.asp
to_concretize = [concrete if concrete else abstract for abstract, concrete in spec_list]
old_concrete_to_abstract = {
concrete: abstract for (abstract, concrete) in spec_list if concrete
}
result_by_user_spec = {}
solver = spack.solver.asp.Solver()
allow_deprecated = spack.config.get("config:deprecated", False)
for result in solver.solve_in_rounds(
to_concretize, tests=tests, allow_deprecated=allow_deprecated
):
result_by_user_spec.update(result.specs_by_input)
# If the "abstract" spec is a concrete spec from the previous concretization
# translate it back to an abstract spec. Otherwise, keep the abstract spec
return [
(old_concrete_to_abstract.get(abstract, abstract), concrete)
for abstract, concrete in sorted(result_by_user_spec.items())
]
def concretize_separately(
spec_list: Sequence[SpecPair], tests: TestsType = False
) -> Sequence[SpecPair]:
"""Concretizes the input specs separately from each other.
Args:
spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
already concrete spec or None if not yet concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
"""
import spack.bootstrap
to_concretize = [abstract for abstract, concrete in spec_list if not concrete]
args = [
(i, str(abstract), tests)
for i, abstract in enumerate(to_concretize)
if not abstract.concrete
]
ret = [(i, abstract) for i, abstract in enumerate(to_concretize) if abstract.concrete]
# Ensure we don't try to bootstrap clingo in parallel
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
# Ensure all the indexes have been built or updated, since
# otherwise the processes in the pool may timeout on waiting
# for a write lock. We do this indirectly by retrieving the
# provider index, which should in turn trigger the update of
# all the indexes if there's any need for that.
_ = spack.repo.PATH.provider_index
# Ensure we have compilers in compilers.yaml to avoid that
# processes try to write the config file in parallel
_ = spack.compilers.all_compilers_config(spack.config.CONFIG)
# Early return if there is nothing to do
if len(args) == 0:
# Still have to combine the things that were passed in as abstract with the things
# that were passed in as pairs
return [(abstract, concrete) for abstract, (_, concrete) in zip(to_concretize, ret)] + [
(abstract, concrete) for abstract, concrete in spec_list if concrete
]
# Solve the environment in parallel on Linux
# TODO: support parallel concretization on macOS and Windows
num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True))
for j, (i, concrete, duration) in enumerate(
spack.util.parallel.imap_unordered(
_concretize_task, args, processes=num_procs, debug=tty.is_debug(), maxtaskperchild=1
)
):
ret.append((i, concrete))
percentage = (j + 1) / len(args) * 100
tty.verbose(
f"{duration:6.1f}s [{percentage:3.0f}%] {concrete.cformat('{hash:7}')} "
f"{to_concretize[i].colored_str}"
)
sys.stdout.flush()
# Add specs in original order
ret.sort(key=lambda x: x[0])
return [(abstract, concrete) for abstract, (_, concrete) in zip(to_concretize, ret)] + [
(abstract, concrete) for abstract, concrete in spec_list if concrete
]
def _concretize_task(packed_arguments: Tuple[int, str, TestsType]) -> Tuple[int, Spec, float]:
index, spec_str, tests = packed_arguments
with tty.SuppressOutput(msg_enabled=False):
start = time.time()
spec = Spec(spec_str).concretized(tests=tests)
return index, spec, time.time() - start
class UnavailableCompilerVersionError(spack.error.SpackError):
"""Raised when there is no available compiler that satisfies a
compiler spec."""
def __init__(self, compiler_spec: CompilerSpec, arch: Optional[ArchSpec] = None) -> None:
err_msg = f"No compilers with spec {compiler_spec} found"
def __init__(self, compiler_spec, arch=None):
err_msg = "No compilers with spec {0} found".format(compiler_spec)
if arch:
err_msg += f" for operating system {arch.os} and target {arch.target}."
err_msg += " for operating system {0} and target {1}.".format(arch.os, arch.target)
super().__init__(
err_msg,

View File

@@ -427,10 +427,6 @@ def __init__(self, *scopes: ConfigScope) -> None:
self.push_scope(scope)
self.format_updates: Dict[str, List[ConfigScope]] = collections.defaultdict(list)
def ensure_unwrapped(self) -> "Configuration":
"""Ensure we unwrap this object from any dynamic wrapper (like Singleton)"""
return self
@_config_mutator
def push_scope(self, scope: ConfigScope) -> None:
"""Add a higher precedence scope to the Configuration."""
@@ -718,7 +714,7 @@ def print_section(self, section: str, blame: bool = False, *, scope=None) -> Non
@contextlib.contextmanager
def override(
path_or_scope: Union[ConfigScope, str], value: Optional[Any] = None
) -> Generator[Configuration, None, None]:
) -> Generator[Union[lang.Singleton, Configuration], None, None]:
"""Simple way to override config settings within a context.
Arguments:
@@ -756,7 +752,13 @@ def override(
assert scope is overrides
def _add_platform_scope(cfg: Configuration, name: str, path: str, writable: bool = True) -> None:
#: configuration scopes added on the command line set by ``spack.main.main()``
COMMAND_LINE_SCOPES: List[str] = []
def _add_platform_scope(
cfg: Union[Configuration, lang.Singleton], name: str, path: str, writable: bool = True
) -> None:
"""Add a platform-specific subdirectory for the current platform."""
platform = spack.platforms.host().name
scope = DirectoryConfigScope(
@@ -790,7 +792,9 @@ def config_paths_from_entry_points() -> List[Tuple[str, str]]:
return config_paths
def _add_command_line_scopes(cfg: Configuration, command_line_scopes: List[str]) -> None:
def _add_command_line_scopes(
cfg: Union[Configuration, lang.Singleton], command_line_scopes: List[str]
) -> None:
"""Add additional scopes from the --config-scope argument, either envs or dirs."""
import spack.environment.environment as env # circular import
@@ -860,11 +864,18 @@ def create() -> Configuration:
# Each scope can have per-platfom overrides in subdirectories
_add_platform_scope(cfg, name, path)
# add command-line scopes
_add_command_line_scopes(cfg, COMMAND_LINE_SCOPES)
# we make a special scope for spack commands so that they can
# override configuration options.
cfg.push_scope(InternalConfigScope("command_line"))
return cfg
#: This is the singleton configuration instance for Spack.
CONFIG: Configuration = lang.Singleton(create) # type: ignore
CONFIG: Union[Configuration, lang.Singleton] = lang.Singleton(create)
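A minimal sketch of the lazy ``Singleton`` wrapper used for ``CONFIG``: the factory runs once, on first access of ``instance``.
import llnl.util.lang as lang
def _make_config():
    print("factory runs exactly once, on first access")
    return {"config": {"verify_ssl": True}}
CFG = lang.Singleton(_make_config)
# Nothing is created yet; touching .instance triggers the factory
print(CFG.instance["config"]["verify_ssl"])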
def add_from_file(filename: str, scope: Optional[str] = None) -> None:

View File

@@ -32,7 +32,6 @@
Container,
Dict,
Generator,
Iterable,
List,
NamedTuple,
Optional,
@@ -291,6 +290,55 @@ def __reduce__(self):
return ForbiddenLock, tuple()
_QUERY_DOCSTRING = """
Args:
query_spec: queries iterate through specs in the database and
return those that satisfy the supplied ``query_spec``. If
query_spec is ``any``, this will match all specs in the
database. If it is a spec, we'll evaluate
``spec.satisfies(query_spec)``
known (bool or None): Specs that are "known" are those
for which Spack can locate a ``package.py`` file -- i.e.,
Spack "knows" how to install them. Specs that are unknown may
represent packages that existed in a previous version of
Spack, but have since either changed their name or
been removed
installed (bool or InstallStatus or typing.Iterable or None):
if ``True``, includes only installed
specs in the search; if ``False`` only missing specs, and if
``any``, all specs in database. If an InstallStatus or iterable
of InstallStatus, returns specs whose install status
(installed, deprecated, or missing) matches (one of) the
InstallStatus. (default: True)
explicit (bool or None): A spec that was installed
following a specific user request is marked as explicit. If
instead it was pulled-in as a dependency of a user requested
spec it's considered implicit.
start_date (datetime.datetime or None): filters the query
discarding specs that have been installed before ``start_date``.
end_date (datetime.datetime or None): filters the query discarding
specs that have been installed after ``end_date``.
hashes (Container): list or set of hashes that we can use to
restrict the search
in_buildcache (bool or None): Specs that are marked in
this database as part of an associated binary cache are
``in_buildcache``. All other specs are not. This field is used
for querying mirror indices. Default is ``any``.
Returns:
list of specs that match the query
"""
class LockConfiguration(NamedTuple):
"""Data class to configure locks in Database objects
@@ -556,9 +604,6 @@ def _path(self, spec: "spack.spec.Spec") -> pathlib.Path:
return self.dir / f"{spec.name}-{spec.dag_hash()}"
SelectType = Callable[[InstallRecord], bool]
class Database:
#: Fields written for each install record
record_fields: Tuple[str, ...] = DEFAULT_INSTALL_RECORD_FIELDS
@@ -1200,7 +1245,7 @@ def _add(
self._data[key].explicit = explicit
@_autospec
def add(self, spec: "spack.spec.Spec", *, explicit: bool = False, allow_missing=False) -> None:
def add(self, spec: "spack.spec.Spec", *, explicit: bool = False) -> None:
"""Add spec at path to database, locking and reading DB to sync.
``add()`` will lock and read from the DB on disk.
@@ -1209,7 +1254,7 @@ def add(self, spec: "spack.spec.Spec", *, explicit: bool = False, allow_missing=
# TODO: ensure that spec is concrete?
# Entire add is transactional.
with self.write_transaction():
self._add(spec, explicit=explicit, allow_missing=allow_missing)
self._add(spec, explicit=explicit)
def _get_matching_spec_key(self, spec: "spack.spec.Spec", **kwargs) -> str:
"""Get the exact spec OR get a single spec that matches."""
@@ -1336,7 +1381,7 @@ def _deprecate(self, spec: "spack.spec.Spec", deprecator: "spack.spec.Spec") ->
self._data[spec_key] = spec_rec
@_autospec
def mark(self, spec: "spack.spec.Spec", key: str, value: Any) -> None:
def mark(self, spec: "spack.spec.Spec", key, value) -> None:
"""Mark an arbitrary record on a spec."""
with self.write_transaction():
return self._mark(spec, key, value)
@@ -1480,51 +1525,62 @@ def get_by_hash(self, dag_hash, default=None, installed=any):
def _query(
self,
query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
*,
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
explicit: Optional[bool] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
hashes: Optional[Iterable[str]] = None,
in_buildcache: Optional[bool] = None,
origin: Optional[str] = None,
) -> List["spack.spec.Spec"]:
query_spec=any,
known=any,
installed=True,
explicit=any,
start_date=None,
end_date=None,
hashes=None,
in_buildcache=any,
origin=None,
):
"""Run a query on the database."""
# Restrict the set of records over which we iterate first
matching_hashes = self._data
if hashes is not None:
matching_hashes = {h: self._data[h] for h in hashes if h in self._data}
# TODO: Specs are a lot like queries. Should there be a
# TODO: wildcard spec object, and should specs have attributes
# TODO: like installed and known that can be queried? Or are
# TODO: these really special cases that only belong here?
if isinstance(query_spec, str):
query_spec = spack.spec.Spec(query_spec)
if query_spec is not any:
if not isinstance(query_spec, spack.spec.Spec):
query_spec = spack.spec.Spec(query_spec)
if query_spec is not None and query_spec.concrete:
hash_key = query_spec.dag_hash()
if hash_key not in matching_hashes:
return []
matching_hashes = {hash_key: matching_hashes[hash_key]}
# Just look up concrete specs with hashes; no fancy search.
if query_spec.concrete:
# TODO: handling of hashes restriction is not particularly elegant.
hash_key = query_spec.dag_hash()
if hash_key in self._data and (not hashes or hash_key in hashes):
return [self._data[hash_key].spec]
else:
return []
# Abstract specs require more work -- currently we test
# against everything.
results = []
start_date = start_date or datetime.datetime.min
end_date = end_date or datetime.datetime.max
# save specs whose name doesn't match for last, to avoid a virtual check
deferred = []
for rec in matching_hashes.values():
for key, rec in self._data.items():
if hashes is not None and rec.spec.dag_hash() not in hashes:
continue
if origin and not (origin == rec.origin):
continue
if not rec.install_type_matches(installed):
continue
if in_buildcache is not None and rec.in_buildcache != in_buildcache:
if in_buildcache is not any and rec.in_buildcache != in_buildcache:
continue
if explicit is not None and rec.explicit != explicit:
if explicit is not any and rec.explicit != explicit:
continue
if predicate_fn is not None and not predicate_fn(rec):
if known is not any and known(rec.spec.name):
continue
if start_date or end_date:
@@ -1532,7 +1588,7 @@ def _query(
if not (start_date < inst_date < end_date):
continue
if query_spec is None or query_spec.concrete:
if query_spec is any:
results.append(rec.spec)
continue
@@ -1550,118 +1606,36 @@ def _query(
# If we did find something, the query spec can't be virtual because we matched an actual
# package installation, so skip the virtual check entirely. If we *didn't* find anything,
# check all the deferred specs *if* the query is virtual.
if not results and query_spec is not None and deferred and query_spec.virtual:
if not results and query_spec is not any and deferred and query_spec.virtual:
results = [spec for spec in deferred if spec.satisfies(query_spec)]
return results
def query_local(
self,
query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
*,
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
explicit: Optional[bool] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
hashes: Optional[List[str]] = None,
in_buildcache: Optional[bool] = None,
origin: Optional[str] = None,
) -> List["spack.spec.Spec"]:
"""Queries the local Spack database.
if _query.__doc__ is None:
_query.__doc__ = ""
_query.__doc__ += _QUERY_DOCSTRING
This function doesn't guarantee any sorting of the returned data for performance reasons,
since comparing specs for __lt__ may be an expensive operation.
def query_local(self, *args, **kwargs):
"""Query only the local Spack database.
Args:
query_spec: if query_spec is ``None``, match all specs in the database.
If it is a spec, return all specs matching ``spec.satisfies(query_spec)``.
predicate_fn: optional predicate taking an InstallRecord as argument, and returning
whether that record is selected for the query. It can be used to craft criteria
that need some data for selection not provided by the Database itself.
installed: if ``True``, includes only installed specs in the search. If ``False`` only
missing specs, and if ``any``, all specs in database. If an InstallStatus or
iterable of InstallStatus, returns specs whose install status matches at least
one of the InstallStatus.
explicit: a spec that was installed following a specific user request is marked as
explicit. If instead it was pulled-in as a dependency of a user requested spec
it's considered implicit.
start_date: if set considers only specs installed from the starting date.
end_date: if set considers only specs installed until the ending date.
in_buildcache: specs that are marked in this database as part of an associated binary
cache are ``in_buildcache``. All other specs are not. This field is used for
querying mirror indices. By default, it does not check this status.
hashes: list of hashes used to restrict the search
origin: origin of the spec
This function doesn't guarantee any sorting of the returned
data for performance reasons, since comparing specs for __lt__
may be an expensive operation.
"""
with self.read_transaction():
return self._query(
query_spec,
predicate_fn=predicate_fn,
installed=installed,
explicit=explicit,
start_date=start_date,
end_date=end_date,
hashes=hashes,
in_buildcache=in_buildcache,
origin=origin,
)
return self._query(*args, **kwargs)
def query(
self,
query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
*,
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
explicit: Optional[bool] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
in_buildcache: Optional[bool] = None,
hashes: Optional[List[str]] = None,
origin: Optional[str] = None,
install_tree: str = "all",
):
"""Queries the Spack database including all upstream databases.
if query_local.__doc__ is None:
query_local.__doc__ = ""
query_local.__doc__ += _QUERY_DOCSTRING
Args:
query_spec: if query_spec is ``None``, match all specs in the database.
If it is a spec, return all specs matching ``spec.satisfies(query_spec)``.
def query(self, *args, **kwargs):
"""Query the Spack database including all upstream databases.
predicate_fn: optional predicate taking an InstallRecord as argument, and returning
whether that record is selected for the query. It can be used to craft criteria
that need some data for selection not provided by the Database itself.
installed: if ``True``, includes only installed specs in the search. If ``False`` only
missing specs, and if ``any``, all specs in database. If an InstallStatus or
iterable of InstallStatus, returns specs whose install status matches at least
one of the InstallStatus.
explicit: a spec that was installed following a specific user request is marked as
explicit. If instead it was pulled-in as a dependency of a user requested spec
it's considered implicit.
start_date: if set considers only specs installed from the starting date.
end_date: if set considers only specs installed until the ending date.
in_buildcache: specs that are marked in this database as part of an associated binary
cache are ``in_buildcache``. All other specs are not. This field is used for
querying mirror indices. By default, it does not check this status.
hashes: list of hashes used to restrict the search
install_tree: query 'all' (default), 'local', 'upstream', or upstream path
origin: origin of the spec
Additional Arguments:
install_tree (str): query 'all' (default), 'local', 'upstream', or upstream path
"""
install_tree = kwargs.pop("install_tree", "all")
valid_trees = ["all", "upstream", "local", self.root] + [u.root for u in self.upstream_dbs]
if install_tree not in valid_trees:
msg = "Invalid install_tree argument to Database.query()\n"
@@ -1677,54 +1651,28 @@ def query(
# queries for upstream DBs need to *not* lock - we may not
# have permissions to do this and the upstream DBs won't know about
# us anyway (so e.g. they should never uninstall specs)
upstream_results.extend(
upstream_db._query(
query_spec,
predicate_fn=predicate_fn,
installed=installed,
explicit=explicit,
start_date=start_date,
end_date=end_date,
hashes=hashes,
in_buildcache=in_buildcache,
origin=origin,
)
or []
)
upstream_results.extend(upstream_db._query(*args, **kwargs) or [])
local_results: Set["spack.spec.Spec"] = set()
local_results = []
if install_tree in ("all", "local") or self.root == install_tree:
local_results = set(
self.query_local(
query_spec,
predicate_fn=predicate_fn,
installed=installed,
explicit=explicit,
start_date=start_date,
end_date=end_date,
hashes=hashes,
in_buildcache=in_buildcache,
origin=origin,
)
)
local_results = set(self.query_local(*args, **kwargs))
results = list(local_results) + list(x for x in upstream_results if x not in local_results)
return sorted(results)
def query_one(
self,
query_spec: Optional[Union[str, "spack.spec.Spec"]],
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
) -> Optional["spack.spec.Spec"]:
if query.__doc__ is None:
query.__doc__ = ""
query.__doc__ += _QUERY_DOCSTRING
def query_one(self, query_spec, known=any, installed=True):
"""Query for exactly one spec that matches the query spec.
Returns None if no installed package matches.
Raises an assertion error if more than one spec matches the
query. Returns None if no installed package matches.
Raises:
AssertionError: if more than one spec matches the query.
"""
concrete_specs = self.query(query_spec, predicate_fn=predicate_fn, installed=installed)
concrete_specs = self.query(query_spec, known=known, installed=installed)
assert len(concrete_specs) <= 1
return concrete_specs[0] if concrete_specs else None
@@ -1771,6 +1719,24 @@ def root(key, record):
if id(rec.spec) not in needed and rec.installed
]
def update_explicit(self, spec, explicit):
"""
Update the spec's explicit state in the database.
Args:
spec (spack.spec.Spec): the spec whose install record is being updated
explicit (bool): ``True`` if the package was requested explicitly
by the user, ``False`` if it was pulled in as a dependency of
an explicit package.
"""
rec = self.get_record(spec)
if explicit != rec.explicit:
with self.write_transaction():
message = "{s.name}@{s.version} : marking the package {0}"
status = "explicit" if explicit else "implicit"
tty.debug(message.format(status, s=spec))
rec.explicit = explicit
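The before/after call styles from this hunk, side by side in a hedged sketch; ``update_explicit`` is the helper restored here, while ``db.mark(spec, "explicit", True)`` is its generic replacement. Only one of the two exists in a given Spack version.
import spack.store
db = spack.store.STORE.db
spec = db.query_one("zlib")  # assumed installed; illustration only
if spec is not None:
    db.update_explicit(spec, True)       # dedicated helper (this version)
    # db.mark(spec, "explicit", True)    # generic marker (its replacement)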
class NoUpstreamVisitor:
"""Gives edges to upstream specs, but does follow edges from upstream specs."""

View File

@@ -64,7 +64,6 @@ class OpenMpi(Package):
"DirectiveMeta",
"DisableRedistribute",
"version",
"conditional",
"conflicts",
"depends_on",
"extends",
@@ -77,7 +76,6 @@ class OpenMpi(Package):
"build_system",
"requires",
"redistribute",
"can_splice",
]
_patch_order_index = 0
@@ -506,43 +504,6 @@ def _execute_provides(pkg: "spack.package_base.PackageBase"):
return _execute_provides
@directive("splice_specs")
def can_splice(
target: SpecType, *, when: SpecType, match_variants: Union[None, str, List[str]] = None
):
"""Packages can declare whether they are ABI-compatible with another package
and thus can be spliced into concrete versions of that package.
Args:
target: The spec that the current package is ABI-compatible with.
when: An anonymous spec constraining current package for when it is
ABI-compatible with target.
match_variants: A list of variants that must match
between target spec and current package, with special value '*'
which matches all variants. Example: a variant is defined on both
packages called json, and they are ABI-compatible whenever they agree on
the json variant (regardless of whether it is turned on or off). Note
that this cannot be applied to multi-valued variants and multi-valued
variants will be skipped by '*'.
"""
def _execute_can_splice(pkg: "spack.package_base.PackageBase"):
when_spec = _make_when_spec(when)
if isinstance(match_variants, str) and match_variants != "*":
raise ValueError(
"* is the only valid string for match_variants "
"if looking to provide a single variant, use "
f"[{match_variants}] instead"
)
if when_spec is None:
return
pkg.splice_specs[when_spec] = (spack.spec.Spec(target), match_variants)
return _execute_can_splice
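A hedged sketch of a package recipe using the ``can_splice`` directive removed in this hunk; the package, versions, and checksum are invented, and the snippet assumes a Spack version that still ships the directive.
from spack.package import *
class Libfoo(Package):
    """Hypothetical package illustrating an ABI-compatibility declaration."""
    homepage = "https://example.com/libfoo"
    url = "https://example.com/libfoo-2.0.tar.gz"
    version("2.0", sha256="0" * 64)
    variant("json", default=False, description="enable JSON support")
    # libfoo@2.0 may be spliced in for libfoo@1.9 whenever both specs
    # agree on the json variant
    can_splice("libfoo@1.9", when="@2.0", match_variants=["json"])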
@directive("patches")
def patch(
url_or_filename: str,
@@ -616,15 +577,6 @@ def _execute_patch(pkg_or_dep: Union["spack.package_base.PackageBase", Dependenc
return _execute_patch
def conditional(*values: List[Any], when: Optional[WhenType] = None):
"""Conditional values that can be used in variant declarations."""
# _make_when_spec returns None when the condition is statically false.
when = _make_when_spec(when)
return spack.variant.ConditionalVariantValues(
spack.variant.ConditionalValue(x, when=when) for x in values
)
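A hedged sketch of ``conditional`` in a variant declaration; the package and values are invented.
from spack.package import *
class Mylib(Package):
    """Hypothetical package; only the variant declaration matters here."""
    # "17" and "20" are allowed values only for versions 5 and above
    variant(
        "cxxstd",
        default="14",
        values=("11", "14", conditional("17", "20", when="@5:")),
        multi=False,
        description="C++ standard",
    )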
@directive("variants")
def variant(
name: str,

View File

@@ -10,7 +10,6 @@
import llnl.util.lang
import spack.error
import spack.repo
import spack.spec
#: Names of possible directives. This list is mostly populated using the @directive decorator.
@@ -64,7 +63,7 @@ def __init__(cls, name, bases, attr_dict):
# The instance is being initialized: if it is a package we must ensure
# that the directives are called to set it up.
if cls.__module__.startswith(spack.repo.ROOT_PYTHON_NAMESPACE):
if "spack.pkg" in cls.__module__:
# Ensure the presence of the dictionaries associated with the directives.
# All dictionaries are defaultdicts that create lists for missing keys.
for d in DirectiveMeta._directive_dict_names:

View File

@@ -473,7 +473,6 @@
active_environment,
all_environment_names,
all_environments,
as_env_dir,
create,
create_in_dir,
deactivate,
@@ -481,7 +480,6 @@
default_view_name,
display_specs,
environment_dir_from_name,
environment_from_name_or_dir,
exists,
initialize_environment_dir,
installed_specs,
@@ -509,7 +507,6 @@
"active_environment",
"all_environment_names",
"all_environments",
"as_env_dir",
"create",
"create_in_dir",
"deactivate",
@@ -517,7 +514,6 @@
"default_view_name",
"display_specs",
"environment_dir_from_name",
"environment_from_name_or_dir",
"exists",
"initialize_environment_dir",
"installed_specs",

View File

@@ -11,19 +11,22 @@
import re
import shutil
import stat
import sys
import time
import urllib.parse
import urllib.request
import warnings
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import llnl.util.tty.color as clr
from llnl.util.link_tree import ConflictingSpecsError
from llnl.util.symlink import islink, readlink, symlink
from llnl.util.symlink import readlink, symlink
import spack
import spack.caches
import spack.compilers
import spack.concretize
import spack.config
import spack.deptypes as dt
@@ -42,6 +45,7 @@
import spack.util.environment
import spack.util.hash
import spack.util.lock as lk
import spack.util.parallel
import spack.util.path
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
@@ -53,8 +57,6 @@
from spack.spec_list import SpecList
from spack.util.path import substitute_path_variables
SpecPair = spack.concretize.SpecPair
#: environment variable used to indicate the active environment
spack_env_var = "SPACK_ENV"
@@ -275,22 +277,6 @@ def is_env_dir(path):
return os.path.isdir(path) and os.path.exists(os.path.join(path, manifest_name))
def as_env_dir(name_or_dir):
"""Translate an environment name or directory to the environment directory"""
if is_env_dir(name_or_dir):
return name_or_dir
else:
validate_env_name(name_or_dir)
if not exists(name_or_dir):
raise SpackEnvironmentError("no such environment '%s'" % name_or_dir)
return root(name_or_dir)
def environment_from_name_or_dir(name_or_dir):
"""Get an environment with the supplied name."""
return Environment(as_env_dir(name_or_dir))
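A hedged sketch of the helper pair removed above: a managed environment name and a directory containing ``spack.yaml`` resolve through the same entry point. Both arguments are illustrative.
import spack.environment as ev
env_a = ev.environment_from_name_or_dir("myenv")          # managed name
env_b = ev.environment_from_name_or_dir("/tmp/demo-env")  # anonymous dir
print(env_a.path, env_b.path)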
def read(name):
"""Get an environment with the supplied name."""
validate_env_name(name)
@@ -668,7 +654,7 @@ def from_dict(base_path, d):
@property
def _current_root(self):
if not islink(self.root):
if not os.path.islink(self.root):
return None
root = readlink(self.root)
@@ -1508,7 +1494,7 @@ def deconcretize(self, spec: spack.spec.Spec, concrete: bool = True):
def _get_specs_to_concretize(
self,
) -> Tuple[List[spack.spec.Spec], List[spack.spec.Spec], List[SpecPair]]:
) -> Tuple[Set[spack.spec.Spec], Set[spack.spec.Spec], List[spack.spec.Spec]]:
"""Compute specs to concretize for unify:true and unify:when_possible.
This includes new user specs and any already concretized specs.
@@ -1518,20 +1504,23 @@ def _get_specs_to_concretize(
"""
# Exit early if the set of concretized specs is the set of user specs
new_user_specs = list(set(self.user_specs) - set(self.concretized_user_specs))
kept_user_specs = list(set(self.user_specs) & set(self.concretized_user_specs))
kept_user_specs += self.included_user_specs
new_user_specs = set(self.user_specs) - set(self.concretized_user_specs)
kept_user_specs = set(self.user_specs) & set(self.concretized_user_specs)
if not new_user_specs:
return new_user_specs, kept_user_specs, []
specs_to_concretize = [(s, None) for s in new_user_specs] + [
(abstract, concrete)
concrete_specs_to_keep = [
concrete
for abstract, concrete in self.concretized_specs()
if abstract in kept_user_specs
]
specs_to_concretize = list(new_user_specs) + concrete_specs_to_keep
return new_user_specs, kept_user_specs, specs_to_concretize
def _concretize_together_where_possible(self, tests: bool = False) -> Sequence[SpecPair]:
def _concretize_together_where_possible(
self, tests: bool = False
) -> List[Tuple[spack.spec.Spec, spack.spec.Spec]]:
# Avoid cyclic dependency
import spack.solver.asp
@@ -1540,26 +1529,36 @@ def _concretize_together_where_possible(self, tests: bool = False) -> Sequence[S
if not new_user_specs:
return []
old_concrete_to_abstract = {
concrete: abstract for (abstract, concrete) in self.concretized_specs()
}
self.concretized_user_specs = []
self.concretized_order = []
self.specs_by_hash = {}
ret = []
result = spack.concretize.concretize_together_when_possible(
specs_to_concretize, tests=tests
)
for abstract, concrete in result:
# Only add to the environment if it's from this environment (not included in)
if abstract in self.user_specs:
self._add_concrete_spec(abstract, concrete)
result_by_user_spec = {}
solver = spack.solver.asp.Solver()
allow_deprecated = spack.config.get("config:deprecated", False)
for result in solver.solve_in_rounds(
specs_to_concretize, tests=tests, allow_deprecated=allow_deprecated
):
result_by_user_spec.update(result.specs_by_input)
# Return only the new specs
result = []
for abstract, concrete in sorted(result_by_user_spec.items()):
# If the "abstract" spec is a concrete spec from the previous concretization
# translate it back to an abstract spec. Otherwise, keep the abstract spec
abstract = old_concrete_to_abstract.get(abstract, abstract)
if abstract in new_user_specs:
ret.append((abstract, concrete))
result.append((abstract, concrete))
self._add_concrete_spec(abstract, concrete)
return ret
return result
def _concretize_together(self, tests: bool = False) -> Sequence[SpecPair]:
def _concretize_together(
self, tests: bool = False
) -> List[Tuple[spack.spec.Spec, spack.spec.Spec]]:
"""Concretization strategy that concretizes all the specs
in the same DAG.
"""
@@ -1573,8 +1572,8 @@ def _concretize_together(self, tests: bool = False) -> Sequence[SpecPair]:
self.specs_by_hash = {}
try:
concretized_specs = spack.concretize.concretize_together(
specs_to_concretize, tests=tests
concrete_specs: List[spack.spec.Spec] = spack.concretize.concretize_specs_together(
*specs_to_concretize, tests=tests
)
except spack.error.UnsatisfiableSpecError as e:
# "Enhance" the error message for multiple root specs, suggest a less strict
@@ -1592,13 +1591,14 @@ def _concretize_together(self, tests: bool = False) -> Sequence[SpecPair]:
)
raise
# set() | set() does not preserve ordering, even though sets are ordered
ordered_user_specs = list(new_user_specs) + list(kept_user_specs)
concretized_specs = [x for x in zip(ordered_user_specs, concrete_specs)]
for abstract, concrete in concretized_specs:
# Don't add if it's just included
if abstract in self.user_specs:
self._add_concrete_spec(abstract, concrete)
self._add_concrete_spec(abstract, concrete)
# Return the portion of the return value that is new
return concretized_specs[: len(new_user_specs)]
# zip truncates the longer list, which is exactly what we want here
return list(zip(new_user_specs, concrete_specs))
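The ``zip`` truncation the comment above relies on, shown in isolation with plain strings.
new_user_specs = ["zlib", "cmake"]                      # 2 new roots
concrete_specs = ["zlib@1.3", "cmake@3.27", "old@1.0"]  # new + kept results
# zip stops at the shorter input, so only the new roots are paired
pairs = list(zip(new_user_specs, concrete_specs))
assert pairs == [("zlib", "zlib@1.3"), ("cmake", "cmake@3.27")]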
def _concretize_separately(self, tests=False):
"""Concretization strategy that concretizes separately one
@@ -1620,16 +1620,71 @@ def _concretize_separately(self, tests=False):
concrete = old_specs_by_hash[h]
self._add_concrete_spec(s, concrete, new=False)
to_concretize = [
(root, None) for root in self.user_specs if root not in old_concretized_user_specs
]
concretized_specs = spack.concretize.concretize_separately(to_concretize, tests=tests)
# Concretize any new user specs that we haven't concretized yet
args, root_specs, i = [], [], 0
for uspec in self.user_specs:
if uspec not in old_concretized_user_specs:
root_specs.append(uspec)
args.append((i, str(uspec), tests))
i += 1
by_hash = {}
for abstract, concrete in concretized_specs:
self._add_concrete_spec(abstract, concrete)
# Ensure we don't try to bootstrap clingo in parallel
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
# Ensure all the indexes have been built or updated, since
# otherwise the processes in the pool may timeout on waiting
# for a write lock. We do this indirectly by retrieving the
# provider index, which should in turn trigger the update of
# all the indexes if there's any need for that.
_ = spack.repo.PATH.provider_index
# Ensure we have compilers in compilers.yaml to avoid that
# processes try to write the config file in parallel
_ = spack.compilers.all_compilers_config(spack.config.CONFIG)
# Early return if there is nothing to do
if len(args) == 0:
return []
# Solve the environment in parallel on Linux
start = time.time()
num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True))
# TODO: support parallel concretization on macOS and Windows
msg = "Starting concretization"
if sys.platform not in ("darwin", "win32") and num_procs > 1:
msg += f" pool with {num_procs} processes"
tty.msg(msg)
batch = []
for j, (i, concrete, duration) in enumerate(
spack.util.parallel.imap_unordered(
_concretize_task,
args,
processes=num_procs,
debug=tty.is_debug(),
maxtaskperchild=1,
)
):
batch.append((i, concrete))
percentage = (j + 1) / len(args) * 100
tty.verbose(
f"{duration:6.1f}s [{percentage:3.0f}%] {concrete.cformat('{hash:7}')} "
f"{root_specs[i].colored_str}"
)
sys.stdout.flush()
# Add specs in original order
batch.sort(key=lambda x: x[0])
by_hash = {} # for attaching information on test dependencies
for root, (_, concrete) in zip(root_specs, batch):
self._add_concrete_spec(root, concrete)
by_hash[concrete.dag_hash()] = concrete
finish = time.time()
tty.msg(f"Environment concretized in {finish - start:.2f} seconds")
# Unify the spec objects, so we get correct references to all parents
self._read_lockfile_dict(self._to_lockfile_dict())
@@ -1649,7 +1704,11 @@ def _concretize_separately(self, tests=False):
test_dependency.copy(), depflag=dt.TEST, virtuals=current_edge.virtuals
)
return concretized_specs
results = [
(abstract, self.specs_by_hash[h])
for abstract, h in zip(self.concretized_user_specs, self.concretized_order)
]
return results
@property
def default_view(self):
@@ -1897,16 +1956,17 @@ def install_specs(self, specs: Optional[List[Spec]] = None, **install_args):
specs = specs if specs is not None else roots
# Extend the set of specs to overwrite with modified dev specs and their parents
install_args["overwrite"] = {
*install_args.get("overwrite", ()),
*self._dev_specs_that_need_overwrite(),
}
overwrite: Set[str] = set()
overwrite.update(install_args.get("overwrite", []), self._dev_specs_that_need_overwrite())
install_args["overwrite"] = overwrite
# Only environment roots are marked explicit
install_args["explicit"] = {
*install_args.get("explicit", ()),
*(s.dag_hash() for s in roots),
}
explicit: Set[str] = set()
explicit.update(
install_args.get("explicit", []),
(s.dag_hash() for s in specs),
(s.dag_hash() for s in roots),
)
install_args["explicit"] = explicit
PackageInstaller([spec.package for spec in specs], **install_args).install()
@@ -2456,6 +2516,14 @@ def display_specs(specs):
print(tree_string)
def _concretize_task(packed_arguments) -> Tuple[int, Spec, float]:
index, spec_str, tests = packed_arguments
with tty.SuppressOutput(msg_enabled=False):
start = time.time()
spec = Spec(spec_str).concretized(tests=tests)
return index, spec, time.time() - start
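A hedged sketch of the process-pool fan-out used above, with a trivial task standing in for ``_concretize_task``; it assumes ``spack.util.parallel.imap_unordered`` behaves as called in this hunk.
import spack.util.parallel
def _square_task(packed):
    index, value = packed
    return index, value * value
args = list(enumerate([2, 3, 5]))
results = []
for index, squared in spack.util.parallel.imap_unordered(
    _square_task, args, processes=2, debug=False, maxtaskperchild=1
):
    results.append((index, squared))
results.sort(key=lambda x: x[0])  # restore submission order, as above
print(results)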
def make_repo_path(root):
"""Make a RepoPath from the repo subdirectories in an environment."""
path = spack.repo.RepoPath(cache=spack.caches.MISC_CACHE)

View File

@@ -48,6 +48,8 @@ def activate_header(env, shell, prompt=None, view: Optional[str] = None):
cmds += 'set "SPACK_ENV=%s"\n' % env.path
if view:
cmds += 'set "SPACK_ENV_VIEW=%s"\n' % view
# TODO: despacktivate
# TODO: prompt
elif shell == "pwsh":
cmds += "$Env:SPACK_ENV='%s'\n" % env.path
if view:

View File

@@ -33,7 +33,6 @@
from llnl.util.tty.color import colorize
import spack.config
import spack.directory_layout
import spack.paths
import spack.projections
import spack.relocate
@@ -51,7 +50,7 @@
_projections_path = ".spack/projections.yaml"
LinkCallbackType = Callable[[str, str, "FilesystemView", Optional[spack.spec.Spec]], None]
LinkCallbackType = Callable[[str, str, "FilesystemView", Optional["spack.spec.Spec"]], None]
def view_symlink(src: str, dst: str, *args, **kwargs) -> None:
@@ -63,7 +62,7 @@ def view_hardlink(src: str, dst: str, *args, **kwargs) -> None:
def view_copy(
src: str, dst: str, view: "FilesystemView", spec: Optional[spack.spec.Spec] = None
src: str, dst: str, view: "FilesystemView", spec: Optional["spack.spec.Spec"] = None
) -> None:
"""
Copy a file from src to dst.
@@ -161,7 +160,7 @@ class FilesystemView:
def __init__(
self,
root: str,
layout: spack.directory_layout.DirectoryLayout,
layout: "spack.directory_layout.DirectoryLayout",
*,
projections: Optional[Dict] = None,
ignore_conflicts: bool = False,
@@ -183,10 +182,7 @@ def __init__(
# Setup link function to include view
self.link_type = link_type
self._link = function_for_link_type(link_type)
def link(self, src: str, dst: str, spec: Optional[spack.spec.Spec] = None) -> None:
self._link(src, dst, self, spec)
self.link = ft.partial(function_for_link_type(link_type), view=self)
def add_specs(self, *specs, **kwargs):
"""
@@ -287,7 +283,7 @@ class YamlFilesystemView(FilesystemView):
def __init__(
self,
root: str,
layout: spack.directory_layout.DirectoryLayout,
layout: "spack.directory_layout.DirectoryLayout",
*,
projections: Optional[Dict] = None,
ignore_conflicts: bool = False,

View File

@@ -21,40 +21,43 @@
features.
"""
import importlib
import types
from typing import List, Optional
from llnl.util.lang import ensure_last, list_modules
import spack.paths
class _HookRunner:
#: Order in which hooks are executed
HOOK_ORDER = [
"spack.hooks.module_file_generation",
"spack.hooks.licensing",
"spack.hooks.sbang",
"spack.hooks.windows_runtime_linkage",
"spack.hooks.drop_redundant_rpaths",
"spack.hooks.absolutify_elf_sonames",
"spack.hooks.permissions_setters",
# after all mutations to the install prefix, write metadata
"spack.hooks.write_install_manifest",
# after all metadata is written
"spack.hooks.autopush",
]
#: Contains all hook modules after first call, shared among all HookRunner objects
_hooks: Optional[List[types.ModuleType]] = None
#: Stores all hooks on first call, shared among
#: all HookRunner objects
_hooks = None
def __init__(self, hook_name):
self.hook_name = hook_name
@classmethod
def _populate_hooks(cls):
# Lazily populate the list of hooks
cls._hooks = []
relative_names = list(list_modules(spack.paths.hooks_path))
# Ensure that write_install_manifest comes last
ensure_last(relative_names, "absolutify_elf_sonames", "write_install_manifest")
for name in relative_names:
module_name = __name__ + "." + name
module_obj = importlib.import_module(module_name)
cls._hooks.append((module_name, module_obj))
@property
def hooks(self) -> List[types.ModuleType]:
def hooks(self):
if not self._hooks:
self._hooks = [importlib.import_module(module_name) for module_name in self.HOOK_ORDER]
self._populate_hooks()
return self._hooks
def __call__(self, *args, **kwargs):
for module in self.hooks:
for _, module in self.hooks:
if hasattr(module, self.hook_name):
hook = getattr(module, self.hook_name)
if hasattr(hook, "__call__"):

View File

@@ -412,7 +412,7 @@ def _process_external_package(pkg: "spack.package_base.PackageBase", explicit: b
tty.debug(f"{pre} already registered in DB")
record = spack.store.STORE.db.get_record(spec)
if explicit and not record.explicit:
spack.store.STORE.db.mark(spec, "explicit", True)
spack.store.STORE.db.update_explicit(spec, explicit)
except KeyError:
# If not, register it and generate the module file.
@@ -1507,8 +1507,8 @@ def _prepare_for_install(self, task: Task) -> None:
self._update_installed(task)
# Only update the explicit entry once for the explicit package
if task.explicit and not rec.explicit:
spack.store.STORE.db.mark(task.pkg.spec, "explicit", True)
if task.explicit:
spack.store.STORE.db.update_explicit(task.pkg.spec, True)
def _cleanup_all_tasks(self) -> None:
"""Cleanup all tasks to include releasing their locks."""

View File

@@ -489,7 +489,6 @@ def make_argument_parser(**kwargs):
help="add stacktraces to all printed statements",
)
parser.add_argument(
"-t",
"--backtrace",
action="store_true",
default="SPACK_BACKTRACE" in os.environ,
@@ -911,6 +910,13 @@ def _main(argv=None):
# Make spack load / env activate work on macOS
restore_macos_dyld_vars()
# make spack.config aware of any command line configuration scopes
if args.config_scopes:
spack.config.COMMAND_LINE_SCOPES = args.config_scopes
# ensure options on spack command come before everything
setup_main_options(args)
# activate an environment if one was specified on the command line
env_format_error = None
if not args.no_env:
@@ -924,12 +930,6 @@ def _main(argv=None):
e.print_context()
env_format_error = e
# Push scopes from the command line last
if args.config_scopes:
spack.config._add_command_line_scopes(spack.config.CONFIG, args.config_scopes)
spack.config.CONFIG.push_scope(spack.config.InternalConfigScope("command_line"))
setup_main_options(args)
# ------------------------------------------------------------------------
# Things that require configuration should go below here
# ------------------------------------------------------------------------
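
For intuition, the ordering above matters because configuration scopes behave like a stack in which later pushes shadow earlier ones; pushing the command-line scope last is what makes it win. A toy sketch of that precedence model (not Spack's real Configuration class):

class ScopeStack:
    """Toy precedence model: later-pushed scopes shadow earlier ones."""

    def __init__(self):
        self.scopes = []  # earlier entries have lower precedence

    def push_scope(self, name, values):
        self.scopes.append((name, values))

    def get(self, key, default=None):
        # walk from the most recently pushed scope downward
        for _, values in reversed(self.scopes):
            if key in values:
                return values[key]
        return default


cfg = ScopeStack()
cfg.push_scope("defaults", {"jobs": 8})
cfg.push_scope("command_line", {"jobs": 2})
assert cfg.get("jobs") == 2  # the command-line scope wins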

View File

@@ -18,7 +18,7 @@
import sys
import traceback
import urllib.parse
from typing import Any, Dict, Optional, Tuple, Union
from typing import List, Optional, Union
import llnl.url
import llnl.util.symlink
@@ -29,6 +29,7 @@
import spack.config
import spack.error
import spack.fetch_strategy
import spack.mirror
import spack.oci.image
import spack.repo
import spack.spec
@@ -88,8 +89,9 @@ def from_url(url: str):
"""Create an anonymous mirror by URL. This method validates the URL."""
if not urllib.parse.urlparse(url).scheme in supported_url_schemes:
raise ValueError(
f'"{url}" is not a valid mirror URL. '
f"Scheme must be one of {supported_url_schemes}."
'"{}" is not a valid mirror URL. Scheme must be once of {}.'.format(
url, ", ".join(supported_url_schemes)
)
)
return Mirror(url)
@@ -153,66 +155,8 @@ def push_url(self):
"""Get the valid, canonicalized fetch URL"""
return self.get_url("push")
def ensure_mirror_usable(self, direction: str = "push"):
access_pair = self._get_value("access_pair", direction)
access_token_variable = self._get_value("access_token_variable", direction)
errors = []
# Verify that the credentials that are variables expand
if access_pair and isinstance(access_pair, dict):
if "id_variable" in access_pair and access_pair["id_variable"] not in os.environ:
errors.append(f"id_variable {access_pair['id_variable']} not set in environment")
if "secret_variable" in access_pair:
if access_pair["secret_variable"] not in os.environ:
errors.append(
f"environment variable `{access_pair['secret_variable']}` "
"(secret_variable) not set"
)
if access_token_variable:
if access_token_variable not in os.environ:
errors.append(
f"environment variable `{access_pair['access_token_variable']}` "
"(access_token_variable) not set"
)
if errors:
msg = f"invalid {direction} configuration for mirror {self.name}: "
msg += "\n ".join(errors)
raise spack.mirror.MirrorError(msg)
def _update_connection_dict(self, current_data: dict, new_data: dict, top_level: bool):
# Only allow one to exist in the config
if "access_token" in current_data and "access_token_variable" in new_data:
current_data.pop("access_token")
elif "access_token_variable" in current_data and "access_token" in new_data:
current_data.pop("access_token_variable")
# If updating to a new access_pair that is the deprecated list, warn
warn_deprecated_access_pair = False
if "access_pair" in new_data:
warn_deprecated_access_pair = isinstance(new_data["access_pair"], list)
# If not updating the current access_pair, and it is the deprecated list, warn
elif "access_pair" in current_data:
warn_deprecated_access_pair = isinstance(current_data["access_pair"], list)
if warn_deprecated_access_pair:
tty.warn(
f"in mirror {self.name}: support for plain text secrets in config files "
"(access_pair: [id, secret]) is deprecated and will be removed in a future Spack "
"version. Use environment variables instead (access_pair: "
"{id: ..., secret_variable: ...})"
)
keys = [
"url",
"access_pair",
"access_token",
"access_token_variable",
"profile",
"endpoint_url",
]
keys = ["url", "access_pair", "access_token", "profile", "endpoint_url"]
if top_level:
keys += ["binary", "source", "signed", "autopush"]
changed = False
@@ -328,53 +272,11 @@ def get_url(self, direction: str) -> str:
return _url_or_path_to_url(url)
def get_credentials(self, direction: str) -> Dict[str, Any]:
"""Get the mirror credentials from the mirror config
Args:
direction: fetch or push mirror config
Returns:
Dictionary from credential type string to value
Credential Type Map:
access_token -> str
access_pair -> tuple(str,str)
profile -> str
"""
creddict: Dict[str, Any] = {}
access_token = self.get_access_token(direction)
if access_token:
creddict["access_token"] = access_token
access_pair = self.get_access_pair(direction)
if access_pair:
creddict.update({"access_pair": access_pair})
profile = self.get_profile(direction)
if profile:
creddict["profile"] = profile
return creddict
def get_access_token(self, direction: str) -> Optional[str]:
tok = self._get_value("access_token_variable", direction)
if tok:
return os.environ.get(tok)
else:
return self._get_value("access_token", direction)
return None
return self._get_value("access_token", direction)
def get_access_pair(self, direction: str) -> Optional[Tuple[str, str]]:
pair = self._get_value("access_pair", direction)
if isinstance(pair, (tuple, list)) and len(pair) == 2:
return (pair[0], pair[1]) if all(pair) else None
elif isinstance(pair, dict):
id_ = os.environ.get(pair["id_variable"]) if "id_variable" in pair else pair["id"]
secret = os.environ.get(pair["secret_variable"])
return (id_, secret) if id_ and secret else None
else:
return None
def get_access_pair(self, direction: str) -> Optional[List]:
return self._get_value("access_pair", direction)
def get_profile(self, direction: str) -> Optional[str]:
return self._get_value("profile", direction)
@@ -855,9 +757,9 @@ def create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats):
def require_mirror_name(mirror_name):
"""Find a mirror by name and raise if it does not exist"""
mirror = MirrorCollection().get(mirror_name)
mirror = spack.mirror.MirrorCollection().get(mirror_name)
if not mirror:
raise ValueError(f'no mirror named "{mirror_name}"')
raise ValueError('no mirror named "{0}"'.format(mirror_name))
return mirror
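
The dict-style access_pair handling above resolves credentials lazily from the environment rather than storing secrets in config. A minimal standalone sketch of that resolution logic (same key names as the schema, but not Spack's actual method):

import os
from typing import Optional, Tuple, Union


def resolve_access_pair(pair: Union[list, dict, None]) -> Optional[Tuple[str, str]]:
    """Return (id, secret) or None, expanding *_variable entries from os.environ."""
    if isinstance(pair, (tuple, list)) and len(pair) == 2:
        # deprecated plain-text form: [id, secret]
        return (pair[0], pair[1]) if all(pair) else None
    if isinstance(pair, dict):
        id_ = os.environ.get(pair["id_variable"]) if "id_variable" in pair else pair.get("id")
        secret = os.environ.get(pair.get("secret_variable", ""))
        return (id_, secret) if id_ and secret else None
    return None


# e.g. with MY_SECRET exported: resolve_access_pair({"id": "user", "secret_variable": "MY_SECRET"})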

View File

@@ -527,9 +527,7 @@ def use_name(self):
parts = name.split("/")
name = os.path.join(*parts)
# Add optional suffixes based on constraints
path_elements = [name]
path_elements.extend(map(self.spec.format, self.conf.suffixes))
return "-".join(path_elements)
return "-".join([name, *map(self.spec.format_path, self.conf.suffixes)])
@property
def filename(self):
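
On the use_name change above: the practical difference between format and format_path is that a formatted suffix can contain characters that are unsafe inside a single path component. An illustrative sketch of the idea (not Spack's actual format_path implementation):

import re


def format_path_fragment(text: str) -> str:
    """Make a formatted spec string safe to use as a single path component."""
    # collapse path separators and whitespace into dashes (illustrative policy)
    return re.sub(r"[/\\\s]+", "-", text)


# e.g. format_path_fragment("refs/heads/develop") -> "refs-heads-develop"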

View File

@@ -377,10 +377,9 @@ def credentials_from_mirrors(
# Prefer push credentials over fetch. Unlikely that those are different
# but our config format allows it.
for direction in ("push", "fetch"):
pair = mirror.get_credentials(direction).get("access_pair")
if not pair:
pair = mirror.get_access_pair(direction)
if pair is None:
continue
url = mirror.get_url(direction)
if not url.startswith("oci://"):
continue

View File

@@ -103,7 +103,12 @@
from spack.spec import InvalidSpecDetected, Spec
from spack.util.executable import *
from spack.util.filesystem import file_command, fix_darwin_install_name, mime_type
from spack.variant import any_combination_of, auto_or_any_combination_of, disjoint_sets
from spack.variant import (
any_combination_of,
auto_or_any_combination_of,
conditional,
disjoint_sets,
)
from spack.version import Version, ver
# These are just here for editor support; they will be replaced when the build env

View File

@@ -32,6 +32,7 @@
from llnl.util.lang import classproperty, memoized
from llnl.util.link_tree import LinkTree
import spack.build_environment
import spack.builder
import spack.compilers
import spack.config
@@ -49,6 +50,7 @@
import spack.store
import spack.url
import spack.util.environment
import spack.util.executable
import spack.util.path
import spack.util.web
from spack.error import InstallError, NoURLError, PackageError
@@ -622,7 +624,6 @@ class PackageBase(WindowsRPath, PackageViewMixin, RedistributionMixin, metaclass
patches: Dict["spack.spec.Spec", List["spack.patch.Patch"]]
variants: Dict["spack.spec.Spec", Dict[str, "spack.variant.Variant"]]
languages: Dict["spack.spec.Spec", Set[str]]
splice_specs: Dict["spack.spec.Spec", Tuple["spack.spec.Spec", Union[None, str, List[str]]]]
#: By default, packages are not virtual
#: Virtual packages override this attribute

View File

@@ -283,11 +283,7 @@ def modify_macho_object(cur_path, rpaths, deps, idpath, paths_to_paths):
def macholib_get_paths(cur_path):
"""Get rpaths, dependent libraries, and library id of mach-o objects."""
headers = []
try:
headers = macholib.MachO.MachO(cur_path).headers
except ValueError:
pass
headers = macholib.MachO.MachO(cur_path).headers
if not headers:
tty.warn("Failed to read Mach-O headers: {0}".format(cur_path))
commands = []

View File

@@ -39,9 +39,9 @@
import spack.error
import spack.patch
import spack.provider_index
import spack.repo
import spack.spec
import spack.tag
import spack.util.file_cache
import spack.util.git
import spack.util.naming as nm
import spack.util.path
@@ -216,9 +216,9 @@ def compute_loader(self, fullname):
def packages_path():
"""Get the test repo if it is active, otherwise the builtin repo."""
try:
return PATH.get_repo("builtin.mock").packages_path
except UnknownNamespaceError:
return PATH.get_repo("builtin").packages_path
return spack.repo.PATH.get_repo("builtin.mock").packages_path
except spack.repo.UnknownNamespaceError:
return spack.repo.PATH.get_repo("builtin").packages_path
class GitExe:
@@ -314,7 +314,7 @@ def add_package_to_git_stage(packages):
git = GitExe()
for pkg_name in packages:
filename = PATH.filename_for_package_name(pkg_name)
filename = spack.repo.PATH.filename_for_package_name(pkg_name)
if not os.path.isfile(filename):
tty.die("No such package: %s. Path does not exist:" % pkg_name, filename)
@@ -590,7 +590,7 @@ def __init__(
self,
package_checker: FastPackageChecker,
namespace: str,
cache: spack.util.file_cache.FileCache,
cache: "spack.caches.FileCacheType",
):
self.checker = package_checker
self.packages_path = self.checker.packages_path
@@ -683,7 +683,7 @@ class RepoPath:
def __init__(
self,
*repos: Union[str, "Repo"],
cache: Optional[spack.util.file_cache.FileCache],
cache: Optional["spack.caches.FileCacheType"],
overrides: Optional[Dict[str, Any]] = None,
) -> None:
self.repos: List[Repo] = []
@@ -965,7 +965,7 @@ def __init__(
self,
root: str,
*,
cache: spack.util.file_cache.FileCache,
cache: "spack.caches.FileCacheType",
overrides: Optional[Dict[str, Any]] = None,
) -> None:
"""Instantiate a package repository from a filesystem path.
@@ -1440,7 +1440,9 @@ def _path(configuration=None):
return create(configuration=configuration)
def create(configuration: spack.config.Configuration) -> RepoPath:
def create(
configuration: Union["spack.config.Configuration", llnl.util.lang.Singleton]
) -> RepoPath:
"""Create a RepoPath from a configuration object.
Args:
@@ -1463,7 +1465,7 @@ def create(configuration: spack.config.Configuration) -> RepoPath:
#: Singleton repo path instance
PATH: RepoPath = llnl.util.lang.Singleton(_path) # type: ignore
PATH: Union[RepoPath, llnl.util.lang.Singleton] = llnl.util.lang.Singleton(_path)
# Add the finder to sys.meta_path
REPOS_FINDER = ReposFinder()
@@ -1583,7 +1585,7 @@ def __init__(self, name, repo=None):
long_msg = "Use 'spack create' to create a new package."
if not repo:
repo = PATH
repo = spack.repo.PATH
# We need to compare the base package name
pkg_name = name.rsplit(".", 1)[-1]

View File

@@ -11,6 +11,8 @@
from llnl.util.lang import union_dicts
import spack.schema.gitlab_ci
# Schema for script fields
# List of lists and/or strings
# This is similar to what is allowed in
@@ -135,8 +137,39 @@ def job_schema(name: str):
}
)
# TODO: Remove in Spack 0.23
ci_properties = {
"anyOf": [
{
"type": "object",
"additionalProperties": False,
# "required": ["mappings"],
"properties": union_dicts(
core_shared_properties, {"enable-artifacts-buildcache": {"type": "boolean"}}
),
},
{
"type": "object",
"additionalProperties": False,
# "required": ["mappings"],
"properties": union_dicts(
core_shared_properties, {"temporary-storage-url-prefix": {"type": "string"}}
),
},
]
}
#: Properties for inclusion in other schemas
properties: Dict[str, Any] = {"ci": core_shared_properties}
properties: Dict[str, Any] = {
"ci": {
"oneOf": [
# TODO: Replace with core-shared-properties in Spack 0.23
ci_properties,
# Allow legacy format under `ci` for `config update ci`
spack.schema.gitlab_ci.gitlab_ci_properties,
]
}
}
#: Full schema with metadata
schema = {
@@ -146,3 +179,21 @@ def job_schema(name: str):
"additionalProperties": False,
"properties": properties,
}
def update(data):
import llnl.util.tty as tty
import spack.ci
import spack.environment as ev
# Warn if deprecated section is still in the environment
ci_env = ev.active_environment()
if ci_env:
env_config = ci_env.manifest[ev.TOP_LEVEL_KEY]
if "gitlab-ci" in env_config:
tty.die("Error: `gitlab-ci` section detected with `ci`, these are not compatible")
# Detect if the ci section is using the new pipeline-gen
# If it is, assume it has already been converted
return spack.ci.translate_deprecated_config(data)
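
Update hooks like the one above follow a common contract in Spack's schema modules: take the config section as a dict, mutate it in place if a deprecated layout is detected, and return True only when something changed so `spack config update` knows to rewrite the file. A minimal sketch of that contract with a hypothetical deprecated key:

def update(data: dict) -> bool:
    """Rename a deprecated key in place; return True if the data changed."""
    if "old-key" in data:  # hypothetical deprecated name
        data["new-key"] = data.pop("old-key")
        return True
    return False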

View File

@@ -61,10 +61,7 @@
"target": {"type": "string"},
"alias": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"modules": {
"anyOf": [
{"type": "null"},
{"type": "array", "items": {"type": "string"}},
]
"anyOf": [{"type": "string"}, {"type": "null"}, {"type": "array"}]
},
"implicit_rpaths": implicit_rpaths,
"environment": spack.schema.environment.definition,

View File

@@ -33,14 +33,8 @@
"properties": {
"type": {
"type": "string",
"enum": [
"local",
"buildcache",
"external",
"environment",
],
"enum": ["local", "buildcache", "external"],
},
"path": {"type": "string"},
"include": LIST_OF_SPECS,
"exclude": LIST_OF_SPECS,
},
@@ -78,8 +72,7 @@
"transitive": {"type": "boolean", "default": False},
},
},
},
"automatic": {"type": "boolean"},
}
},
},
"duplicates": {

View File

@@ -12,6 +12,7 @@
from llnl.util.lang import union_dicts
import spack.schema.gitlab_ci # DEPRECATED
import spack.schema.merged
from .spec_list import spec_list_schema
@@ -19,21 +20,21 @@
#: Top level key in a manifest file
TOP_LEVEL_KEY = "spack"
include_concrete = {"type": "array", "default": [], "items": {"type": "string"}}
properties: Dict[str, Any] = {
"spack": {
"type": "object",
"default": {},
"additionalProperties": False,
"properties": union_dicts(
# Include deprecated "gitlab-ci" section
spack.schema.gitlab_ci.properties,
# merged configuration scope schemas
spack.schema.merged.properties,
# extra environment schema properties
{
"include": {"type": "array", "default": [], "items": {"type": "string"}},
"specs": spec_list_schema,
"include_concrete": include_concrete,
"include_concrete": {"type": "array", "default": [], "items": {"type": "string"}},
},
),
}
@@ -57,6 +58,15 @@ def update(data):
Returns:
True if data was changed, False otherwise
"""
import spack.ci
if "gitlab-ci" in data:
data["ci"] = data.pop("gitlab-ci")
if "ci" in data:
return spack.ci.translate_deprecated_config(data["ci"])
# There are not currently any deprecated attributes in this section
# that have not been removed
return False

View File

@@ -0,0 +1,125 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Schema for gitlab-ci.yaml configuration file.
.. literalinclude:: ../spack/schema/gitlab_ci.py
:lines: 15-
"""
from typing import Any, Dict
from llnl.util.lang import union_dicts
image_schema = {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"name": {"type": "string"},
"entrypoint": {"type": "array", "items": {"type": "string"}},
},
},
]
}
runner_attributes_schema_items = {
"image": image_schema,
"tags": {"type": "array", "items": {"type": "string"}},
"variables": {"type": "object", "patternProperties": {r"[\w\d\-_\.]+": {"type": "string"}}},
"before_script": {"type": "array", "items": {"type": "string"}},
"script": {"type": "array", "items": {"type": "string"}},
"after_script": {"type": "array", "items": {"type": "string"}},
}
runner_selector_schema = {
"type": "object",
"additionalProperties": True,
"required": ["tags"],
"properties": runner_attributes_schema_items,
}
remove_attributes_schema = {
"type": "object",
"additionalProperties": False,
"required": ["tags"],
"properties": {"tags": {"type": "array", "items": {"type": "string"}}},
}
core_shared_properties = union_dicts(
runner_attributes_schema_items,
{
"bootstrap": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{
"type": "object",
"additionalProperties": False,
"required": ["name"],
"properties": {
"name": {"type": "string"},
"compiler-agnostic": {"type": "boolean", "default": False},
},
},
]
},
},
"match_behavior": {"type": "string", "enum": ["first", "merge"], "default": "first"},
"mappings": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": False,
"required": ["match"],
"properties": {
"match": {"type": "array", "items": {"type": "string"}},
"remove-attributes": remove_attributes_schema,
"runner-attributes": runner_selector_schema,
},
},
},
"service-job-attributes": runner_selector_schema,
"signing-job-attributes": runner_selector_schema,
"rebuild-index": {"type": "boolean"},
"broken-specs-url": {"type": "string"},
"broken-tests-packages": {"type": "array", "items": {"type": "string"}},
},
)
gitlab_ci_properties = {
"anyOf": [
{
"type": "object",
"additionalProperties": False,
"required": ["mappings"],
"properties": union_dicts(
core_shared_properties, {"enable-artifacts-buildcache": {"type": "boolean"}}
),
},
{
"type": "object",
"additionalProperties": False,
"required": ["mappings"],
"properties": union_dicts(
core_shared_properties, {"temporary-storage-url-prefix": {"type": "string"}}
),
},
]
}
#: Properties for inclusion in other schemas
properties: Dict[str, Any] = {"gitlab-ci": gitlab_ci_properties}
#: Full schema with metadata
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Spack gitlab-ci configuration file schema",
"type": "object",
"additionalProperties": False,
"properties": properties,
}
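
As a usage note, a schema module laid out like this can be checked directly with the jsonschema package; a minimal sketch, assuming jsonschema is installed and the `schema` dict above is in scope:

import jsonschema

config = {"gitlab-ci": {"mappings": [{"match": ["zlib"]}]}}
jsonschema.validate(config, schema)  # raises jsonschema.ValidationError on bad input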

View File

@@ -15,42 +15,14 @@
"url": {"type": "string"},
# todo: replace this with named keys "username" / "password" or "id" / "secret"
"access_pair": {
"oneOf": [
{
"type": "array",
"items": {"minItems": 2, "maxItems": 2, "type": ["string", "null"]},
}, # deprecated
{
"type": "object",
"required": ["secret_variable"],
# Only allow id or id_variable to be set, not both
"oneOf": [{"required": ["id"]}, {"required": ["id_variable"]}],
"properties": {
"id": {"type": "string"},
"id_variable": {"type": "string"},
"secret_variable": {"type": "string"},
},
},
]
"type": "array",
"items": {"type": ["string", "null"], "minItems": 2, "maxItems": 2},
},
"access_token": {"type": ["string", "null"]},
"profile": {"type": ["string", "null"]},
"endpoint_url": {"type": ["string", "null"]},
"access_token": {"type": ["string", "null"]}, # deprecated
"access_token_variable": {"type": ["string", "null"]},
}
connection_ext = {
"deprecatedProperties": [
{
"names": ["access_token"],
"message": "Use of plain text `access_token` in mirror config is deprecated, use "
"environment variables instead (access_token_variable)",
"error": False,
}
]
}
#: Mirror connection inside pull/push keys
fetch_and_push = {
"anyOf": [
@@ -59,7 +31,6 @@
"type": "object",
"additionalProperties": False,
"properties": {**connection}, # type: ignore
**connection_ext, # type: ignore
},
]
}
@@ -78,7 +49,6 @@
"autopush": {"type": "boolean"},
**connection, # type: ignore
},
**connection_ext, # type: ignore
}
#: Properties for inclusion in other schemas
@@ -100,28 +70,3 @@
"additionalProperties": False,
"properties": properties,
}
def update(data):
import jsonschema
errors = []
def check_access_pair(name, section):
if not section or not isinstance(section, dict):
return
if "access_token" in section and "access_token_variable" in section:
errors.append(
f'{name}: mirror credential "access_token" conflicts with "access_token_variable"'
)
# Check all of the sections
for name, section in data.items():
check_access_pair(name, section)
if isinstance(section, dict):
check_access_pair(name, section.get("fetch"))
check_access_pair(name, section.get("push"))
if errors:
raise jsonschema.ValidationError("\n".join(errors))

View File

@@ -27,6 +27,7 @@
import spack
import spack.binary_distribution
import spack.bootstrap.core
import spack.compilers
import spack.concretize
import spack.config
@@ -52,7 +53,6 @@
from .core import (
AspFunction,
AspVar,
NodeArgument,
ast_sym,
ast_type,
@@ -515,8 +515,6 @@ def _compute_specs_from_answer_set(self):
best = min(self.answers)
opt, _, answer = best
for input_spec in self.abstract_specs:
# The specs must be unified to get here, so it is safe to associate any satisfying spec
# with the input. Multiple inputs may be matched to the same concrete spec
node = SpecBuilder.make_node(pkg=input_spec.name)
if input_spec.virtual:
providers = [
@@ -525,14 +523,12 @@ def _compute_specs_from_answer_set(self):
node = SpecBuilder.make_node(pkg=providers[0])
candidate = answer.get(node)
if candidate and candidate.satisfies(input_spec):
self._concrete_specs.append(answer[node])
self._concrete_specs_by_input[input_spec] = answer[node]
elif candidate and candidate.build_spec.satisfies(input_spec):
tty.warn(
"explicit splice configuration has caused the concretized spec"
f" {candidate} not to satisfy the input spec {input_spec}"
)
if candidate and candidate.build_spec.satisfies(input_spec):
if not candidate.satisfies(input_spec):
tty.warn(
"explicit splice configuration has caused the concretized spec"
f" {candidate} not to satisfy the input spec {input_spec}"
)
self._concrete_specs.append(answer[node])
self._concrete_specs_by_input[input_spec] = answer[node]
else:
@@ -818,7 +814,7 @@ def solve(self, setup, specs, reuse=None, output=None, control=None, allow_depre
solve, and the internal statistics from clingo.
"""
# avoid circular import
import spack.bootstrap.core
import spack.bootstrap
output = output or DEFAULT_OUTPUT_CONFIGURATION
timer = spack.util.timer.Timer()
@@ -857,8 +853,6 @@ def solve(self, setup, specs, reuse=None, output=None, control=None, allow_depre
self.control.load(os.path.join(parent_dir, "libc_compatibility.lp"))
else:
self.control.load(os.path.join(parent_dir, "os_compatibility.lp"))
if setup.enable_splicing:
self.control.load(os.path.join(parent_dir, "splices.lp"))
timer.stop("load")
@@ -893,7 +887,6 @@ def on_model(model):
result.satisfiable = solve_result.satisfiable
if result.satisfiable:
timer.start("construct_specs")
# get the best model
builder = SpecBuilder(specs, hash_lookup=setup.reusable_and_possible)
min_cost, best_model = min(models)
@@ -918,8 +911,7 @@ def on_model(model):
# record the possible dependencies in the solve
result.possible_dependencies = setup.pkgs
timer.stop("construct_specs")
timer.stop()
elif cores:
result.control = self.control
result.cores.extend(cores)
@@ -1171,9 +1163,6 @@ def __init__(self, tests: bool = False):
# list of unique libc specs targeted by compilers (or an educated guess if no compiler)
self.libcs: List[spack.spec.Spec] = []
# If true, we have to load the code for synthesizing splices
self.enable_splicing: bool = spack.config.CONFIG.get("concretizer:splice:automatic")
def pkg_version_rules(self, pkg):
"""Output declared versions of a package.
@@ -1344,10 +1333,6 @@ def pkg_rules(self, pkg, tests):
# dependencies
self.package_dependencies_rules(pkg)
# splices
if self.enable_splicing:
self.package_splice_rules(pkg)
# virtual preferences
self.virtual_preferences(
pkg.name,
@@ -1448,13 +1433,14 @@ def define_variant(
for value in sorted(values):
pkg_fact(fn.variant_possible_value(vid, value))
# we're done here for unconditional values
if not isinstance(value, vt.ConditionalValue):
# when=True means unconditional, so no need for conditional values
if getattr(value, "when", True) is True:
continue
# make a spec indicating whether the variant has this conditional value
variant_has_value = spack.spec.Spec()
variant_has_value.variants[name] = spack.variant.AbstractVariant(name, value.value)
# now we have to handle conditional values
quoted_value = spack.parser.quote_if_needed(str(value))
vstring = f"{name}={quoted_value}"
variant_has_value = spack.spec.Spec(vstring)
if value.when:
# the conditional value is always "possible", but it imposes its when condition as
@@ -1465,12 +1451,10 @@ def define_variant(
imposed_spec=value.when,
required_name=pkg.name,
imposed_name=pkg.name,
msg=f"{pkg.name} variant {name} has value '{value.value}' when {value.when}",
msg=f"{pkg.name} variant {name} has value '{quoted_value}' when {value.when}",
)
else:
vstring = f"{name}='{value.value}'"
# We know the value is never allowed statically (when was None), but we can't just
# We know the value is never allowed statically (when was false), but we can't just
# ignore it b/c it could come in as a possible value and we need a good error msg.
# So, it's a conflict -- if the value is somehow used, it'll trigger an error.
trigger_id = self.condition(
@@ -1686,94 +1670,6 @@ def dependency_holds(input_spec, requirements):
self.gen.newline()
def _gen_match_variant_splice_constraints(
self,
pkg,
cond_spec: "spack.spec.Spec",
splice_spec: "spack.spec.Spec",
hash_asp_var: "AspVar",
splice_node,
match_variants: List[str],
):
# If there are no variants to match, no constraints are needed
variant_constraints = []
for i, variant_name in enumerate(match_variants):
vari_defs = pkg.variant_definitions(variant_name)
# the spliceable config of the package always includes the variant
if vari_defs != [] and any(cond_spec.satisfies(s) for (s, _) in vari_defs):
variant = vari_defs[0][1]
if variant.multi:
continue # cannot automatically match multi-valued variants
value_var = AspVar(f"VariValue{i}")
attr_constraint = fn.attr("variant_value", splice_node, variant_name, value_var)
hash_attr_constraint = fn.hash_attr(
hash_asp_var, "variant_value", splice_spec.name, variant_name, value_var
)
variant_constraints.append(attr_constraint)
variant_constraints.append(hash_attr_constraint)
return variant_constraints
def package_splice_rules(self, pkg):
self.gen.h2("Splice rules")
for i, (cond, (spec_to_splice, match_variants)) in enumerate(
sorted(pkg.splice_specs.items())
):
with named_spec(cond, pkg.name):
self.version_constraints.add((cond.name, cond.versions))
self.version_constraints.add((spec_to_splice.name, spec_to_splice.versions))
hash_var = AspVar("Hash")
splice_node = fn.node(AspVar("NID"), cond.name)
when_spec_attrs = [
fn.attr(c.args[0], splice_node, *(c.args[2:]))
for c in self.spec_clauses(cond, body=True, required_from=None)
if c.args[0] != "node"
]
splice_spec_hash_attrs = [
fn.hash_attr(hash_var, *(c.args))
for c in self.spec_clauses(spec_to_splice, body=True, required_from=None)
if c.args[0] != "node"
]
if match_variants is None:
variant_constraints = []
elif match_variants == "*":
filt_match_variants = set()
for map in pkg.variants.values():
for k in map:
filt_match_variants.add(k)
filt_match_variants = list(filt_match_variants)
variant_constraints = self._gen_match_variant_splice_constraints(
pkg, cond, spec_to_splice, hash_var, splice_node, filt_match_variants
)
else:
if any(
v in cond.variants or v in spec_to_splice.variants for v in match_variants
):
raise Exception(
"Overlap between match_variants and explicitly set variants"
)
variant_constraints = self._gen_match_variant_splice_constraints(
pkg, cond, spec_to_splice, hash_var, splice_node, match_variants
)
rule_head = fn.abi_splice_conditions_hold(
i, splice_node, spec_to_splice.name, hash_var
)
rule_body_components = (
[
# splice_set_fact,
fn.attr("node", splice_node),
fn.installed_hash(spec_to_splice.name, hash_var),
]
+ when_spec_attrs
+ splice_spec_hash_attrs
+ variant_constraints
)
rule_body = ",\n ".join(str(r) for r in rule_body_components)
rule = f"{rule_head} :-\n {rule_body}."
self.gen.append(rule)
self.gen.newline()
def virtual_preferences(self, pkg_name, func):
"""Call func(vspec, provider, i) for each of pkg's provider prefs."""
config = spack.config.get("packages")
@@ -2132,12 +2028,9 @@ def _spec_clauses(
for variant_def in variant_defs:
self.variant_values_from_specs.add((spec.name, id(variant_def), value))
clauses.append(f.variant_value(spec.name, vname, value))
if variant.propagate:
clauses.append(f.propagate(spec.name, fn.variant_value(vname, value)))
if self.pkg_class(spec.name).has_variant(vname):
clauses.append(f.variant_value(spec.name, vname, value))
else:
clauses.append(f.variant_value(spec.name, vname, value))
# compiler and compiler version
if spec.compiler:
@@ -2636,9 +2529,8 @@ def concrete_specs(self):
for h, spec in self.reusable_and_possible.explicit_items():
# this indicates that there is a spec like this installed
self.gen.fact(fn.installed_hash(spec.name, h))
# indirection layer between hash constraints and imposition to allow for splicing
for pred in self.spec_clauses(spec, body=True, required_from=None):
self.gen.fact(fn.hash_attr(h, *pred.args))
# this describes what constraints it imposes on the solve
self.impose(h, spec, body=True)
self.gen.newline()
# Declare as possible parts of specs that are not in package.py
# - Add versions to possible versions
@@ -2724,7 +2616,6 @@ def setup(
)
for name, info in env.dev_specs.items()
)
specs = tuple(specs) # ensure compatible types to add
self.gen.h1("Reusable concrete specs")
@@ -3579,14 +3470,6 @@ def consume_facts(self):
self._setup.effect_rules()
# This should be a dataclass, but dataclasses don't work on Python 3.6
class Splice:
def __init__(self, splice_node: NodeArgument, child_name: str, child_hash: str):
self.splice_node = splice_node
self.child_name = child_name
self.child_hash = child_hash
class SpecBuilder:
"""Class with actions to rebuild a spec from ASP results."""
@@ -3622,11 +3505,10 @@ def make_node(*, pkg: str) -> NodeArgument:
"""
return NodeArgument(id="0", pkg=pkg)
def __init__(self, specs, hash_lookup=None):
def __init__(
self, specs: List[spack.spec.Spec], *, hash_lookup: Optional[ConcreteSpecsByHash] = None
):
self._specs: Dict[NodeArgument, spack.spec.Spec] = {}
# Matches parent nodes to splice node
self._splices: Dict[NodeArgument, List[Splice]] = {}
self._result = None
self._command_line_specs = specs
self._flag_sources: Dict[Tuple[NodeArgument, str], Set[str]] = collections.defaultdict(
@@ -3710,8 +3592,16 @@ def external_spec_selected(self, node, idx):
def depends_on(self, parent_node, dependency_node, type):
dependency_spec = self._specs[dependency_node]
edges = self._specs[parent_node].edges_to_dependencies(name=dependency_spec.name)
edges = [x for x in edges if id(x.spec) == id(dependency_spec)]
depflag = dt.flag_from_string(type)
self._specs[parent_node].add_dependency_edge(dependency_spec, depflag=depflag, virtuals=())
if not edges:
self._specs[parent_node].add_dependency_edge(
self._specs[dependency_node], depflag=depflag, virtuals=()
)
else:
edges[0].update_deptypes(depflag=depflag)
def virtual_on_edge(self, parent_node, provider_node, virtual):
dependencies = self._specs[parent_node].edges_to_dependencies(name=(provider_node.pkg))
@@ -3828,48 +3718,6 @@ def _order_index(flag_group):
def deprecated(self, node: NodeArgument, version: str) -> None:
tty.warn(f'using "{node.pkg}@{version}" which is a deprecated version')
def splice_at_hash(
self,
parent_node: NodeArgument,
splice_node: NodeArgument,
child_name: str,
child_hash: str,
):
splice = Splice(splice_node, child_name=child_name, child_hash=child_hash)
self._splices.setdefault(parent_node, []).append(splice)
def _resolve_automatic_splices(self):
"""After all of the specs have been concretized, apply all immediate
splices in size order. This ensures that all dependencies are resolved
before their parents, allowing for maximal sharing and minimal copying.
"""
fixed_specs = {}
for node, spec in sorted(self._specs.items(), key=lambda x: len(x[1])):
immediate = self._splices.get(node, [])
if not immediate and not any(
edge.spec in fixed_specs for edge in spec.edges_to_dependencies()
):
continue
new_spec = spec.copy(deps=False)
new_spec.build_spec = spec
for edge in spec.edges_to_dependencies():
depflag = edge.depflag & ~dt.BUILD
if any(edge.spec.dag_hash() == splice.child_hash for splice in immediate):
splice = [s for s in immediate if s.child_hash == edge.spec.dag_hash()][0]
new_spec.add_dependency_edge(
self._specs[splice.splice_node], depflag=depflag, virtuals=edge.virtuals
)
elif edge.spec in fixed_specs:
new_spec.add_dependency_edge(
fixed_specs[edge.spec], depflag=depflag, virtuals=edge.virtuals
)
else:
new_spec.add_dependency_edge(
edge.spec, depflag=depflag, virtuals=edge.virtuals
)
self._specs[node] = new_spec
fixed_specs[spec] = new_spec
@staticmethod
def sort_fn(function_tuple) -> Tuple[int, int]:
"""Ensure attributes are evaluated in the correct order.
@@ -3899,6 +3747,7 @@ def build_specs(self, function_tuples):
# them here so that directives that build objects (like node and
# node_compiler) are called in the right order.
self.function_tuples = sorted(set(function_tuples), key=self.sort_fn)
self._specs = {}
for name, args in self.function_tuples:
if SpecBuilder.ignored_attributes.match(name):
@@ -3928,14 +3777,10 @@ def build_specs(self, function_tuples):
continue
# if we've already gotten a concrete spec for this pkg,
# do not bother calling actions on it except for node_flag_source,
# since node_flag_source is tracking information not in the spec itself
# we also need to keep track of splicing information.
# do not bother calling actions on it
spec = self._specs.get(args[0])
if spec and spec.concrete:
do_not_ignore_attrs = ["node_flag_source", "splice_at_hash"]
if name not in do_not_ignore_attrs:
continue
continue
action(*args)
@@ -3945,7 +3790,7 @@ def build_specs(self, function_tuples):
# inject patches -- note that we can't use set() to unique the
# roots here, because the specs aren't complete, and the hash
# function will loop forever.
roots = [spec.root for spec in self._specs.values()]
roots = [spec.root for spec in self._specs.values() if not spec.root.installed]
roots = dict((id(r), r) for r in roots)
for root in roots.values():
spack.spec.Spec.inject_patches_variant(root)
@@ -3961,8 +3806,6 @@ def build_specs(self, function_tuples):
for root in roots.values():
root._finalize_concretization()
self._resolve_automatic_splices()
for s in self._specs.values():
spack.spec.Spec.ensure_no_deprecated(s)
@@ -3977,6 +3820,7 @@ def build_specs(self, function_tuples):
)
specs = self.execute_explicit_splices()
return specs
def execute_explicit_splices(self):
@@ -3985,16 +3829,8 @@ def execute_explicit_splices(self):
for splice_set in splice_config:
target = splice_set["target"]
replacement = spack.spec.Spec(splice_set["replacement"])
if not replacement.abstract_hash:
location = getattr(
splice_set["replacement"], "_start_mark", " at unknown line number"
)
msg = f"Explicit splice replacement '{replacement}' does not include a hash.\n"
msg += f"{location}\n\n"
msg += " Splice replacements must be specified by hash"
raise InvalidSpliceError(msg)
assert replacement.abstract_hash
replacement.replace_hash()
transitive = splice_set.get("transitive", False)
splice_triples.append((target, replacement, transitive))
@@ -4005,10 +3841,6 @@ def execute_explicit_splices(self):
if target in current_spec:
# matches root or non-root
# e.g. mvapich2%gcc
# On the first iteration, we need to replace the abstract hash
if not replacement.concrete:
replacement.replace_hash()
current_spec = current_spec.splice(replacement, transitive)
new_key = NodeArgument(id=key.id, pkg=current_spec.name)
specs[new_key] = current_spec
@@ -4134,7 +3966,7 @@ def selected_specs(self) -> List[spack.spec.Spec]:
return [s for s in self.factory() if self.is_selected(s)]
@staticmethod
def from_store(configuration, *, include, exclude) -> "SpecFilter":
def from_store(configuration, include, exclude) -> "SpecFilter":
"""Constructs a filter that takes the specs from the current store."""
packages = _external_config_with_implicit_externals(configuration)
is_reusable = functools.partial(_is_reusable, packages=packages, local=True)
@@ -4142,7 +3974,7 @@ def from_store(configuration, *, include, exclude) -> "SpecFilter":
return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude)
@staticmethod
def from_buildcache(configuration, *, include, exclude) -> "SpecFilter":
def from_buildcache(configuration, include, exclude) -> "SpecFilter":
"""Constructs a filter that takes the specs from the configured buildcaches."""
packages = _external_config_with_implicit_externals(configuration)
is_reusable = functools.partial(_is_reusable, packages=packages, local=False)
@@ -4150,29 +3982,6 @@ def from_buildcache(configuration, *, include, exclude) -> "SpecFilter":
factory=_specs_from_mirror, is_usable=is_reusable, include=include, exclude=exclude
)
@staticmethod
def from_environment(configuration, *, include, exclude, env) -> "SpecFilter":
packages = _external_config_with_implicit_externals(configuration)
is_reusable = functools.partial(_is_reusable, packages=packages, local=True)
factory = functools.partial(_specs_from_environment, env=env)
return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude)
@staticmethod
def from_environment_included_concrete(
configuration,
*,
include: List[str],
exclude: List[str],
env: ev.Environment,
included_concrete: str,
) -> "SpecFilter":
packages = _external_config_with_implicit_externals(configuration)
is_reusable = functools.partial(_is_reusable, packages=packages, local=True)
factory = functools.partial(
_specs_from_environment_included_concrete, env=env, included_concrete=included_concrete
)
return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude)
def _specs_from_store(configuration):
store = spack.store.create(configuration)
@@ -4190,23 +3999,6 @@ def _specs_from_mirror():
return []
def _specs_from_environment(env):
"""Return all concrete specs from the environment. This includes all included concrete"""
if env:
return [concrete for _, concrete in env.concretized_specs()]
else:
return []
def _specs_from_environment_included_concrete(env, included_concrete):
"""Return only concrete specs from the environment included from the included_concrete"""
if env:
assert included_concrete in env.included_concrete_envs
return [concrete for concrete in env.included_specs_by_hash[included_concrete].values()]
else:
return []
class ReuseStrategy(enum.Enum):
ROOTS = enum.auto()
DEPENDENCIES = enum.auto()
@@ -4236,12 +4028,6 @@ def __init__(self, configuration: spack.config.Configuration) -> None:
SpecFilter.from_buildcache(
configuration=self.configuration, include=[], exclude=[]
),
SpecFilter.from_environment(
configuration=self.configuration,
include=[],
exclude=[],
env=ev.active_environment(), # includes all concrete includes
),
]
)
else:
@@ -4256,46 +4042,7 @@ def __init__(self, configuration: spack.config.Configuration) -> None:
for source in reuse_yaml.get("from", default_sources):
include = source.get("include", default_include)
exclude = source.get("exclude", default_exclude)
if source["type"] == "environment" and "path" in source:
env_dir = ev.as_env_dir(source["path"])
active_env = ev.active_environment()
if active_env and env_dir in active_env.included_concrete_envs:
# If environment is included as a concrete environment, use the local copy
# of specs in the active environment.
# note: included concrete environments are only updated at concretization
# time, and reuse needs to match the included specs.
self.reuse_sources.append(
SpecFilter.from_environment_included_concrete(
self.configuration,
include=include,
exclude=exclude,
env=active_env,
included_concrete=env_dir,
)
)
else:
# If the environment is not included as a concrete environment, use the
# current specs from its lockfile.
self.reuse_sources.append(
SpecFilter.from_environment(
self.configuration,
include=include,
exclude=exclude,
env=ev.environment_from_name_or_dir(env_dir),
)
)
elif source["type"] == "environment":
# reusing from the current environment implicitly reuses from all of the
# included concrete environments
self.reuse_sources.append(
SpecFilter.from_environment(
self.configuration,
include=include,
exclude=exclude,
env=ev.active_environment(),
)
)
elif source["type"] == "local":
if source["type"] == "local":
self.reuse_sources.append(
SpecFilter.from_store(self.configuration, include=include, exclude=exclude)
)
@@ -4313,6 +4060,7 @@ def reusable_specs(self, specs: List[spack.spec.Spec]) -> List[spack.spec.Spec]:
result = []
for reuse_source in self.reuse_sources:
result.extend(reuse_source.selected_specs())
# If we only want to reuse dependencies, remove the root specs
if self.reuse_strategy == ReuseStrategy.DEPENDENCIES:
result = [spec for spec in result if not any(root in spec for root in specs)]
@@ -4343,7 +4091,7 @@ def _check_input_and_extract_concrete_specs(specs):
spack.spec.Spec.ensure_valid_variants(s)
return reusable
def solve_with_stats(
def solve(
self,
specs,
out=None,
@@ -4354,8 +4102,6 @@ def solve_with_stats(
allow_deprecated=False,
):
"""
Concretize a set of specs and track the timing and statistics for the solve
Arguments:
specs (list): List of ``Spec`` objects to solve for.
out: Optionally write the generate ASP program to a file-like object.
@@ -4367,22 +4113,15 @@ def solve_with_stats(
setup_only (bool): if True, stop after setup and don't solve (default False).
allow_deprecated (bool): allow deprecated version in the solve
"""
# Check upfront that the variants are admissible
specs = [s.lookup_hash() for s in specs]
reusable_specs = self._check_input_and_extract_concrete_specs(specs)
reusable_specs.extend(self.selector.reusable_specs(specs))
setup = SpackSolverSetup(tests=tests)
output = OutputConfiguration(timers=timers, stats=stats, out=out, setup_only=setup_only)
return self.driver.solve(
result, _, _ = self.driver.solve(
setup, specs, reuse=reusable_specs, output=output, allow_deprecated=allow_deprecated
)
def solve(self, specs, **kwargs):
"""
Convenience function for concretizing a set of specs and ignoring timing
and statistics. Uses the same kwargs as solve_with_stats.
"""
# Check upfront that the variants are admissible
result, _, _ = self.solve_with_stats(specs, **kwargs)
return result
def solve_in_rounds(
@@ -4482,11 +4221,8 @@ def __init__(self, provided, conflicts):
super().__init__(msg)
self.provided = provided
# Add attribute expected of the superclass interface
self.required = None
self.constraint_type = None
self.provided = provided
class InvalidSpliceError(spack.error.SpackError):
"""For cases in which the splice configuration is invalid."""

View File

@@ -57,12 +57,6 @@
internal_error("provider with no virtual node").
:- provider(PackageNode, _), not attr("node", PackageNode),
internal_error("provider with no real node").
:- node_has_variant(PackageNode, _, _), not attr("node", PackageNode),
internal_error("node has variant for a non-node").
:- attr("variant_set", PackageNode, _, _), not attr("node", PackageNode),
internal_error("variant_set for a non-node").
:- variant_is_propagated(PackageNode, _), not attr("node", PackageNode),
internal_error("variant_is_propagated for a non-node").
:- attr("root", node(ID, PackageNode)), ID > min_dupe_id,
internal_error("root with a non-minimal duplicate ID").
@@ -581,8 +575,7 @@ attr("virtual_on_edge", PackageNode, ProviderNode, Virtual)
% or used somewhere
:- attr("virtual_node", node(_, Virtual)),
not attr("virtual_on_incoming_edges", _, Virtual),
not attr("virtual_root", node(_, Virtual)),
internal_error("virtual node does not match incoming edge").
not attr("virtual_root", node(_, Virtual)).
attr("virtual_on_incoming_edges", ProviderNode, Virtual)
:- attr("virtual_on_edge", _, ProviderNode, Virtual).
@@ -636,8 +629,7 @@ do_not_impose(EffectID, node(X, Package))
virtual_condition_holds(_, PossibleProvider, Virtual),
PossibleProvider != ProviderNode,
explicitly_requested_root(PossibleProvider),
not explicitly_requested_root(ProviderNode),
internal_error("If a root can provide a virtual, it must be the provider").
not explicitly_requested_root(ProviderNode).
% A package cannot be the actual provider for a virtual if it does not
% fulfill the conditions to provide that virtual
@@ -780,8 +772,7 @@ required_provider(Provider, Virtual)
pkg_fact(Virtual, condition_effect(ConditionID, EffectID)),
imposed_constraint(EffectID, "node", Provider).
:- provider(node(Y, Package), node(X, Virtual)), required_provider(Provider, Virtual), Package != Provider,
internal_error("If a provider is required the concretizer must use it").
:- provider(node(Y, Package), node(X, Virtual)), required_provider(Provider, Virtual), Package != Provider.
% TODO: the following choice rule allows the solver to add compiler
% flags if their only source is from a requirement. This is overly-specific
@@ -861,8 +852,7 @@ variant_defined(PackageNode, Name) :- variant_definition(PackageNode, Name, _).
% for two or more variant definitions, this prefers the last one defined.
:- node_has_variant(node(NodeID, Package), Name, SelectedVariantID),
variant_definition(node(NodeID, Package), Name, VariantID),
VariantID > SelectedVariantID,
internal_error("If the solver picks a variant descriptor it must use that variant descriptor").
VariantID > SelectedVariantID.
% B: Associating applicable package rules with nodes
@@ -979,7 +969,6 @@ error(100, "{0} variant '{1}' cannot have values '{2}' and '{3}' as they come fr
:- attr("variant_set", node(ID, Package), Variant, Value),
not attr("variant_value", node(ID, Package), Variant, Value).
internal_error("If a variant is set to a value it must have that value").
% The rules below allow us to prefer default values for variants
% whenever possible. If a variant is set in a spec, or if it is
@@ -990,7 +979,7 @@ variant_not_default(node(ID, Package), Variant, Value)
% variants set explicitly on the CLI don't count as non-default
not attr("variant_set", node(ID, Package), Variant, Value),
% variant values forced by propagation don't count as non-default
not propagate(node(ID, Package), variant_value(Variant, Value, _)),
not propagate(node(ID, Package), variant_value(Variant, Value)),
% variants set on externals that we could use don't count as non-default
% this makes spack prefer to use an external over rebuilding with the
% default configuration
@@ -1002,7 +991,7 @@ variant_default_not_used(node(ID, Package), Variant, Value)
:- variant_default_value(node(ID, Package), Variant, Value),
node_has_variant(node(ID, Package), Variant, _),
not attr("variant_value", node(ID, Package), Variant, Value),
not propagate(node(ID, Package), variant_value(Variant, _, _)),
not propagate(node(ID, Package), variant_value(Variant, _)),
attr("node", node(ID, Package)).
% The variant is set in an external spec
@@ -1047,14 +1036,10 @@ variant_single_value(PackageNode, Variant)
% Propagation semantics
%-----------------------------------------------------------------------------
non_default_propagation(variant_value(Name, Value)) :- attr("propagate", RootNode, variant_value(Name, Value)).
% Propagation roots have a corresponding attr("propagate", ...)
propagate(RootNode, PropagatedAttribute) :- attr("propagate", RootNode, PropagatedAttribute), not non_default_propagation(PropagatedAttribute).
propagate(RootNode, PropagatedAttribute) :- attr("propagate", RootNode, PropagatedAttribute).
propagate(RootNode, PropagatedAttribute, EdgeTypes) :- attr("propagate", RootNode, PropagatedAttribute, EdgeTypes).
% Special case variants, to inject the source node in the propagated attribute
propagate(RootNode, variant_value(Name, Value, RootNode)) :- attr("propagate", RootNode, variant_value(Name, Value)).
% Propagate an attribute along edges to child nodes
propagate(ChildNode, PropagatedAttribute) :-
@@ -1076,53 +1061,21 @@ propagate(ChildNode, PropagatedAttribute, edge_types(DepType1, DepType2)) :-
% If a variant is propagated, and can be accepted, set its value
attr("variant_selected", PackageNode, Variant, Value, VariantType, VariantID) :-
propagate(PackageNode, variant_value(Variant, Value, _)),
propagate(PackageNode, variant_value(Variant, Value)),
node_has_variant(PackageNode, Variant, VariantID),
variant_type(VariantID, VariantType),
variant_possible_value(PackageNode, Variant, Value).
variant_possible_value(PackageNode, Variant, Value),
not attr("variant_set", PackageNode, Variant).
% If a variant is propagated, we cannot have extraneous values
variant_is_propagated(PackageNode, Variant) :-
attr("variant_value", PackageNode, Variant, Value),
propagate(PackageNode, variant_value(Variant, Value, _)),
propagate(PackageNode, variant_value(Variant, Value)),
not attr("variant_set", PackageNode, Variant).
:- variant_is_propagated(PackageNode, Variant),
attr("variant_selected", PackageNode, Variant, Value, _, _),
not propagate(PackageNode, variant_value(Variant, Value, _)).
error(100, "{0} and {1} cannot both propagate variant '{2}' to the shared dependency: {3}",
Package1, Package2, Variant, Dependency) :-
% The variant is a singlevalued variant
variant_single_value(node(X, Package1), Variant),
% Dependency is trying to propagate Variant with different values and is not the source package
propagate(node(Z, Dependency), variant_value(Variant, Value1, node(X, Package1))),
propagate(node(Z, Dependency), variant_value(Variant, Value2, node(Y, Package2))),
% Package1 and Package2, and their values, are different
Package1 > Package2, Value1 != Value2,
not propagate(node(Z, Dependency), variant_value(Variant, _, node(Z, Dependency))).
% Cannot propagate the same variant from two different packages if one is a dependency of the other
error(100, "{0} and {1} cannot both propagate variant '{2}'", Package1, Package2, Variant) :-
% The variant is a single-valued variant
variant_single_value(node(X, Package1), Variant),
% Package1 and Package2, and their values, are different
Package1 != Package2, Value1 != Value2,
% Package2 is set to propagate the value from Package1
propagate(node(Y, Package2), variant_value(Variant, Value2, node(X, Package2))),
propagate(node(Y, Package2), variant_value(Variant, Value1, node(X, Package1))),
variant_is_propagated(node(Y, Package2), Variant).
% Cannot propagate a variant if a different value was set for it in a dependency
error(100, "Cannot propagate the variant '{0}' from the package: {1} because package: {2} is set to exclude it", Variant, Source, Package) :-
% Package has a Variant and Source is propagating Variant
attr("variant_set", node(X, Package), Variant, Value1),
% The packages and values are different
Source != Package, Value1 != Value2,
% The variant is a single-valued variant
variant_single_value(node(X, Package1), Variant),
% A different value is being propagated from somewhere else
propagate(node(X, Package), variant_value(Variant, Value2, node(Y, Source))).
not propagate(PackageNode, variant_value(Variant, Value)).
%----
% Flags
@@ -1449,71 +1402,25 @@ attr("node_flag", PackageNode, NodeFlag) :- attr("node_flag_set", PackageNode, N
%-----------------------------------------------------------------------------
% Installed Packages
% Installed packages
%-----------------------------------------------------------------------------
% the solver is free to choose at most one installed hash for each package
{ attr("hash", node(ID, Package), Hash) : installed_hash(Package, Hash) } 1
:- attr("node", node(ID, Package)), internal_error("Package must resolve to at most one hash").
#defined installed_hash/2.
#defined abi_splice_conditions_hold/4.
% These are the previously concretized attributes of the installed package as
% a hash. It has the general form:
% hash_attr(Hash, Attribute, PackageName, Args*)
#defined hash_attr/3.
#defined hash_attr/4.
#defined hash_attr/5.
#defined hash_attr/6.
#defined hash_attr/7.
{ attr("hash", node(ID, PackageName), Hash): installed_hash(PackageName, Hash) } 1 :-
attr("node", node(ID, PackageName)),
internal_error("Package must resolve to at most 1 hash").
% you can't choose an installed hash for a dev spec
:- attr("hash", PackageNode, Hash), attr("variant_value", PackageNode, "dev_path", _).
% You can't install a hash, if it is not installed
:- attr("hash", node(ID, Package), Hash), not installed_hash(Package, Hash).
% This should be redundant given the constraint above
:- attr("node", PackageNode), 2 { attr("hash", PackageNode, Hash) }.
% hash_attr facts carry concrete versions, but can_splice conditions are usually node_version_satisfies
hash_attr(Hash, "node_version_satisfies", PackageName, Constraint) :-
hash_attr(Hash, "version", PackageName, Version),
pkg_fact(PackageName, version_satisfies(Constraint, Version)).
% This recovers the exact semantics for hash reuse: hash and depends_on are where
% splices are decided, and virtual_on_edge can result in name-changes, which is
% why they are all treated separately.
imposed_constraint(Hash, Attr, PackageName) :-
hash_attr(Hash, Attr, PackageName).
imposed_constraint(Hash, Attr, PackageName, A1) :-
hash_attr(Hash, Attr, PackageName, A1), Attr != "hash".
imposed_constraint(Hash, Attr, PackageName, Arg1, Arg2) :-
hash_attr(Hash, Attr, PackageName, Arg1, Arg2),
Attr != "depends_on",
Attr != "virtual_on_edge".
imposed_constraint(Hash, Attr, PackageName, A1, A2, A3) :-
hash_attr(Hash, Attr, PackageName, A1, A2, A3).
imposed_constraint(Hash, "hash", PackageName, Hash) :- installed_hash(PackageName, Hash).
% Without splicing, we simply recover the exact semantics
imposed_constraint(ParentHash, "hash", ChildName, ChildHash) :-
hash_attr(ParentHash, "hash", ChildName, ChildHash),
ChildHash != ParentHash,
not abi_splice_conditions_hold(_, _, ChildName, ChildHash).
imposed_constraint(Hash, "depends_on", PackageName, DepName, Type) :-
hash_attr(Hash, "depends_on", PackageName, DepName, Type),
hash_attr(Hash, "hash", DepName, DepHash),
not attr("splice_at_hash", _, _, DepName, DepHash).
imposed_constraint(Hash, "virtual_on_edge", PackageName, DepName, VirtName) :-
hash_attr(Hash, "virtual_on_edge", PackageName, DepName, VirtName),
not attr("splice_at_hash", _, _, DepName,_).
% Rules pertaining to attr("splice_at_hash") and abi_splice_conditions_hold will
% be conditionally loaded from splices.lp
impose(Hash, PackageNode) :- attr("hash", PackageNode, Hash), attr("node", PackageNode).
% If there is not a hash for a package, we build it.
build(PackageNode) :- attr("node", PackageNode), not concrete(PackageNode).
% if a hash is selected, we impose all the constraints that implies
impose(Hash, PackageNode) :- attr("hash", PackageNode, Hash).
% if we haven't selected a hash for a package, we'll be building it
build(PackageNode) :- not attr("hash", PackageNode, _), attr("node", PackageNode).
% Minimizing builds is tricky. We want a minimizing criterion
@@ -1526,7 +1433,6 @@ build(PackageNode) :- attr("node", PackageNode), not concrete(PackageNode).
% criteria for built specs -- so that they take precedence over the otherwise
% topmost-priority criterion to reuse what is installed.
%
% The priority ranges are:
% 1000+ Optimizations for concretization errors
% 300 - 1000 Highest priority optimizations for valid solutions
@@ -1552,10 +1458,12 @@ build_priority(PackageNode, 0) :- not build(PackageNode), attr("node", Package
pkg_fact(Package, version_declared(Version, Weight, "installed")),
not optimize_for_reuse().
#defined installed_hash/2.
% This statement, which is a hidden feature of clingo, lets us avoid cycles in the DAG
#edge (A, B) : depends_on(A, B).
%-----------------------------------------------------------------
% Optimization to avoid errors
%-----------------------------------------------------------------

View File

@@ -44,17 +44,6 @@ def _id(thing: Any) -> Union[str, AspObject]:
return f'"{str(thing)}"'
class AspVar(AspObject):
"""Represents a variable in an ASP rule, allows for conditionally generating
rules"""
def __init__(self, name: str):
self.name = name
def __str__(self) -> str:
return str(self.name)
@lang.key_ordering
class AspFunction(AspObject):
"""A term in the ASP logic program"""
@@ -99,8 +88,6 @@ def _argify(self, arg: Any) -> Any:
return clingo().Number(arg)
elif isinstance(arg, AspFunction):
return clingo().Function(arg.name, [self._argify(x) for x in arg.args], positive=True)
elif isinstance(arg, AspVar):
return clingo().Variable(arg.name)
return clingo().String(str(arg))
def symbol(self):
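For context, a minimal standalone sketch (assuming only the clingo Python package; names here are illustrative, not Spack's) of the kind of dispatch _argify performs when turning plain Python values into clingo symbols:

import clingo

def argify(arg):
    # booleans are stringified, as in _argify above, to avoid 1/0 ambiguity
    if isinstance(arg, bool):
        return clingo.String(str(arg))
    if isinstance(arg, int):
        return clingo.Number(arg)
    return clingo.String(str(arg))

print(argify(7))          # 7, a Number symbol
print(argify("version"))  # "version", a String symbol
print(clingo.Function("attr", [argify("node"), argify(1)]))  # attr("node",1)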

View File

@@ -15,6 +15,7 @@
#show attr/4.
#show attr/5.
#show attr/6.
% names of optimization criteria
#show opt_criterion/2.

View File

@@ -1,56 +0,0 @@
% Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
% Spack Project Developers. See the top-level COPYRIGHT file for details.
%
% SPDX-License-Identifier: (Apache-2.0 OR MIT)
%=============================================================================
% These rules are conditionally loaded to handle the synthesis of spliced
% packages.
% =============================================================================
% Consider the concrete spec:
% foo@2.72%gcc@11.4 arch=linux-ubuntu22.04-icelake build_system=autotools ^bar ...
% It will emit the following facts for reuse (below is a subset)
% installed_hash("foo", "xxxyyy")
% hash_attr("xxxyyy", "hash", "foo", "xxxyyy")
% hash_attr("xxxyyy", "version", "foo", "2.72")
% hash_attr("xxxyyy", "node_os", "ubuntu22.04")
% hash_attr("xxxyyy", "hash", "bar", "zzzqqq")
% hash_attr("xxxyyy", "depends_on", "foo", "bar", "link")
% Rules that derive abi_splice_conditions_hold will be generated from
% use of the `can_splice` directive. They will have the following form:
% can_splice("foo@1.0.0+a", when="@1.0.1+a", match_variants=["b"]) --->
% abi_splice_conditions_hold(0, node(SID, "foo"), "foo", BaseHash) :-
% installed_hash("foo", BaseHash),
% attr("node", node(SID, SpliceName)),
% attr("node_version_satisfies", node(SID, "foo"), "1.0.1"),
% hash_attr("hash", "node_version_satisfies", "foo", "1.0.1"),
% attr("variant_value", node(SID, "foo"), "a", "True"),
% hash_attr("hash", "variant_value", "foo", "a", "True"),
% attr("variant_value", node(SID, "foo"), "b", VariVar0),
% hash_attr("hash", "variant_value", "foo", "b", VariVar0),
% If the splice is valid (i.e. abi_splice_conditions_hold is derived) for a
% dependency of a concrete spec, the solver is free to choose whether to continue
% with the exact hash semantics by simply imposing the child hash, or to introduce
% a spliced node as the dependency instead
{ imposed_constraint(ParentHash, "hash", ChildName, ChildHash) } :-
hash_attr(ParentHash, "hash", ChildName, ChildHash),
abi_splice_conditions_hold(_, node(SID, SpliceName), ChildName, ChildHash).
attr("splice_at_hash", ParentNode, node(SID, SpliceName), ChildName, ChildHash) :-
attr("hash", ParentNode, ParentHash),
hash_attr(ParentHash, "hash", ChildName, ChildHash),
abi_splice_conditions_hold(_, node(SID, SpliceName), ChildName, ChildHash),
ParentHash != ChildHash,
not imposed_constraint(ParentHash, "hash", ChildName, ChildHash).
% Names and virtual providers may change when a dependency is spliced in
imposed_constraint(Hash, "dependency_holds", ParentName, SpliceName, Type) :-
hash_attr(Hash, "depends_on", ParentName, DepName, Type),
hash_attr(Hash, "hash", DepName, DepHash),
attr("splice_at_hash", node(ID, ParentName), node(SID, SpliceName), DepName, DepHash).
imposed_constraint(Hash, "virtual_on_edge", ParentName, SpliceName, VirtName) :-
hash_attr(Hash, "virtual_on_edge", ParentName, DepName, VirtName),
attr("splice_at_hash", node(ID, ParentName), node(SID, SpliceName), DepName, DepHash).

View File

@@ -59,7 +59,7 @@
import re
import socket
import warnings
from typing import Any, Callable, Dict, Iterable, List, Match, Optional, Set, Tuple, Union
from typing import Any, Callable, Dict, List, Match, Optional, Set, Tuple, Union
import archspec.cpu
@@ -877,9 +877,8 @@ def constrain(self, other):
# Next, if any flags in other propagate, we force them to propagate in our case
shared = list(sorted(set(other[flag_type]) - extra_other))
for x, y in _shared_subset_pair_iterate(shared, sorted(self[flag_type])):
if y.propagate is True and x.propagate is False:
changed = True
y.propagate = False
if x.propagate:
y.propagate = True
# TODO: what happens if flag groups with a partial (but not complete)
# intersection specify different behaviors for flag propagation?
@@ -934,7 +933,6 @@ def _cmp_iter(self):
def flags():
for flag in v:
yield flag
yield flag.propagate
yield flags
@@ -965,6 +963,10 @@ def _sort_by_dep_types(dspec: DependencySpec):
return dspec.depflag
#: Enum for edge directions
EdgeDirection = lang.enum(parent=0, child=1)
@lang.lazy_lexicographic_ordering
class _EdgeMap(collections.abc.Mapping):
"""Represent a collection of edges (DependencySpec objects) in the DAG.
@@ -978,20 +980,26 @@ class _EdgeMap(collections.abc.Mapping):
__slots__ = "edges", "store_by_child"
def __init__(self, store_by_child: bool = True) -> None:
self.edges: Dict[str, List[DependencySpec]] = {}
self.store_by_child = store_by_child
def __init__(self, store_by=EdgeDirection.child):
# Sanitize input arguments
msg = 'unexpected value for "store_by" argument'
assert store_by in (EdgeDirection.child, EdgeDirection.parent), msg
def __getitem__(self, key: str) -> List[DependencySpec]:
#: This dictionary maps a package name to a list of edges
#: i.e. to a list of DependencySpec objects
self.edges = {}
self.store_by_child = store_by == EdgeDirection.child
def __getitem__(self, key):
return self.edges[key]
def __iter__(self):
return iter(self.edges)
def __len__(self) -> int:
def __len__(self):
return len(self.edges)
def add(self, edge: DependencySpec) -> None:
def add(self, edge: DependencySpec):
key = edge.spec.name if self.store_by_child else edge.parent.name
if key in self.edges:
lst = self.edges[key]
@@ -1000,8 +1008,8 @@ def add(self, edge: DependencySpec) -> None:
else:
self.edges[key] = [edge]
def __str__(self) -> str:
return f"{{deps: {', '.join(str(d) for d in sorted(self.values()))}}}"
def __str__(self):
return "{deps: %s}" % ", ".join(str(d) for d in sorted(self.values()))
def _cmp_iter(self):
for item in sorted(itertools.chain.from_iterable(self.edges.values())):
@@ -1018,32 +1026,24 @@ def copy(self):
return clone
def select(
self,
*,
parent: Optional[str] = None,
child: Optional[str] = None,
depflag: dt.DepFlag = dt.ALL,
virtuals: Optional[List[str]] = None,
) -> List[DependencySpec]:
"""Selects a list of edges and returns them.
def select(self, parent=None, child=None, depflag: dt.DepFlag = dt.ALL):
"""Select a list of edges and return them.
If an edge:
- Has *any* of the dependency types passed as argument,
- Matches the parent and/or child name
- Provides *any* of the virtuals passed as argument
- Matches the parent and/or child name, if passed
then it is selected.
The deptypes argument needs to be a flag, since the method won't
convert it for performance reasons.
Args:
parent: name of the parent package
child: name of the child package
parent (str): name of the parent package
child (str): name of the child package
depflag: allowed dependency types in flag form
virtuals: list of virtuals on the edge
Returns:
List of DependencySpec objects
"""
if not depflag:
return []
@@ -1062,10 +1062,6 @@ def select(
# Filter by allowed dependency types
selected = (dep for dep in selected if not dep.depflag or (depflag & dep.depflag))
# Filter by virtuals
if virtuals is not None:
selected = (dep for dep in selected if any(v in dep.virtuals for v in virtuals))
return list(selected)
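A self-contained sketch (illustrative dicts, not Spack's DependencySpec) of the generator pipeline select() builds before materializing the list:

from typing import List, Optional

def select_edges(edges, parent: Optional[str] = None, child: Optional[str] = None,
                 virtuals: Optional[List[str]] = None):
    selected = iter(edges)
    if parent is not None:
        selected = (e for e in selected if e["parent"] == parent)
    if child is not None:
        selected = (e for e in selected if e["child"] == child)
    if virtuals is not None:
        # keep edges providing *any* of the requested virtuals
        selected = (e for e in selected if any(v in e["virtuals"] for v in virtuals))
    return list(selected)

edges = [{"parent": "mpileaks", "child": "mpich", "virtuals": ["mpi"]}]
print(select_edges(edges, child="mpich", virtuals=["mpi"]))  # one match
print(select_edges(edges, virtuals=["blas"]))                # []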
def clear(self):
@@ -1431,8 +1427,6 @@ def tree(
class Spec:
#: Cache for spec's prefix, computed lazily in the corresponding property
_prefix = None
#: Cache for spec's length, computed lazily in the corresponding property
_length = None
abstract_hash = None
@staticmethod
@@ -1476,8 +1470,8 @@ def __init__(
self.architecture = None
self.compiler = None
self.compiler_flags = FlagMap(self)
self._dependents = _EdgeMap(store_by_child=False)
self._dependencies = _EdgeMap(store_by_child=True)
self._dependents = _EdgeMap(store_by=EdgeDirection.parent)
self._dependencies = _EdgeMap(store_by=EdgeDirection.child)
self.namespace = None
# initial values for all spec hash types
@@ -1597,7 +1591,7 @@ def _get_dependency(self, name):
return deps[0]
def edges_from_dependents(
self, name=None, depflag: dt.DepFlag = dt.ALL, *, virtuals: Optional[List[str]] = None
self, name=None, depflag: dt.DepFlag = dt.ALL
) -> List[DependencySpec]:
"""Return a list of edges connecting this node in the DAG
to parents.
@@ -1605,25 +1599,20 @@ def edges_from_dependents(
Args:
name (str): filter dependents by package name
depflag: allowed dependency types
virtuals: allowed virtuals
"""
return [
d for d in self._dependents.select(parent=name, depflag=depflag, virtuals=virtuals)
]
return [d for d in self._dependents.select(parent=name, depflag=depflag)]
def edges_to_dependencies(
self, name=None, depflag: dt.DepFlag = dt.ALL, *, virtuals: Optional[List[str]] = None
self, name=None, depflag: dt.DepFlag = dt.ALL
) -> List[DependencySpec]:
"""Returns a list of edges connecting this node in the DAG to children.
"""Return a list of edges connecting this node in the DAG
to children.
Args:
name (str): filter dependencies by package name
depflag: allowed dependency types
virtuals: allowed virtuals
"""
return [
d for d in self._dependencies.select(child=name, depflag=depflag, virtuals=virtuals)
]
return [d for d in self._dependencies.select(child=name, depflag=depflag)]
@property
def edge_attributes(self) -> str:
@@ -1646,24 +1635,17 @@ def edge_attributes(self) -> str:
return f"[{result}]"
def dependencies(
self,
name=None,
deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL,
*,
virtuals: Optional[List[str]] = None,
self, name=None, deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL
) -> List["Spec"]:
"""Returns a list of direct dependencies (nodes in the DAG)
"""Return a list of direct dependencies (nodes in the DAG).
Args:
name: filter dependencies by package name
name (str): filter dependencies by package name
deptype: allowed dependency types
virtuals: allowed virtuals
"""
if not isinstance(deptype, dt.DepFlag):
deptype = dt.canonicalize(deptype)
return [
d.spec for d in self.edges_to_dependencies(name, depflag=deptype, virtuals=virtuals)
]
return [d.spec for d in self.edges_to_dependencies(name, depflag=deptype)]
def dependents(
self, name=None, deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL
@@ -2203,18 +2185,6 @@ def to_node_dict(self, hash=ht.dag_hash):
if params:
d["parameters"] = params
if params and not self.concrete:
flag_names = [
name
for name, flags in self.compiler_flags.items()
if any(x.propagate for x in flags)
]
d["propagate"] = sorted(
itertools.chain(
[v.name for v in self.variants.values() if v.propagate], flag_names
)
)
if self.external:
d["external"] = syaml.syaml_dict(
[
@@ -2387,10 +2357,16 @@ def node_dict_with_hashes(self, hash=ht.dag_hash):
spec is concrete, the full hash is added as well. If 'build' is in
the hash_type, the build hash is also added."""
node = self.to_node_dict(hash)
# All specs have at least a DAG hash
node[ht.dag_hash.name] = self.dag_hash()
if not self.concrete:
# dag_hash is lazily computed -- but if we write a spec out, we want it
# to be included. This is effectively the last chance we get to compute
# it accurately.
if self.concrete:
# all specs have at least a DAG hash
node[ht.dag_hash.name] = self.dag_hash()
else:
node["concrete"] = False
# we can also give them other hash types if we want
@@ -2830,7 +2806,7 @@ def ensure_no_deprecated(root):
msg += " For each package listed, choose another spec\n"
raise SpecDeprecatedError(msg)
def concretize(self, tests: Union[bool, Iterable[str]] = False) -> None:
def concretize(self, tests: Union[bool, List[str]] = False) -> None:
"""Concretize the current spec.
Args:
@@ -2909,7 +2885,7 @@ def _mark_concrete(self, value=True):
if (not value) and s.concrete and s.installed:
continue
elif not value:
s.clear_caches()
s.clear_cached_hashes()
s._mark_root_concrete(value)
def _finalize_concretization(self):
@@ -2958,7 +2934,7 @@ def _finalize_concretization(self):
for spec in self.traverse():
spec._cached_hash(ht.dag_hash)
def concretized(self, tests: Union[bool, Iterable[str]] = False) -> "spack.spec.Spec":
def concretized(self, tests=False):
"""This is a non-destructive version of concretize().
First clones, then returns a concrete version of this package
@@ -3022,12 +2998,7 @@ def ensure_valid_variants(spec):
pkg_variants = pkg_cls.variant_names()
# reserved names are variants that may be set on any package
# but are not necessarily recorded by the package's class
propagate_variants = [name for name, variant in spec.variants.items() if variant.propagate]
not_existing = set(spec.variants) - (
set(pkg_variants) | set(vt.reserved_names) | set(propagate_variants)
)
not_existing = set(spec.variants) - (set(pkg_variants) | set(vt.reserved_names))
if not_existing:
raise vt.UnknownVariantError(
f"No such variant {not_existing} for spec: '{spec}'", list(not_existing)
@@ -3054,10 +3025,6 @@ def constrain(self, other, deps=True):
raise spack.error.UnsatisfiableSpecError(self, other, "constrain a concrete spec")
other = self._autospec(other)
if other.concrete and other.satisfies(self):
self._dup(other)
return True
if other.abstract_hash:
if not self.abstract_hash or other.abstract_hash.startswith(self.abstract_hash):
self.abstract_hash = other.abstract_hash
@@ -3552,8 +3519,8 @@ def _dup(self, other, deps: Union[bool, dt.DepTypes, dt.DepFlag] = True, clearde
self.architecture = other.architecture.copy() if other.architecture else None
self.compiler = other.compiler.copy() if other.compiler else None
if cleardeps:
self._dependents = _EdgeMap(store_by_child=False)
self._dependencies = _EdgeMap(store_by_child=True)
self._dependents = _EdgeMap(store_by=EdgeDirection.parent)
self._dependencies = _EdgeMap(store_by=EdgeDirection.child)
self.compiler_flags = other.compiler_flags.copy()
self.compiler_flags.spec = self
self.variants = other.variants.copy()
@@ -3702,18 +3669,6 @@ def __getitem__(self, name: str):
return child
def __len__(self):
if not self.concrete:
raise spack.error.SpecError(f"Cannot get length of abstract spec: {self}")
if not self._length:
self._length = 1 + sum(len(dep) for dep in self.dependencies())
return self._length
def __bool__(self):
# Need to define this so __len__ isn't used by default
return True
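A standalone illustration of the pitfall the comment above guards against: without __bool__, Python falls back to __len__ for truth testing, so a spec with no dependencies could evaluate as falsy.

class WithLenOnly:
    def __len__(self):
        return 0

class WithBool(WithLenOnly):
    def __bool__(self):
        return True

print(bool(WithLenOnly()))  # False -- truth test fell back to __len__
print(bool(WithBool()))     # True  -- __bool__ takes precedence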
def __contains__(self, spec):
"""True if this spec or some dependency satisfies the spec.
@@ -4073,7 +4028,7 @@ def format_path(
def __str__(self):
if self._concrete:
return self.format("{name}{@version}{/hash}")
return self.format("{name}{@version}{/hash:7}")
if not self._dependencies:
return self.format()
@@ -4270,7 +4225,7 @@ def _splice_detach_and_add_dependents(self, replacement, context):
for ancestor in ancestors_in_context:
# Only set it if it hasn't been spliced before
ancestor._build_spec = ancestor._build_spec or ancestor.copy()
ancestor.clear_caches(ignore=(ht.package_hash.attr,))
ancestor.clear_cached_hashes(ignore=(ht.package_hash.attr,))
for edge in ancestor.edges_to_dependencies(depflag=dt.BUILD):
if edge.depflag & ~dt.BUILD:
edge.depflag &= ~dt.BUILD
@@ -4464,7 +4419,7 @@ def mask_build_deps(in_spec):
return spec
def clear_caches(self, ignore=()):
def clear_cached_hashes(self, ignore=()):
"""
Clears all cached hashes in a Spec, while preserving other properties.
"""
@@ -4472,9 +4427,7 @@ def clear_caches(self, ignore=()):
if h.attr not in ignore:
if hasattr(self, h.attr):
setattr(self, h.attr, None)
for attr in ("_dunder_hash", "_prefix", "_length"):
if attr not in ignore:
setattr(self, attr, None)
self._dunder_hash = None
def __hash__(self):
# If the spec is concrete, we leverage the process hash and just use
@@ -4550,69 +4503,8 @@ def substitute(self, vspec):
# Set the item
super().__setitem__(vspec.name, vspec)
def partition_variants(self):
non_prop, prop = lang.stable_partition(self.values(), lambda x: not x.propagate)
# Just return the names
non_prop = [x.name for x in non_prop]
prop = [x.name for x in prop]
return non_prop, prop
def satisfies(self, other: "VariantMap") -> bool:
if self.spec.concrete:
return self._satisfies_when_self_concrete(other)
return self._satisfies_when_self_abstract(other)
def _satisfies_when_self_concrete(self, other: "VariantMap") -> bool:
non_propagating, propagating = other.partition_variants()
result = all(
name in self and self[name].satisfies(other[name]) for name in non_propagating
)
if not propagating:
return result
for node in self.spec.traverse():
if not all(
node.variants[name].satisfies(other[name])
for name in propagating
if name in node.variants
):
return False
return result
def _satisfies_when_self_abstract(self, other: "VariantMap") -> bool:
other_non_propagating, other_propagating = other.partition_variants()
self_non_propagating, self_propagating = self.partition_variants()
# First check variants without propagation set
result = all(
name in self_non_propagating
and (self[name].propagate or self[name].satisfies(other[name]))
for name in other_non_propagating
)
if result is False or (not other_propagating and not self_propagating):
return result
# Check that self doesn't contradict variants propagated by other
if other_propagating:
for node in self.spec.traverse():
if not all(
node.variants[name].satisfies(other[name])
for name in other_propagating
if name in node.variants
):
return False
# Check that other doesn't contradict variants propagated by self
if self_propagating:
for node in other.spec.traverse():
if not all(
node.variants[name].satisfies(self[name])
for name in self_propagating
if name in node.variants
):
return False
return result
def satisfies(self, other):
return all(k in self and self[k].satisfies(other[k]) for k in other)
def intersects(self, other):
return all(self[k].intersects(other[k]) for k in other if k in self)
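A hypothetical stand-in for lang.stable_partition as used by partition_variants above (assumption: it splits a sequence by a predicate while preserving order):

def stable_partition(items, predicate):
    matching, rest = [], []
    for item in items:
        (matching if predicate(item) else rest).append(item)
    return matching, rest

non_prop, prop = stable_partition(["+a", "++b", "+c"], lambda v: not v.startswith("++"))
print(non_prop, prop)  # ['+a', '+c'] ['++b']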
@@ -4825,17 +4717,13 @@ def from_node_dict(cls, node):
else:
spec.compiler = None
propagated_names = node.get("propagate", [])
for name, values in node.get("parameters", {}).items():
propagate = name in propagated_names
if name in _valid_compiler_flags:
spec.compiler_flags[name] = []
for val in values:
spec.compiler_flags.add_flag(name, val, propagate)
spec.compiler_flags.add_flag(name, val, False)
else:
spec.variants[name] = vt.MultiValuedVariant.from_node_dict(
name, values, propagate=propagate
)
spec.variants[name] = vt.MultiValuedVariant.from_node_dict(name, values)
spec.external_path = None
spec.external_modules = None

View File

@@ -33,12 +33,16 @@
import spack.error
import spack.paths
import spack.spec
import spack.store
import spack.util.path
#: default installation root, relative to the Spack install path
DEFAULT_INSTALL_TREE_ROOT = os.path.join(spack.paths.opt_path, "spack")
ConfigurationType = Union["spack.config.Configuration", "llnl.util.lang.Singleton"]
def parse_install_tree(config_dict):
"""Parse config settings and return values relevant to the store object.
@@ -204,7 +208,7 @@ def __reduce__(self):
)
def create(configuration: spack.config.Configuration) -> Store:
def create(configuration: ConfigurationType) -> Store:
"""Create a store from the configuration passed as input.
Args:
@@ -237,7 +241,7 @@ def _create_global() -> Store:
#: Singleton store instance
STORE: Store = llnl.util.lang.Singleton(_create_global) # type: ignore
STORE: Union[Store, llnl.util.lang.Singleton] = llnl.util.lang.Singleton(_create_global)
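A minimal sketch of the lazy-singleton idea (an assumption about llnl.util.lang.Singleton: the factory runs only when the object is first used):

class LazySingleton:
    def __init__(self, factory):
        self._factory = factory
        self._instance = None

    def _materialize(self):
        if self._instance is None:
            self._instance = self._factory()
        return self._instance

    def __getattr__(self, name):
        # only reached for attributes not found on LazySingleton itself
        return getattr(self._materialize(), name)

def make_store():
    print("creating store")  # side effect shows when creation happens
    return type("Store", (), {"root": "/opt/spack"})()

store = LazySingleton(make_store)
print("configured, not yet created")
print(store.root)  # "creating store" is printed only now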
def reinitialize():
@@ -304,7 +308,7 @@ def find(
matching_specs: List[spack.spec.Spec] = []
errors = []
query_fn = query_fn or STORE.db.query
query_fn = query_fn or spack.store.STORE.db.query
for spec in constraints:
current_matches = query_fn(spec, **kwargs)
@@ -337,7 +341,7 @@ def specfile_matches(filename: str, **kwargs) -> List["spack.spec.Spec"]:
**kwargs: keyword arguments forwarded to "find"
"""
query = [spack.spec.Spec.from_specfile(filename)]
return find(query, **kwargs)
return spack.store.find(query, **kwargs)
def ensure_singleton_created() -> None:

View File

@@ -17,6 +17,7 @@
import multiprocessing
import pickle
import pydoc
import sys
from types import ModuleType
import spack.config
@@ -26,6 +27,9 @@
import spack.repo
import spack.store
_SERIALIZE = sys.platform == "win32" or (sys.version_info >= (3, 8) and sys.platform == "darwin")
patches = None
@@ -52,7 +56,7 @@ def _restore_and_run(self, fn, test_state):
fn()
def create(self):
test_state = GlobalStateMarshaler()
test_state = TestState()
return multiprocessing.Process(target=self._restore_and_run, args=(self.fn, test_state))
@@ -61,56 +65,49 @@ class PackageInstallContext:
needs to be transmitted to a child process.
"""
def __init__(self, pkg, *, ctx=None):
ctx = ctx or multiprocessing.get_context()
self.serialize = ctx.get_start_method() != "fork"
if self.serialize:
def __init__(self, pkg):
if _SERIALIZE:
self.serialized_pkg = serialize(pkg)
self.global_state = GlobalStateMarshaler()
self.serialized_env = serialize(spack.environment.active_environment())
else:
self.pkg = pkg
self.global_state = None
self.env = spack.environment.active_environment()
self.spack_working_dir = spack.paths.spack_working_dir
self.test_state = TestState()
def restore(self):
self.test_state.restore()
spack.paths.spack_working_dir = self.spack_working_dir
env = pickle.load(self.serialized_env) if self.serialize else self.env
# Activating the environment modifies the global configuration, so globals have to
# be restored afterward, in case other modifications were applied on top (e.g. from
# command line)
env = pickle.load(self.serialized_env) if _SERIALIZE else self.env
if env:
spack.environment.activate(env)
if self.serialize:
self.global_state.restore()
# Order of operation is important, since the package might be retrieved
# from a repo defined within the environment configuration
pkg = pickle.load(self.serialized_pkg) if self.serialize else self.pkg
pkg = pickle.load(self.serialized_pkg) if _SERIALIZE else self.pkg
return pkg
class GlobalStateMarshaler:
"""Class to serialize and restore global state for child processes.
Spack may modify, in memory, state that is normally read from disk or the command line;
this object is responsible for properly serializing that state so it can be applied to a subprocess.
class TestState:
"""Spack tests may modify state that is normally read from disk in memory;
this object is responsible for properly serializing that state to be
applied to a subprocess. This isn't needed outside of a testing environment
but this logic is designed to behave the same inside or outside of tests.
"""
def __init__(self):
self.config = spack.config.CONFIG.ensure_unwrapped()
self.platform = spack.platforms.host
self.test_patches = store_patches()
self.store = spack.store.STORE
if _SERIALIZE:
self.config = spack.config.CONFIG
self.platform = spack.platforms.host
self.test_patches = store_patches()
self.store = spack.store.STORE
def restore(self):
spack.config.CONFIG = self.config
spack.repo.PATH = spack.repo.create(self.config)
spack.platforms.host = self.platform
spack.store.STORE = self.store
self.test_patches.restore()
if _SERIALIZE:
spack.config.CONFIG = self.config
spack.repo.PATH = spack.repo.create(self.config)
spack.platforms.host = self.platform
spack.store.STORE = self.store
self.test_patches.restore()
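A standalone sketch of the platform logic behind the serialize decision above: under the "spawn" start method (always on Windows, and the macOS default since Python 3.8) a child starts from a fresh interpreter, so in-memory globals must be pickled and restored rather than inherited as with "fork".

import multiprocessing

ctx = multiprocessing.get_context()
needs_marshaling = ctx.get_start_method() != "fork"
print(ctx.get_start_method(), "-> serialize global state:", needs_marshaling)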
class TestPatches:

View File

@@ -1,234 +0,0 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
""" Test ABI-based splicing of dependencies """
from typing import List
import pytest
import spack.config
import spack.deptypes as dt
import spack.package_base
import spack.paths
import spack.repo
import spack.solver.asp
from spack.installer import PackageInstaller
from spack.spec import Spec
class CacheManager:
def __init__(self, specs: List[str]) -> None:
self.req_specs = specs
self.concr_specs: List[Spec]
self.concr_specs = []
def __enter__(self):
self.concr_specs = [Spec(s).concretized() for s in self.req_specs]
for s in self.concr_specs:
PackageInstaller([s.package], fake=True, explicit=True).install()
def __exit__(self, exc_type, exc_val, exc_tb):
for s in self.concr_specs:
s.package.do_uninstall()
# MacOS and Windows only work if you pass this function pointer rather than a
# closure
def _mock_has_runtime_dependencies(_x):
return True
def _make_specs_non_buildable(specs: List[str]):
output_config = {}
for spec in specs:
output_config[spec] = {"buildable": False}
return output_config
@pytest.fixture
def splicing_setup(mutable_database, mock_packages, monkeypatch):
spack.config.set("concretizer:reuse", True)
monkeypatch.setattr(
spack.solver.asp, "_has_runtime_dependencies", _mock_has_runtime_dependencies
)
def _enable_splicing():
spack.config.set("concretizer:splice", {"automatic": True})
def _has_build_dependency(spec: Spec, name: str):
return any(s.name == name for s in spec.dependencies(None, dt.BUILD))
def test_simple_reuse(splicing_setup):
with CacheManager(["splice-z@1.0.0+compat"]):
spack.config.set("packages", _make_specs_non_buildable(["splice-z"]))
assert Spec("splice-z").concretized().satisfies(Spec("splice-z"))
def test_simple_dep_reuse(splicing_setup):
with CacheManager(["splice-z@1.0.0+compat"]):
spack.config.set("packages", _make_specs_non_buildable(["splice-z"]))
assert Spec("splice-h@1").concretized().satisfies(Spec("splice-h@1"))
def test_splice_installed_hash(splicing_setup):
cache = [
"splice-t@1 ^splice-h@1.0.0+compat ^splice-z@1.0.0",
"splice-h@1.0.2+compat ^splice-z@1.0.0",
]
with CacheManager(cache):
packages_config = _make_specs_non_buildable(["splice-t", "splice-h"])
spack.config.set("packages", packages_config)
goal_spec = Spec("splice-t@1 ^splice-h@1.0.2+compat ^splice-z@1.0.0")
with pytest.raises(Exception):
goal_spec.concretized()
_enable_splicing()
assert goal_spec.concretized().satisfies(goal_spec)
def test_splice_build_splice_node(splicing_setup):
with CacheManager(["splice-t@1 ^splice-h@1.0.0+compat ^splice-z@1.0.0+compat"]):
spack.config.set("packages", _make_specs_non_buildable(["splice-t"]))
goal_spec = Spec("splice-t@1 ^splice-h@1.0.2+compat ^splice-z@1.0.0+compat")
with pytest.raises(Exception):
goal_spec.concretized()
_enable_splicing()
assert goal_spec.concretized().satisfies(goal_spec)
def test_double_splice(splicing_setup):
cache = [
"splice-t@1 ^splice-h@1.0.0+compat ^splice-z@1.0.0+compat",
"splice-h@1.0.2+compat ^splice-z@1.0.1+compat",
"splice-z@1.0.2+compat",
]
with CacheManager(cache):
freeze_builds_config = _make_specs_non_buildable(["splice-t", "splice-h", "splice-z"])
spack.config.set("packages", freeze_builds_config)
goal_spec = Spec("splice-t@1 ^splice-h@1.0.2+compat ^splice-z@1.0.2+compat")
with pytest.raises(Exception):
goal_spec.concretized()
_enable_splicing()
assert goal_spec.concretized().satisfies(goal_spec)
# The next two tests are mirrors of one another
def test_virtual_multi_splices_in(splicing_setup):
cache = [
"depends-on-virtual-with-abi ^virtual-abi-1",
"depends-on-virtual-with-abi ^virtual-abi-2",
]
goal_specs = [
"depends-on-virtual-with-abi ^virtual-abi-multi abi=one",
"depends-on-virtual-with-abi ^virtual-abi-multi abi=two",
]
with CacheManager(cache):
spack.config.set("packages", _make_specs_non_buildable(["depends-on-virtual-with-abi"]))
for gs in goal_specs:
with pytest.raises(Exception):
Spec(gs).concretized()
_enable_splicing()
for gs in goal_specs:
assert Spec(gs).concretized().satisfies(gs)
def test_virtual_multi_can_be_spliced(splicing_setup):
cache = [
"depends-on-virtual-with-abi ^virtual-abi-multi abi=one",
"depends-on-virtual-with-abi ^virtual-abi-multi abi=two",
]
goal_specs = [
"depends-on-virtual-with-abi ^virtual-abi-1",
"depends-on-virtual-with-abi ^virtual-abi-2",
]
with CacheManager(cache):
spack.config.set("packages", _make_specs_non_buildable(["depends-on-virtual-with-abi"]))
with pytest.raises(Exception):
for gs in goal_specs:
Spec(gs).concretized()
_enable_splicing()
for gs in goal_specs:
assert Spec(gs).concretized().satisfies(gs)
def test_manyvariant_star_matching_variant_splice(splicing_setup):
cache = [
# can_splice("manyvariants@1.0.0", when="@1.0.1", match_variants="*")
"depends-on-manyvariants ^manyvariants@1.0.0+a+b c=v1 d=v2",
"depends-on-manyvariants ^manyvariants@1.0.0~a~b c=v3 d=v3",
]
goal_specs = [
Spec("depends-on-manyvariants ^manyvariants@1.0.1+a+b c=v1 d=v2"),
Spec("depends-on-manyvariants ^manyvariants@1.0.1~a~b c=v3 d=v3"),
]
with CacheManager(cache):
freeze_build_config = {"depends-on-manyvariants": {"buildable": False}}
spack.config.set("packages", freeze_build_config)
for goal in goal_specs:
with pytest.raises(Exception):
goal.concretized()
_enable_splicing()
for goal in goal_specs:
assert goal.concretized().satisfies(goal)
def test_manyvariant_limited_matching(splicing_setup):
cache = [
# can_splice("manyvariants@2.0.0+a~b", when="@2.0.1~a+b", match_variants=["c", "d"])
"depends-on-manyvariants@2.0 ^manyvariants@2.0.0+a~b c=v3 d=v2",
# can_splice("manyvariants@2.0.0 c=v1 d=v1", when="@2.0.1+a+b")
"depends-on-manyvariants@2.0 ^manyvariants@2.0.0~a~b c=v1 d=v1",
]
goal_specs = [
Spec("depends-on-manyvariants@2.0 ^manyvariants@2.0.1~a+b c=v3 d=v2"),
Spec("depends-on-manyvariants@2.0 ^manyvariants@2.0.1+a+b c=v3 d=v3"),
]
with CacheManager(cache):
freeze_build_config = {"depends-on-manyvariants": {"buildable": False}}
spack.config.set("packages", freeze_build_config)
for s in goal_specs:
with pytest.raises(Exception):
s.concretized()
_enable_splicing()
for s in goal_specs:
assert s.concretized().satisfies(s)
def test_external_splice_same_name(splicing_setup):
cache = [
"splice-h@1.0.0 ^splice-z@1.0.0+compat",
"splice-t@1.0 ^splice-h@1.0.1 ^splice-z@1.0.1+compat",
]
packages_yaml = {
"splice-z": {"externals": [{"spec": "splice-z@1.0.2+compat", "prefix": "/usr"}]}
}
goal_specs = [
Spec("splice-h@1.0.0 ^splice-z@1.0.2"),
Spec("splice-t@1.0 ^splice-h@1.0.1 ^splice-z@1.0.2"),
]
with CacheManager(cache):
spack.config.set("packages", packages_yaml)
_enable_splicing()
for s in goal_specs:
assert s.concretized().satisfies(s)
def test_spliced_build_deps_only_in_build_spec(splicing_setup):
cache = ["splice-t@1.0 ^splice-h@1.0.1 ^splice-z@1.0.0"]
goal_spec = Spec("splice-t@1.0 ^splice-h@1.0.2 ^splice-z@1.0.0")
with CacheManager(cache):
_enable_splicing()
concr_goal = goal_spec.concretized()
build_spec = concr_goal._build_spec
# Spec has been spliced
assert build_spec is not None
# Build spec has spliced build dependencies
assert _has_build_dependency(build_spec, "splice-h")
assert _has_build_dependency(build_spec, "splice-z")
# Spliced build dependencies are removed
assert len(concr_goal.dependencies(None, dt.BUILD)) == 0

View File

@@ -15,8 +15,6 @@
from llnl.util.filesystem import HeaderList, LibraryList
import spack.build_environment
import spack.compiler
import spack.compilers
import spack.config
import spack.deptypes as dt
import spack.package_base

View File

@@ -199,7 +199,7 @@ def check_args(cc, args, expected):
"""
with set_env(SPACK_TEST_COMMAND="dump-args"):
cc_modified_args = cc(*args, output=str).strip().split("\n")
assert cc_modified_args == expected
assert expected == cc_modified_args
def check_args_contents(cc, args, must_contain, must_not_contain):
@@ -272,43 +272,6 @@ def test_ld_mode(wrapper_environment):
assert dump_mode(ld, ["foo.o", "bar.o", "baz.o", "-o", "foo", "-Wl,-rpath,foo"]) == "ld"
def test_ld_unterminated_rpath(wrapper_environment):
check_args(
ld,
["foo.o", "bar.o", "baz.o", "-o", "foo", "-rpath"],
["ld", "--disable-new-dtags", "foo.o", "bar.o", "baz.o", "-o", "foo", "-rpath"],
)
def test_xlinker_unterminated_rpath(wrapper_environment):
check_args(
cc,
["foo.o", "bar.o", "baz.o", "-o", "foo", "-Xlinker", "-rpath"],
[real_cc]
+ target_args
+ [
"-Wl,--disable-new-dtags",
"foo.o",
"bar.o",
"baz.o",
"-o",
"foo",
"-Xlinker",
"-rpath",
],
)
def test_wl_unterminated_rpath(wrapper_environment):
check_args(
cc,
["foo.o", "bar.o", "baz.o", "-o", "foo", "-Wl,-rpath"],
[real_cc]
+ target_args
+ ["-Wl,--disable-new-dtags", "foo.o", "bar.o", "baz.o", "-o", "foo", "-Wl,-rpath"],
)
def test_ld_flags(wrapper_environment, wrapper_flags):
check_args(
ld,

View File

@@ -170,7 +170,7 @@ def test_remove_and_add_a_source(mutable_config):
assert not sources
# Add it back and check we restored the initial state
_bootstrap("add", "github-actions", "$spack/share/spack/bootstrap/github-actions-v0.6")
_bootstrap("add", "github-actions", "$spack/share/spack/bootstrap/github-actions-v0.5")
sources = spack.bootstrap.core.bootstrapping_sources()
assert len(sources) == 1

View File

@@ -2,6 +2,7 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import filecmp
import json
import os
import pathlib
@@ -26,6 +27,7 @@
import spack.util.spack_yaml as syaml
from spack.cmd.ci import FAILED_CREATE_BUILDCACHE_CODE
from spack.schema.buildcache_spec import schema as specfile_schema
from spack.schema.ci import schema as ci_schema
from spack.schema.database_index import schema as db_idx_schema
from spack.spec import Spec
@@ -195,7 +197,7 @@ def test_ci_generate_with_env(ci_generate_test, tmp_path, mock_binary_index):
- matrix:
- [$old-gcc-pkgs]
mirrors:
buildcache-destination: {mirror_url}
some-mirror: {mirror_url}
ci:
pipeline-gen:
- submapping:
@@ -237,9 +239,7 @@ def test_ci_generate_with_env(ci_generate_test, tmp_path, mock_binary_index):
assert "rebuild-index" in yaml_contents
rebuild_job = yaml_contents["rebuild-index"]
assert (
rebuild_job["script"][0] == f"spack buildcache update-index --keys {mirror_url.as_uri()}"
)
assert rebuild_job["script"][0] == f"spack buildcache update-index --keys {mirror_url}"
assert rebuild_job["custom_attribute"] == "custom!"
assert "variables" in yaml_contents
@@ -249,28 +249,31 @@ def test_ci_generate_with_env(ci_generate_test, tmp_path, mock_binary_index):
def test_ci_generate_with_env_missing_section(ci_generate_test, tmp_path, mock_binary_index):
"""Make sure we get a reasonable message if we omit gitlab-ci section"""
env_yaml = f"""\
_, _, output = ci_generate_test(
f"""\
spack:
specs:
- archive-files
mirrors:
buildcache-destination: {tmp_path / 'ci-mirror'}
"""
expect = "Environment does not have a `ci` configuration"
with pytest.raises(ci.SpackCIError, match=expect):
ci_generate_test(env_yaml)
some-mirror: {tmp_path / 'ci-mirror'}
""",
fail_on_error=False,
)
assert "Environment does not have `ci` a configuration" in output
def test_ci_generate_with_cdash_token(ci_generate_test, tmp_path, mock_binary_index, monkeypatch):
"""Make sure we it doesn't break if we configure cdash"""
monkeypatch.setenv("SPACK_CDASH_AUTH_TOKEN", "notreallyatokenbutshouldnotmatter")
backup_file = tmp_path / "backup-ci.yml"
spack_yaml_content = f"""\
spack:
specs:
- archive-files
mirrors:
buildcache-destination: {tmp_path / "ci-mirror"}
some-mirror: {tmp_path / "ci-mirror"}
ci:
enable-artifacts-buildcache: True
pipeline-gen:
- submapping:
- match:
@@ -285,15 +288,16 @@ def test_ci_generate_with_cdash_token(ci_generate_test, tmp_path, mock_binary_in
project: Not used
site: Nothing
"""
spack_yaml, original_file, output = ci_generate_test(spack_yaml_content)
yaml_contents = syaml.load(original_file.read_text())
spack_yaml, original_file, output = ci_generate_test(
spack_yaml_content, "--copy-to", str(backup_file)
)
# That fake token should have resulted in being unable to
# That fake token should still have resulted in being unable to
# register build group with cdash, but the workload should
# still have been generated.
assert "Problem populating buildgroup" in output
expected_keys = ["rebuild-index", "stages", "variables", "workflow"]
assert all([key in yaml_contents.keys() for key in expected_keys])
assert backup_file.exists()
assert filecmp.cmp(str(original_file), str(backup_file))
def test_ci_generate_with_custom_settings(
@@ -308,7 +312,7 @@ def test_ci_generate_with_custom_settings(
specs:
- archive-files
mirrors:
buildcache-destination: {tmp_path / "ci-mirror"}
some-mirror: {tmp_path / "ci-mirror"}
ci:
pipeline-gen:
- submapping:
@@ -383,8 +387,9 @@ def test_ci_generate_pkg_with_deps(ci_generate_test, tmp_path, ci_base_environme
specs:
- flatten-deps
mirrors:
buildcache-destination: {tmp_path / 'ci-mirror'}
some-mirror: {tmp_path / 'ci-mirror'}
ci:
enable-artifacts-buildcache: True
pipeline-gen:
- submapping:
- match:
@@ -417,8 +422,13 @@ def test_ci_generate_pkg_with_deps(ci_generate_test, tmp_path, ci_base_environme
def test_ci_generate_for_pr_pipeline(ci_generate_test, tmp_path, monkeypatch):
"""Test generation of a PR pipeline with disabled rebuild-index"""
"""Test that PR pipelines do not include a final stage job for
rebuilding the mirror index, even if that job is specifically
configured.
"""
monkeypatch.setenv("SPACK_PIPELINE_TYPE", "spack_pull_request")
monkeypatch.setenv("SPACK_PR_BRANCH", "fake-test-branch")
monkeypatch.setattr(spack.ci, "SHARED_PR_MIRROR_URL", f"{tmp_path / 'shared-pr-mirror'}")
spack_yaml, outputfile, _ = ci_generate_test(
f"""\
@@ -426,8 +436,9 @@ def test_ci_generate_for_pr_pipeline(ci_generate_test, tmp_path, monkeypatch):
specs:
- flatten-deps
mirrors:
buildcache-destination: {tmp_path / 'ci-mirror'}
some-mirror: {tmp_path / 'ci-mirror'}
ci:
enable-artifacts-buildcache: True
pipeline-gen:
- submapping:
- match:
@@ -463,7 +474,7 @@ def test_ci_generate_with_external_pkg(ci_generate_test, tmp_path, monkeypatch):
- archive-files
- externaltest
mirrors:
buildcache-destination: {tmp_path / "ci-mirror"}
some-mirror: {tmp_path / "ci-mirror"}
ci:
pipeline-gen:
- submapping:
@@ -529,6 +540,7 @@ def create_rebuild_env(
broken_specs_path = scratch / "naughty-list"
mirror_url = mirror_dir.as_uri()
temp_storage_url = (tmp_path / "temp-storage").as_uri()
ci_job_url = "https://some.domain/group/project/-/jobs/42"
ci_pipeline_url = "https://some.domain/group/project/-/pipelines/7"
@@ -543,10 +555,11 @@ def create_rebuild_env(
specs:
- $packages
mirrors:
buildcache-destination: {mirror_dir}
test-mirror: {mirror_dir}
ci:
broken-specs-url: {broken_specs_path.as_uri()}
broken-tests-packages: {json.dumps([pkg_name] if broken_tests else [])}
temporary-storage-url-prefix: {temp_storage_url}
pipeline-gen:
- submapping:
- match:
@@ -698,7 +711,7 @@ def test_ci_require_signing(
specs:
- archive-files
mirrors:
buildcache-destination: {tmp_path / "ci-mirror"}
test-mirror: {tmp_path / "ci-mirror"}
ci:
pipeline-gen:
- submapping:
@@ -746,8 +759,9 @@ def test_ci_nothing_to_rebuild(
specs:
- $packages
mirrors:
buildcache-destination: {mirror_url}
test-mirror: {mirror_url}
ci:
enable-artifacts-buildcache: true
pipeline-gen:
- submapping:
- match:
@@ -774,20 +788,103 @@ def test_ci_nothing_to_rebuild(
"SPACK_JOB_LOG_DIR": "log_dir",
"SPACK_JOB_REPRO_DIR": "repro_dir",
"SPACK_JOB_TEST_DIR": "test_dir",
"SPACK_LOCAL_MIRROR_DIR": str(mirror_dir),
"SPACK_CONCRETE_ENV_DIR": str(tmp_path),
"SPACK_JOB_SPEC_DAG_HASH": env.concrete_roots()[0].dag_hash(),
"SPACK_JOB_SPEC_PKG_NAME": "archive-files",
"SPACK_COMPILER_ACTION": "NONE",
"SPACK_REMOTE_MIRROR_URL": mirror_url,
}
)
def fake_dl_method(spec, *args, **kwargs):
print("fake download buildcache {0}".format(spec.name))
monkeypatch.setattr(spack.binary_distribution, "download_single_spec", fake_dl_method)
ci_out = ci_cmd("rebuild", output=str)
assert "No need to rebuild archive-files" in ci_out
assert "fake download buildcache archive-files" in ci_out
env_cmd("deactivate")
def test_ci_generate_mirror_override(
tmp_path: pathlib.Path,
mutable_mock_env_path,
install_mockery,
mock_fetch,
mock_binary_index,
ci_base_environment,
):
"""Ensure that protected pipelines using --buildcache-destination do not
skip building specs that are not in the override mirror when they are
found in the main mirror."""
os.environ.update({"SPACK_PIPELINE_TYPE": "spack_protected_branch"})
mirror_url = (tmp_path / "mirror").as_uri()
with open(tmp_path / "spack.yaml", "w") as f:
f.write(
f"""
spack:
definitions:
- packages: [patchelf]
specs:
- $packages
mirrors:
test-mirror: {mirror_url}
ci:
pipeline-gen:
- submapping:
- match:
- patchelf
build-job:
tags:
- donotcare
image: donotcare
- cleanup-job:
tags:
- nonbuildtag
image: basicimage
"""
)
with working_dir(tmp_path):
env_cmd("create", "test", "./spack.yaml")
first_ci_yaml = str(tmp_path / ".gitlab-ci-1.yml")
second_ci_yaml = str(tmp_path / ".gitlab-ci-2.yml")
with ev.read("test"):
install_cmd()
buildcache_cmd("push", "-u", mirror_url, "patchelf")
buildcache_cmd("update-index", mirror_url, output=str)
# This generate should not trigger a rebuild of patchelf, since it's in
# the main mirror referenced in the environment.
ci_cmd("generate", "--check-index-only", "--output-file", first_ci_yaml)
# Because we used a mirror override (--buildcache-destination) on a
# spack protected pipeline, we expect to only look in the override
# mirror for the spec, and thus the patchelf job should be generated in
# this pipeline
ci_cmd(
"generate",
"--check-index-only",
"--output-file",
second_ci_yaml,
"--buildcache-destination",
(tmp_path / "does-not-exist").as_uri(),
)
with open(first_ci_yaml) as fd1:
first_yaml = fd1.read()
assert "no-specs-to-rebuild" in first_yaml
with open(second_ci_yaml) as fd2:
second_yaml = fd2.read()
assert "no-specs-to-rebuild" not in second_yaml
@pytest.mark.disable_clean_stage_check
def test_push_to_build_cache(
tmp_path: pathlib.Path,
@@ -814,8 +911,9 @@ def test_push_to_build_cache(
specs:
- $packages
mirrors:
buildcache-destination: {mirror_url}
test-mirror: {mirror_url}
ci:
enable-artifacts-buildcache: True
pipeline-gen:
- submapping:
- match:
@@ -951,7 +1049,7 @@ def test_ci_generate_override_runner_attrs(
- flatten-deps
- pkg-a
mirrors:
buildcache-destination: {tmp_path / "ci-mirror"}
some-mirror: {tmp_path / "ci-mirror"}
ci:
pipeline-gen:
- match_behavior: {match_behavior}
@@ -1091,7 +1189,7 @@ def test_ci_rebuild_index(
specs:
- callpath
mirrors:
buildcache-destination: {mirror_url}
test-mirror: {mirror_url}
ci:
pipeline-gen:
- submapping:
@@ -1147,7 +1245,7 @@ def fake_stack_changed(env_path, rev1="HEAD^", rev2="HEAD"):
- archive-files
- callpath
mirrors:
buildcache-destination: {tmp_path / 'ci-mirror'}
some-mirror: {tmp_path / 'ci-mirror'}
ci:
pipeline-gen:
- build-job:
@@ -1210,15 +1308,101 @@ def test_ci_subcommands_without_mirror(
with ev.read("test"):
# Check the 'generate' subcommand
expect = "spack ci generate requires a mirror named 'buildcache-destination'"
with pytest.raises(ci.SpackCIError, match=expect):
ci_cmd("generate", "--output-file", str(tmp_path / ".gitlab-ci.yml"))
output = ci_cmd(
"generate",
"--output-file",
str(tmp_path / ".gitlab-ci.yml"),
output=str,
fail_on_error=False,
)
assert "spack ci generate requires an env containing a mirror" in output
# Also check the 'rebuild-index' subcommand
output = ci_cmd("rebuild-index", output=str, fail_on_error=False)
assert "spack ci rebuild-index requires an env containing a mirror" in output
def test_ensure_only_one_temporary_storage():
"""Make sure 'gitlab-ci' section of env does not allow specification of
both 'enable-artifacts-buildcache' and 'temporary-storage-url-prefix'."""
gitlab_ci_template = """
ci:
{0}
pipeline-gen:
- submapping:
- match:
- notcheckedhere
build-job:
tags:
- donotcare
"""
enable_artifacts = "enable-artifacts-buildcache: True"
temp_storage = "temporary-storage-url-prefix: file:///temp/mirror"
specify_both = f"{enable_artifacts}\n {temp_storage}"
specify_neither = ""
# User can specify "enable-artifacts-buildcache" (boolean)
yaml_obj = syaml.load(gitlab_ci_template.format(enable_artifacts))
jsonschema.validate(yaml_obj, ci_schema)
# User can also specify "temporary-storage-url-prefix" (string)
yaml_obj = syaml.load(gitlab_ci_template.format(temp_storage))
jsonschema.validate(yaml_obj, ci_schema)
# However, specifying both should fail to validate
yaml_obj = syaml.load(gitlab_ci_template.format(specify_both))
with pytest.raises(jsonschema.ValidationError):
jsonschema.validate(yaml_obj, ci_schema)
# Specifying neither should be fine too, as neither of these properties
# should be required
yaml_obj = syaml.load(gitlab_ci_template.format(specify_neither))
jsonschema.validate(yaml_obj, ci_schema)
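An illustrative-only schema (not Spack's actual ci schema) showing the same mutual-exclusion pattern with jsonschema's "not"/"required" keywords:

import jsonschema

exclusive = {
    "type": "object",
    "not": {"required": ["enable-artifacts-buildcache", "temporary-storage-url-prefix"]},
}

jsonschema.validate({"enable-artifacts-buildcache": True}, exclusive)          # ok
jsonschema.validate({"temporary-storage-url-prefix": "file:///m"}, exclusive)  # ok
try:
    jsonschema.validate(
        {"enable-artifacts-buildcache": True, "temporary-storage-url-prefix": "file:///m"},
        exclusive,
    )
except jsonschema.ValidationError as err:
    print("rejected:", err.message)  # both keys present -> "not" subschema matched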
def test_ci_generate_temp_storage_url(ci_generate_test, tmp_path, mock_binary_index):
"""Verify correct behavior when using temporary-storage-url-prefix"""
_, outputfile, _ = ci_generate_test(
f"""\
spack:
specs:
- archive-files
mirrors:
some-mirror: {(tmp_path / "ci-mirror").as_uri()}
ci:
temporary-storage-url-prefix: {(tmp_path / "temp-mirror").as_uri()}
pipeline-gen:
- submapping:
- match:
- archive-files
build-job:
tags:
- donotcare
image: donotcare
- cleanup-job:
custom_attribute: custom!
"""
)
yaml_contents = syaml.load(outputfile.read_text())
assert "cleanup" in yaml_contents
cleanup_job = yaml_contents["cleanup"]
assert cleanup_job["custom_attribute"] == "custom!"
assert "script" in cleanup_job
cleanup_task = cleanup_job["script"][0]
assert cleanup_task.startswith("spack -d mirror destroy")
assert "stages" in yaml_contents
stages = yaml_contents["stages"]
# Cleanup job should be 2nd to last, just before rebuild-index
assert "stage" in cleanup_job
assert cleanup_job["stage"] == stages[-2]
def test_ci_generate_read_broken_specs_url(
tmp_path: pathlib.Path,
mutable_mock_env_path,
@@ -1255,7 +1439,7 @@ def test_ci_generate_read_broken_specs_url(
- flatten-deps
- pkg-a
mirrors:
buildcache-destination: {(tmp_path / "ci-mirror").as_uri()}
some-mirror: {(tmp_path / "ci-mirror").as_uri()}
ci:
broken-specs-url: "{broken_specs_url}"
pipeline-gen:
@@ -1300,8 +1484,9 @@ def test_ci_generate_external_signing_job(ci_generate_test, tmp_path, monkeypatc
specs:
- archive-files
mirrors:
buildcache-destination: {(tmp_path / "ci-mirror").as_uri()}
some-mirror: {(tmp_path / "ci-mirror").as_uri()}
ci:
temporary-storage-url-prefix: {(tmp_path / "temp-mirror").as_uri()}
pipeline-gen:
- submapping:
- match:
@@ -1356,7 +1541,7 @@ def test_ci_reproduce(
specs:
- $packages
mirrors:
buildcache-destination: {tmp_path / "ci-mirror"}
test-mirror: {tmp_path / "ci-mirror"}
ci:
pipeline-gen:
- submapping:
@@ -1487,6 +1672,106 @@ def test_cmd_first_line():
assert spack.cmd.first_line(doc) == first
legacy_spack_yaml_contents = """
spack:
definitions:
- old-gcc-pkgs:
- archive-files
- callpath
# specify ^openblas-with-lapack to ensure that builtin.mock repo flake8
# package (which can also provide lapack) is not chosen, as it violates
# a package-level check which requires exactly one fetch strategy (this
# is apparently not an issue for other tests that use it).
- hypre@0.2.15 ^openblas-with-lapack
specs:
- matrix:
- [$old-gcc-pkgs]
mirrors:
test-mirror: {mirror_url}
{key}:
match_behavior: first
mappings:
- match:
- arch=test-debian6-core2
runner-attributes:
tags:
- donotcare
image: donotcare
- match:
- arch=test-debian6-m1
runner-attributes:
tags:
- donotcare
image: donotcare
service-job-attributes:
image: donotcare
tags: [donotcare]
cdash:
build-group: Not important
url: https://my.fake.cdash
project: Not used
site: Nothing
"""
@pytest.mark.regression("36409")
def test_gitlab_ci_deprecated(
tmp_path: pathlib.Path,
mutable_mock_env_path,
install_mockery,
monkeypatch,
ci_base_environment,
mock_binary_index,
):
mirror_url = (tmp_path / "ci-mirror").as_uri()
with open(tmp_path / "spack.yaml", "w") as f:
f.write(legacy_spack_yaml_contents.format(mirror_url=mirror_url, key="gitlab-ci"))
with working_dir(tmp_path):
with ev.Environment("."):
ci_cmd("generate", "--output-file", "generated-pipeline.yaml")
with open("generated-pipeline.yaml") as f:
yaml_contents = syaml.load(f)
assert "stages" in yaml_contents
assert len(yaml_contents["stages"]) == 5
assert yaml_contents["stages"][0] == "stage-0"
assert yaml_contents["stages"][4] == "stage-rebuild-index"
assert "rebuild-index" in yaml_contents
rebuild_job = yaml_contents["rebuild-index"]
expected = f"spack buildcache update-index --keys {mirror_url}"
assert rebuild_job["script"][0] == expected
assert "variables" in yaml_contents
assert "SPACK_ARTIFACTS_ROOT" in yaml_contents["variables"]
artifacts_root = yaml_contents["variables"]["SPACK_ARTIFACTS_ROOT"]
assert artifacts_root == "jobs_scratch_dir"
@pytest.mark.regression("36045")
def test_gitlab_ci_update(
tmp_path: pathlib.Path,
mutable_mock_env_path,
install_mockery,
monkeypatch,
ci_base_environment,
mock_binary_index,
):
with open(tmp_path / "spack.yaml", "w") as f:
f.write(
legacy_spack_yaml_contents.format(mirror_url=(tmp_path / "mirror").as_uri(), key="ci")
)
env_cmd("update", "-y", str(tmp_path))
with open(tmp_path / "spack.yaml") as f:
yaml_contents = syaml.load(f)
ci_root = yaml_contents["spack"]["ci"]
assert "pipeline-gen" in ci_root
def test_gitlab_config_scopes(ci_generate_test, tmp_path):
"""Test pipeline generation with real configs included"""
configs_path = os.path.join(spack_paths.share_path, "gitlab", "cloud_pipelines", "configs")
@@ -1500,7 +1785,7 @@ def test_gitlab_config_scopes(ci_generate_test, tmp_path):
specs:
- flatten-deps
mirrors:
buildcache-destination: {tmp_path / "ci-mirror"}
some-mirror: {tmp_path / "ci-mirror"}
ci:
pipeline-gen:
- build-job:
@@ -1573,7 +1858,7 @@ def dynamic_mapping_setup(tmpdir):
specs:
- pkg-a
mirrors:
buildcache-destination: https://my.fake.mirror
some-mirror: https://my.fake.mirror
ci:
pipeline-gen:
- dynamic-mapping:

View File

@@ -9,7 +9,6 @@
import pathlib
import shutil
from argparse import Namespace
from typing import Any, Dict, Optional
import pytest
@@ -75,7 +74,7 @@ def setup_combined_multiple_env():
env("create", "test1")
test1 = ev.read("test1")
with test1:
add("mpich@1.0")
add("zlib")
test1.concretize()
test1.write()
@@ -117,99 +116,6 @@ def check_viewdir_removal(viewdir):
) == ["projections.yaml"]
def test_env_track_nonexistant_path_fails(capfd):
with pytest.raises(spack.main.SpackCommandError):
env("track", "path/does/not/exist")
out, _ = capfd.readouterr()
assert "doesn't contain an environment" in out
def test_env_track_existing_env_fails(capfd):
env("create", "track_test")
with pytest.raises(spack.main.SpackCommandError):
env("track", "--name", "track_test", ev.environment_dir_from_name("track_test"))
out, _ = capfd.readouterr()
assert "environment named track_test already exists" in out
def test_env_track_valid(tmp_path):
with fs.working_dir(str(tmp_path)):
# create an independent environment
env("create", "-d", ".")
# test tracking an environment in known store
env("track", "--name", "test1", ".")
# test removing environment to ensure independent isn't deleted
env("rm", "-y", "test1")
assert os.path.isfile("spack.yaml")
def test_env_untrack_valid(tmp_path):
with fs.working_dir(str(tmp_path)):
# create an independent environment
env("create", "-d", ".")
# test tracking an environment in known store
env("track", "--name", "test_untrack", ".")
env("untrack", "--yes-to-all", "test_untrack")
# check that environment was successfully untracked
out = env("ls")
assert "test_untrack" not in out
def test_env_untrack_invalid_name():
# test untracking an environment that doesn't exist
env_name = "invalid_enviornment_untrack"
out = env("untrack", env_name)
assert f"Environment '{env_name}' does not exist" in out
def test_env_untrack_when_active(tmp_path, capfd):
env_name = "test_untrack_active"
with fs.working_dir(str(tmp_path)):
# create an independent environment
env("create", "-d", ".")
# test tracking an environment in known store
env("track", "--name", env_name, ".")
active_env = ev.read(env_name)
with active_env:
with pytest.raises(spack.main.SpackCommandError):
env("untrack", "--yes-to-all", env_name)
# check that environment could not be untracked while active
out, _ = capfd.readouterr()
assert f"'{env_name}' can't be untracked while activated" in out
env("untrack", "-f", env_name)
out = env("ls")
assert env_name not in out
def test_env_untrack_managed(tmp_path, capfd):
env_name = "test_untrack_managed"
# create an managed environment
env("create", env_name)
with pytest.raises(spack.main.SpackCommandError):
env("untrack", env_name)
# check that a managed environment cannot be untracked
out, _ = capfd.readouterr()
assert f"'{env_name}' is not a tracked env" in out
def test_add():
e = ev.create("test")
e.add("mpileaks")
@@ -221,7 +127,6 @@ def test_change_match_spec():
e = ev.read("test")
with e:
add("mpileaks@2.1")
add("mpileaks@2.2")
@@ -496,17 +401,14 @@ def test_env_install_single_spec(install_mockery, mock_fetch):
@pytest.mark.parametrize("unify", [True, False, "when_possible"])
def test_env_install_include_concrete_env(unify, install_mockery, mock_fetch, mutable_config):
def test_env_install_include_concrete_env(unify, install_mockery, mock_fetch):
test1, test2, combined = setup_combined_multiple_env()
combined.unify = unify
if not unify:
combined.manifest.set_default_view(False)
combined.add("mpileaks")
combined.concretize()
combined.write()
combined.unify = unify
with combined:
install()
@@ -520,14 +422,6 @@ def test_env_install_include_concrete_env(unify, install_mockery, mock_fetch, mu
assert test1_roots == combined_included_roots[test1.path]
assert test2_roots == combined_included_roots[test2.path]
mpileaks = combined.specs_by_hash[combined.concretized_order[0]]
if unify:
assert mpileaks["mpi"].dag_hash() in test1_roots
assert mpileaks["libelf"].dag_hash() in test2_roots
else:
# check that unification is not by accident
assert mpileaks["mpi"].dag_hash() not in test1_roots
def test_env_roots_marked_explicit(install_mockery, mock_fetch):
install = SpackCommand("install")
@@ -782,7 +676,7 @@ def test_force_remove_included_env():
rm_output = env("remove", "-f", "-y", "test")
list_output = env("list")
assert "'test' is used by environment 'combined_env'" in rm_output
assert '"test" is being used by environment "combined_env"' in rm_output
assert "test" not in list_output
@@ -1975,7 +1869,7 @@ def test_env_include_concrete_envs_lockfile():
def test_env_include_concrete_add_env():
test1, test2, combined = setup_combined_multiple_env()
# create new env & concretize
# crete new env & crecretize
env("create", "new")
new_env = ev.read("new")
with new_env:
@@ -2027,116 +1921,6 @@ def test_env_include_concrete_remove_env():
assert test2.path not in lockfile_as_dict["include_concrete"].keys()
def configure_reuse(reuse_mode, combined_env) -> Optional[ev.Environment]:
override_env = None
_config: Dict[Any, Any] = {}
if reuse_mode == "true":
_config = {"concretizer": {"reuse": True}}
elif reuse_mode == "from_environment":
_config = {"concretizer": {"reuse": {"from": [{"type": "environment"}]}}}
elif reuse_mode == "from_environment_test1":
_config = {"concretizer": {"reuse": {"from": [{"type": "environment", "path": "test1"}]}}}
elif reuse_mode == "from_environment_external_test":
# Create a new environment called external_test that enables the "debug"
# The default is "~debug"
env("create", "external_test")
override_env = ev.read("external_test")
with override_env:
add("mpich@1.0 +debug")
override_env.concretize()
override_env.write()
# Reuse from the environment that is not included.
# Specify the requirement for the debug variant. By default this would concretize to use
# mpich@3.0 but with include concrete the mpich@1.0 +debug version from the
# "external_test" environment will be used.
_config = {
"concretizer": {"reuse": {"from": [{"type": "environment", "path": "external_test"}]}},
"packages": {"mpich": {"require": ["+debug"]}},
}
elif reuse_mode == "from_environment_raise":
_config = {
"concretizer": {"reuse": {"from": [{"type": "environment", "path": "not-a-real-env"}]}}
}
# Disable unification in these tests to avoid confusing reuse due to unification using an
# include concrete spec vs reuse due to the reuse configuration
_config["concretizer"].update({"unify": False})
combined_env.manifest.configuration.update(_config)
combined_env.manifest.changed = True
combined_env.write()
return override_env
@pytest.mark.parametrize(
"reuse_mode",
[
"true",
"from_environment",
"from_environment_test1",
"from_environment_external_test",
"from_environment_raise",
],
)
def test_env_include_concrete_reuse(monkeypatch, reuse_mode):
# The mock packages do not use the gcc-runtime
def mock_has_runtime_dependencies(*args, **kwargs):
return True
monkeypatch.setattr(
spack.solver.asp, "_has_runtime_dependencies", mock_has_runtime_dependencies
)
# The default mpi version is 3.x provided by mpich in the mock repo.
# This test verifies that concretizing with an included concrete
# environment with "concretizer:reuse:true" the included
# concrete spec overrides the default with mpi@1.0.
test1, _, combined = setup_combined_multiple_env()
# Set the reuse mode for the environment
override_env = configure_reuse(reuse_mode, combined)
if override_env:
# If there is an override environment (i.e. testing reuse with
# an external environment) update it here.
test1 = override_env
# Capture the test1 specs included by combined
test1_specs_by_hash = test1.specs_by_hash
try:
# Add mpileaks to the combined environment
with combined:
add("mpileaks")
combined.concretize()
comb_specs_by_hash = combined.specs_by_hash
# create reference env with mpileaks that does not use reuse
# This should concretize to the default version of mpich (3.0)
env("create", "new")
ref_env = ev.read("new")
with ref_env:
add("mpileaks")
ref_env.concretize()
ref_specs_by_hash = ref_env.specs_by_hash
# Ensure that the mpich used by the mpileaks is the mpich from the reused test environment
comb_mpileaks_spec = [s for s in comb_specs_by_hash.values() if s.name == "mpileaks"]
test1_mpich_spec = [s for s in test1_specs_by_hash.values() if s.name == "mpich"]
assert len(comb_mpileaks_spec) == 1
assert len(test1_mpich_spec) == 1
assert comb_mpileaks_spec[0]["mpich"].dag_hash() == test1_mpich_spec[0].dag_hash()
# None of the references specs (using mpich@3) reuse specs from test1.
# This tests that the reuse is not happening coincidently
assert not any([s in test1_specs_by_hash for s in ref_specs_by_hash])
# Make sure the raise tests raises
assert "raise" not in reuse_mode
except ev.SpackEnvironmentError:
assert "raise" in reuse_mode
@pytest.mark.parametrize("unify", [True, False, "when_possible"])
def test_env_include_concrete_env_reconcretized(unify):
"""Double check to make sure that concrete_specs for the local specs is empty
@@ -4333,13 +4117,13 @@ def test_spack_package_ids_variable(tmpdir, mock_packages):
# Include in Makefile and create target that depend on SPACK_PACKAGE_IDS
with open(makefile_path, "w") as f:
f.write(
"""
r"""
all: post-install
include include.mk
example/post-install/%: example/install/%
\t$(info post-install: $(HASH)) # noqa: W191,E101
$(info post-install: $(HASH)) # noqa: W191,E101
post-install: $(addprefix example/post-install/,$(example/SPACK_PACKAGE_IDS))
"""

Some files were not shown because too many files have changed in this diff.