Compare commits


14 Commits

Author SHA1 Message Date
Todd Gamblin
03cba85029 bugfix: don't include root in general depth rule 2023-03-11 16:21:53 -08:00
Todd Gamblin
2c7a2fa3e0 Don't require priority attribute on models. 2023-02-22 10:53:16 -08:00
Todd Gamblin
7e8fb4b8d0 WIP 2023-01-30 11:48:03 -08:00
Todd Gamblin
54087cf274 WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
b0e67d3411 WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
d92b653f0b WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
3435806007 WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
06d5abe895 WIP 2023-01-24 17:19:19 -08:00
Todd Gamblin
dc40e121a3 WIP 2023-01-24 17:18:50 -08:00
Todd Gamblin
03acf14d86 tests: test consistency of solver and spec_clauses
Previously we didn't check whether all facts on a spec were represented in the models
returned by the solver. This is an important check as it ensures that we can have
conditions (dependencies, conflicts, etc.) on any property modeled by spec_clauses.

This caught the issue reported in #32497 (https://github.com/spack/spack/issues/32497),
specifically that we don't currently model patch facts. We can fix this in a follow-on
but it is marked XFAIL for now.
2023-01-24 17:18:50 -08:00
Todd Gamblin
9ffb642b95 refactor: use AspFunction consistently before and after solve
Currently we create `AspFunction` objects as inputs to solves, but we don't use them
when extracting symbols from clingo solves. Use them more consistently in both
scenarios, and simplify the code.
2023-01-24 17:18:47 -08:00
Todd Gamblin
20e698c6b0 concretizer: add depth calculation 2023-01-24 17:12:10 -08:00
Todd Gamblin
c410f3f392 rename optimization criteria to be clearer about roots/non-roots 2023-01-24 17:12:10 -08:00
Todd Gamblin
b37fab40b5 solver: rework optimization criteria with larger constants
To allow room for DAG-ordered optimization, rework the way we write optimization
criteria in `concretize.lp`, and convert build offsets to use constants.
2023-01-24 17:12:10 -08:00
2625 changed files with 16107 additions and 26274 deletions


@@ -1,5 +1,3 @@
# .git-blame-ignore-revs
# Formatted entire codebase with black 23
603569e321013a1a63a637813c94c2834d0a0023
# Formatted entire codebase with black 22
# Formatted entire codebase with black
f52f6e99dbf1131886a80112b8c79dfc414afb7c

.gitattributes vendored

@@ -1,4 +1,3 @@
*.py diff=python
*.lp linguist-language=Prolog
lib/spack/external/* linguist-vendored
*.bat text eol=crlf


@@ -19,7 +19,7 @@ jobs:
package-audits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # @v2
with:
python-version: ${{inputs.python_version}}


@@ -24,7 +24,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- name: Setup repo
@@ -158,7 +158,7 @@ jobs:
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
@@ -179,7 +179,7 @@ jobs:
run: |
brew install tree
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
- name: Bootstrap clingo
run: |
set -ex
@@ -204,7 +204,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- name: Setup repo
@@ -247,7 +247,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- name: Setup non-root user
@@ -283,7 +283,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- name: Setup non-root user
@@ -316,7 +316,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -333,7 +333,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh


@@ -50,7 +50,7 @@ jobs:
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
- name: Set Container Tag Normal (Nightly)
run: |
@@ -89,7 +89,7 @@ jobs:
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # @v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # @v1
uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # @v1
- name: Log in to GitHub Container Registry
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # @v1
@@ -106,7 +106,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # @v2
uses: docker/build-push-action@37abcedcc1da61a57767b7588cb9d03eb57e28b3 # @v2
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}


@@ -35,7 +35,7 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0


@@ -47,7 +47,7 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # @v2
@@ -94,7 +94,7 @@ jobs:
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # @v2
@@ -133,7 +133,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -151,7 +151,7 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # @v2
@@ -185,7 +185,7 @@ jobs:
matrix:
python-version: ["3.10"]
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # @v2


@@ -18,7 +18,7 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # @v2
with:
python-version: '3.11'
@@ -35,7 +35,7 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # @v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # @v2
@@ -44,7 +44,7 @@ jobs:
cache: 'pip'
- name: Install Python packages
run: |
python3 -m pip install --upgrade pip six setuptools types-six black==23.1.0 mypy isort clingo flake8
python3 -m pip install --upgrade pip six setuptools types-six black mypy isort clingo flake8
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.


@@ -15,7 +15,7 @@ jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435
@@ -39,7 +39,7 @@ jobs:
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435
@@ -63,7 +63,7 @@ jobs:
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
with:
fetch-depth: 0
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435
@@ -87,7 +87,7 @@ jobs:
# git config --global core.symlinks false
# shell:
# powershell
# - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
# - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
# with:
# fetch-depth: 0
# - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435


@@ -1,28 +1,3 @@
# v0.19.1 (2023-02-07)
### Spack Bugfixes
* `buildcache create`: make "file exists" less verbose (#35019)
* `spack mirror create`: don't change paths to urls (#34992)
* Improve error message for requirements (#33988)
* uninstall: fix accidental cubic complexity (#34005)
* scons: fix signature for `install_args` (#34481)
* Fix `combine_phase_logs` text encoding issues (#34657)
* Use a module-like object to propagate changes in the MRO, when setting build env (#34059)
* PackageBase should not define builder legacy attributes (#33942)
* Forward lookup of the "run_tests" attribute (#34531)
* Bugfix for timers (#33917, #33900)
* Fix path handling in prefix inspections (#35318)
* Fix libtool filter for Fujitsu compilers (#34916)
* Bug fix for duplicate rpath errors on macOS when creating build caches (#34375)
* FileCache: delete the new cache file on exception (#34623)
* Propagate exceptions from Spack python console (#34547)
* Tests: Fix a bug/typo in a `config_values.py` fixture (#33886)
* Various CI fixes (#33953, #34560, #34560, #34828)
* Docs: remove monitors and analyzers, typos (#34358, #33926)
* bump release version for tutorial command (#33859)
# v0.19.0 (2022-11-11)
`v0.19.0` is a major feature release.


@@ -72,7 +72,6 @@ config:
root: $TMP_DIR/install
misc_cache: $$user_cache_path/cache
source_cache: $$user_cache_path/source
environments_root: $TMP_DIR/envs
EOF
cat >"$SPACK_USER_CONFIG_PATH/bootstrap.yaml" <<EOF
bootstrap:


@@ -83,16 +83,6 @@ if defined _sp_flags (
exit /B 0
)
)
if not defined _sp_subcommand (
if not defined _sp_args (
if not defined _sp_flags (
python "%spack%" --help
exit /B 0
)
)
)
:: pass parsed variables outside of local scope. Need to do
:: this because delayedexpansion can only be set by setlocal
echo %_sp_flags%>flags
@@ -102,24 +92,24 @@ endlocal
set /p _sp_subcommand=<subcmd
set /p _sp_flags=<flags
set /p _sp_args=<args
if "%_sp_subcommand%"=="ECHO is off." (set "_sp_subcommand=")
if "%_sp_subcommand%"=="ECHO is on." (set "_sp_subcommand=")
if "%_sp_flags%"=="ECHO is off." (set "_sp_flags=")
if "%_sp_flags%"=="ECHO is on." (set "_sp_flags=")
if "%_sp_args%"=="ECHO is off." (set "_sp_args=")
if "%_sp_args%"=="ECHO is on." (set "_sp_args=")
set str_subcommand=%_sp_subcommand:"='%
set str_flags=%_sp_flags:"='%
set str_args=%_sp_args:"='%
if "%str_subcommand%"=="ECHO is off." (set "_sp_subcommand=")
if "%str_flags%"=="ECHO is off." (set "_sp_flags=")
if "%str_args%"=="ECHO is off." (set "_sp_args=")
del subcmd
del flags
del args
:: Filter out some commands. For any others, just run the command.
if %_sp_subcommand% == "cd" (
if "%_sp_subcommand%" == "cd" (
goto :case_cd
) else if %_sp_subcommand% == "env" (
) else if "%_sp_subcommand%" == "env" (
goto :case_env
) else if %_sp_subcommand% == "load" (
) else if "%_sp_subcommand%" == "load" (
goto :case_load
) else if %_sp_subcommand% == "unload" (
) else if "%_sp_subcommand%" == "unload" (
goto :case_load
) else (
goto :default_case
@@ -153,21 +143,19 @@ goto :end_switch
:: If no args or args contain --bat or -h/--help: just execute.
if NOT defined _sp_args (
goto :default_case
)
set args_no_quote=%_sp_args:"=%
if NOT "%args_no_quote%"=="%args_no_quote:--help=%" (
)else if NOT "%_sp_args%"=="%_sp_args:--help=%" (
goto :default_case
) else if NOT "%args_no_quote%"=="%args_no_quote: -h=%" (
) else if NOT "%_sp_args%"=="%_sp_args: -h=%" (
goto :default_case
) else if NOT "%args_no_quote%"=="%args_no_quote:--bat=%" (
) else if NOT "%_sp_args%"=="%_sp_args:--bat=%" (
goto :default_case
) else if NOT "%args_no_quote%"=="%args_no_quote:deactivate=%" (
) else if NOT "%_sp_args%"=="%_sp_args:deactivate=%" (
for /f "tokens=* USEBACKQ" %%I in (
`call python %spack% %_sp_flags% env deactivate --bat %args_no_quote:deactivate=%`
`call python "%spack%" %_sp_flags% env deactivate --bat %_sp_args:deactivate=%`
) do %%I
) else if NOT "%args_no_quote%"=="%args_no_quote:activate=%" (
) else if NOT "%_sp_args%"=="%_sp_args:activate=%" (
for /f "tokens=* USEBACKQ" %%I in (
`python %spack% %_sp_flags% env activate --bat %args_no_quote:activate=%`
`call python "%spack%" %_sp_flags% env activate --bat %_sp_args:activate=%`
) do %%I
) else (
goto :default_case
@@ -226,10 +214,10 @@ for %%Z in ("%_pa_new_path%") do if EXIST %%~sZ\NUL (
exit /b 0
:: set module system roots
:_sp_multi_pathadd
:_sp_multi_pathadd
for %%I in (%~2) do (
for %%Z in (%_sp_compatible_sys_types%) do (
:pathadd "%~1" "%%I\%%Z"
)
)
exit /B %ERRORLEVEL%
exit /B %ERRORLEVEL%


@@ -13,18 +13,16 @@ concretizer:
# Whether to consider installed packages or packages from buildcaches when
# concretizing specs. If `true`, we'll try to use as many installs/binaries
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization. If `dependencies`, we'll only reuse dependencies but
# give you a fresh concretization for your root specs.
reuse: dependencies
# concretization.
reuse: true
# Options that tune which targets are considered for concretization. The
# concretization process is very sensitive to the number targets, and the time
# needed to reach a solution increases noticeably with the number of targets
# considered.
targets:
# Determine whether we want to target specific or generic
# microarchitectures. Valid values are: "microarchitectures" or "generic".
# An example of "microarchitectures" would be "skylake" or "bulldozer",
# while an example of "generic" would be "aarch64" or "x86_64_v4".
# Determine whether we want to target specific or generic microarchitectures.
# An example of the first kind might be for instance "skylake" or "bulldozer",
# while generic microarchitectures are for instance "aarch64" or "x86_64_v4".
granularity: microarchitectures
# If "false" allow targets that are incompatible with the current host (for
# instance concretize with target "icelake" while running on "haswell").
@@ -35,4 +33,4 @@ concretizer:
# environments can always be activated. When "false" perform concretization separately
# on each root spec, allowing different versions and variants of the same package in
# an environment.
unify: true
unify: true


@@ -81,10 +81,6 @@ config:
source_cache: $spack/var/spack/cache
## Directory where spack managed environments are created and stored
# environments_root: $spack/var/spack/environments
# Cache directory for miscellaneous files, like the package index.
# This can be purged with `spack clean --misc-cache`
misc_cache: $user_cache_path/cache
@@ -185,7 +181,7 @@ config:
# when Spack needs to manage its own package metadata and all operations are
# expected to complete within the default time limit. The timeout should
# therefore generally be left untouched.
db_lock_timeout: 60
db_lock_timeout: 3
# How long to wait when attempting to modify a package (e.g. to install it).


@@ -46,7 +46,7 @@ modules:
tcl:
all:
autoload: direct
autoload: none
# Default configurations if lmod is enabled
lmod:


@@ -28,7 +28,7 @@ packages:
gl: [glx, osmesa]
glu: [mesa-glu, openglu]
golang: [go, gcc]
go-or-gccgo-bootstrap: [go-bootstrap, gcc]
go-external-or-gccgo-bootstrap: [go-bootstrap, gcc]
iconv: [libiconv]
ipp: [intel-ipp]
java: [openjdk, jdk, ibm-java]


@@ -5,4 +5,3 @@ llnl*.rst
_build
.spack-env
spack.lock
_spack_root


@@ -942,7 +942,7 @@ first ``libelf`` above, you would run:
$ spack load /qmm4kso
To see which packages that you have loaded to your environment you would
To see which packages that you have loaded to your enviornment you would
use ``spack find --loaded``.
.. code-block:: console


@@ -18,7 +18,7 @@ your Spack mirror and then downloaded and installed by others.
Whenever a mirror provides prebuilt packages, Spack will take these packages
into account during concretization and installation, making ``spack install``
significantly faster.
signficantly faster.
.. note::


@@ -28,14 +28,11 @@ This package provides the following variants:
* **cuda_arch**
This variant supports the optional specification of one or multiple architectures.
This variant supports the optional specification of the architecture.
Valid values are maintained in the ``cuda_arch_values`` property and
are the numeric character equivalent of the compute capability version
(e.g., '10' for version 1.0). Each provided value affects associated
``CUDA`` dependencies and compiler conflicts.
The variant builds both PTX code for the _virtual_ architecture
(e.g. ``compute_10``) and binary code for the _real_ architecture (e.g. ``sm_10``).
GPUs and their compute capability versions are listed at
https://developer.nvidia.com/cuda-gpus .


@@ -124,7 +124,7 @@ Using oneAPI Tools Installed by Spack
=====================================
Spack can be a convenient way to install and configure compilers and
libraries, even if you do not intend to build a Spack package. If you
libaries, even if you do not intend to build a Spack package. If you
want to build a Makefile project using Spack-installed oneAPI compilers,
then use spack to configure your environment::


@@ -397,7 +397,7 @@ for specifics and examples for ``packages.yaml`` files.
.. If your system administrator did not provide modules for pre-installed Intel
tools, you could do well to ask for them, because installing multiple copies
of the Intel tools, as is won't to happen once Spack is in the picture, is
of the Intel tools, as is wont to happen once Spack is in the picture, is
bound to stretch disk space and patience thin. If you *are* the system
administrator and are still new to modules, then perhaps it's best to follow
the `next section <Installing Intel tools within Spack_>`_ and install the tools
@@ -653,7 +653,7 @@ follow `the next section <intel-install-libs_>`_ instead.
* If you specified a custom variant (for example ``+vtune``) you may want to add this as your
preferred variant in the packages configuration for the ``intel-parallel-studio`` package
as described in :ref:`package-preferences`. Otherwise you will have to specify
the variant every time ``intel-parallel-studio`` is being used as ``mkl``, ``fftw`` or ``mpi``
the variant everytime ``intel-parallel-studio`` is being used as ``mkl``, ``fftw`` or ``mpi``
implementation to avoid pulling in a different variant.
* To set the Intel compilers for default use in Spack, instead of the usual ``%gcc``,


@@ -366,7 +366,7 @@ If the ``pyproject.toml`` lists ``mesonpy`` as the ``build-backend``,
it uses the meson build system. Meson uses the default
``pyproject.toml`` keys to list dependencies.
See https://meson-python.readthedocs.io/en/latest/tutorials/introduction.html
See https://meson-python.readthedocs.io/en/latest/usage/start.html
for more information.
"""
@@ -582,7 +582,7 @@ libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed.
Instead of defining the ``import_modules`` explicitly, only the subset
Instead of defining the ``import_modules`` explicity, only the subset
of module names to be skipped can be defined by using ``skip_modules``.
If a defined module has submodules, they are skipped as well, e.g.,
in case the ``plotting`` modules should be excluded from the


@@ -58,7 +58,9 @@ Testing
``WafPackage`` also provides ``test`` and ``installtest`` methods,
which are run after the ``build`` and ``install`` phases, respectively.
By default, these phases do nothing, but you can override them to
run package-specific unit tests.
run package-specific unit tests. For example, the
`py-py2cairo <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-py2cairo/package.py>`_
package uses:
.. code-block:: python


@@ -89,7 +89,6 @@
# Enable todo items
todo_include_todos = True
#
# Disable duplicate cross-reference warnings.
#
@@ -354,7 +353,9 @@ class SpackStyle(DefaultStyle):
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [("index", "Spack.tex", "Spack Documentation", "Todd Gamblin", "manual")]
latex_documents = [
("index", "Spack.tex", "Spack Documentation", "Todd Gamblin", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
@@ -401,7 +402,7 @@ class SpackStyle(DefaultStyle):
"Spack",
"One line description of project.",
"Miscellaneous",
)
),
]
# Documents to append as an appendix to all manuals.
@@ -417,4 +418,6 @@ class SpackStyle(DefaultStyle):
# -- Extension configuration -------------------------------------------------
# sphinx.ext.intersphinx
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
}


@@ -222,7 +222,7 @@ and location. (See the *Configuration settings* section of ``man
ccache`` to learn more about the default settings and how to change
them). Please note that we currently disable ccache's ``hash_dir``
feature to avoid an issue with the stage directory (see
https://github.com/spack/spack/pull/3761#issuecomment-294352232).
https://github.com/LLNL/spack/pull/3761#issuecomment-294352232).
-----------------------
``shared_linking:type``


@@ -227,9 +227,6 @@ You can get the name to use for ``<platform>`` by running ``spack arch
--platform``. The system config scope has a ``<platform>`` section for
sites at which ``/etc`` is mounted on multiple heterogeneous machines.
.. _config-scope-precedence:
----------------
Scope Precedence
----------------
@@ -242,11 +239,6 @@ lower-precedence settings. Completely ignoring higher-level configuration
options is supported with the ``::`` notation for keys (see
:ref:`config-overrides` below).
There are also special notations for string concatenation and precendense override.
Using the ``+:`` notation can be used to force *prepending* strings or lists. For lists, this is identical
to the default behavior. Using the ``-:`` works similarly, but for *appending* values.
:ref:`config-prepend-append`
^^^^^^^^^^^
Simple keys
^^^^^^^^^^^
@@ -287,47 +279,6 @@ command:
- ~/.spack/stage
.. _config-prepend-append:
^^^^^^^^^^^^^^^^^^^^
String Concatenation
^^^^^^^^^^^^^^^^^^^^
Above, the user ``config.yaml`` *completely* overrides specific settings in the
default ``config.yaml``. Sometimes, it is useful to add a suffix/prefix
to a path or name. To do this, you can use the ``-:`` notation for *append*
string concatenation at the end of a key in a configuration file. For example:
.. code-block:: yaml
:emphasize-lines: 1
:caption: ~/.spack/config.yaml
config:
install_tree-: /my/custom/suffix/
Spack will then append to the lower-precedence configuration under the
``install_tree-:`` section:
.. code-block:: console
$ spack config get config
config:
install_tree: /some/other/directory/my/custom/suffix
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
Similarly, ``+:`` can be used to *prepend* to a path or name:
.. code-block:: yaml
:emphasize-lines: 1
:caption: ~/.spack/config.yaml
config:
install_tree+: /my/custom/suffix/
.. _config-overrides:
^^^^^^^^^^^^^^^^^^^^^^^^^^


@@ -118,7 +118,7 @@ make another change, test that change, etc. We use `pytest
<http://pytest.org/>`_ as our tests framework, and these types of
arguments are just passed to the ``pytest`` command underneath. See `the
pytest docs
<https://doc.pytest.org/en/latest/how-to/usage.html#specifying-which-tests-to-run>`_
<http://doc.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests>`_
for more details on test selection syntax.
``spack unit-test`` has a few special options that can help you
@@ -147,7 +147,7 @@ you want to know about. For example, to see just the tests in
You can also combine any of these options with a ``pytest`` keyword
search. See the `pytest usage docs
<https://doc.pytest.org/en/latest/how-to/usage.html#specifying-which-tests-to-run>`_
<https://docs.pytest.org/en/stable/usage.html#specifying-tests-selecting-tests>`_:
for more details on test selection syntax. For example, to see the names of all tests that have "spec"
or "concretize" somewhere in their names:


@@ -472,7 +472,7 @@ use my new hook as follows:
.. code-block:: python
def post_log_write(message, level):
"""Do something custom with the message and level every time we write
"""Do something custom with the messsage and level every time we write
to the log
"""
print('running post_log_write!')


@@ -58,9 +58,9 @@ Using Environments
Here we follow a typical use case of creating, concretizing,
installing and loading an environment.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating a managed Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating a named Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
An environment is created by:
@@ -72,8 +72,7 @@ Spack then creates the directory ``var/spack/environments/myenv``.
.. note::
All managed environments by default are stored in the ``var/spack/environments`` folder.
This location can be changed by setting the ``environments_root`` variable in ``config.yaml``.
All named environments are stored in the ``var/spack/environments`` folder.
In the ``var/spack/environments/myenv`` directory, Spack creates the
file ``spack.yaml`` and the hidden directory ``.spack-env``.


@@ -116,7 +116,7 @@ creates a simple python file:
# FIXME: Add a list of GitHub accounts to
# notify when the package is updated.
# maintainers("github_user1", "github_user2")
# maintainers = ["github_user1", "github_user2"]
version("0.8.13", sha256="591a9b4ec81c1f2042a97aa60564e0cb79d041c52faa7416acb38bc95bd2c76d")


@@ -21,7 +21,7 @@ be present on the machine where Spack is run:
:header-rows: 1
These requirements can be easily installed on most modern Linux systems;
on macOS, the Command Line Tools package is required, and a full XCode suite
on macOS, the Command Line Tools package is required, and a full XCode suite
may be necessary for some packages such as Qt and apple-gl. Spack is designed
to run on HPC platforms like Cray. Not all packages should be expected
to work on all platforms.
@@ -1506,7 +1506,7 @@ Spack On Windows
Windows support for Spack is currently under development. While this work is still in an early stage,
it is currently possible to set up Spack and perform a few operations on Windows. This section will guide
you through the steps needed to install Spack and start running it on a fresh Windows machine.
you through the steps needed to install Spack and start running it on a fresh Windows machine.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Step 1: Install prerequisites
@@ -1516,7 +1516,7 @@ To use Spack on Windows, you will need the following packages:
Required:
* Microsoft Visual Studio
* Python
* Python
* Git
Optional:
@@ -1547,8 +1547,8 @@ Intel Fortran
"""""""""""""
For Fortran-based packages on Windows, we strongly recommend Intel's oneAPI Fortran compilers.
The suite is free to download from Intel's website, located at
https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/fortran-compiler.html.
The suite is free to download from Intel's website, located at
https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/fortran-compiler.html#gs.70t5tw.
The executable of choice for Spack will be Intel's Beta Compiler, ifx, which supports the classic
compiler's (ifort's) frontend and runtime libraries by using LLVM.
@@ -1597,8 +1597,8 @@ in a Windows CMD prompt.
.. note::
If you chose to install Spack into a directory on Windows that is set up to require Administrative
Privileges, Spack will require elevated privileges to run.
Administrative Privileges can be denoted either by default such as
Privleges, Spack will require elevated privleges to run.
Administrative Privleges can be denoted either by default such as
``C:\Program Files``, or aministrator applied administrative restrictions
on a directory that spack installs files to such as ``C:\Users``
@@ -1694,7 +1694,7 @@ Spack console via:
spack install cpuinfo
If in the previous step, you did not have CMake or Ninja installed, running the command above should bootstrap both packages
If in the previous step, you did not have CMake or Ninja installed, running the command above should boostrap both packages
"""""""""""""""""""""""""""
Windows Compatible Packages


@@ -13,7 +13,7 @@ The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is often embraced also by
individual programmers on their development machines. To support this
common practice Spack integrates with `Environment Modules
<http://modules.sourceforge.net/>`_ and `Lmod
<http://modules.sourceforge.net/>`_ and `LMod
<http://lmod.readthedocs.io/en/latest/>`_ by providing post-install hooks
that generate module files and commands to manipulate them.
@@ -26,8 +26,8 @@ Using module files via Spack
----------------------------
If you have installed a supported module system you should be able to
run ``module avail`` to see what module
files have been installed. Here is sample output of those programs,
run either ``module avail`` or ``use -l spack`` to see what module
files have been installed. Here is sample output of those programs,
showing lots of installed packages:
.. code-block:: console
@@ -51,7 +51,12 @@ showing lots of installed packages:
help2man-1.47.4-gcc-4.8-kcnqmau lua-luaposix-33.4.0-gcc-4.8-mdod2ry netlib-scalapack-2.0.2-gcc-6.3.0-rgqfr6d py-scipy-0.19.0-gcc-6.3.0-kr7nat4 zlib-1.2.11-gcc-6.3.0-7cqp6cj
The names should look familiar, as they resemble the output from ``spack find``.
For example, you could type the following command to load the ``cmake`` module:
You *can* use the modules here directly. For example, you could type either of these commands
to load the ``cmake`` module:
.. code-block:: console
$ use cmake-3.7.2-gcc-6.3.0-fowuuby
.. code-block:: console
@@ -88,9 +93,9 @@ the different file formats that can be generated by Spack:
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| | **Hook name** | **Default root directory** | **Default template file** | **Compatible tools** |
+=============================+====================+===============================+==============================================+======================+
| **Tcl - Non-Hierarchical** | ``tcl`` | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod |
| **TCL - Non-Hierarchical** | ``tcl`` | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/LMod |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| **Lua - Hierarchical** | ``lmod`` | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod |
| **Lua - Hierarchical** | ``lmod`` | share/spack/lmod | share/spack/templates/modules/modulefile.lua | LMod |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
@@ -391,13 +396,13 @@ name and version for all packages that depend on mpi.
When specifying module names by projection for Lmod modules, we
recommend NOT including names of dependencies (e.g., MPI, compilers)
that are already in the Lmod hierarchy.
that are already in the LMod hierarchy.
.. note::
Tcl modules
Tcl modules also allow for explicit conflicts between modulefiles.
TCL modules
TCL modules also allow for explicit conflicts between modulefiles.
.. code-block:: yaml
@@ -421,9 +426,9 @@ that are already in the Lmod hierarchy.
.. note::
Lmod hierarchical module files
LMod hierarchical module files
When ``lmod`` is activated Spack will generate a set of hierarchical lua module
files that are understood by Lmod. The hierarchy will always contain the
files that are understood by LMod. The hierarchy will always contain the
two layers ``Core`` / ``Compiler`` but can be further extended to
any of the virtual dependencies present in Spack. A case that could be useful in
practice is for instance:
@@ -445,7 +450,7 @@ that are already in the Lmod hierarchy.
that will generate a hierarchy in which the ``lapack`` and ``mpi`` layer can be switched
independently. This allows a site to build the same libraries or applications against different
implementations of ``mpi`` and ``lapack``, and let Lmod switch safely from one to the
implementations of ``mpi`` and ``lapack``, and let LMod switch safely from one to the
other.
All packages built with a compiler in ``core_compilers`` and all
@@ -455,12 +460,12 @@ that are already in the Lmod hierarchy.
.. warning::
Consistency of Core packages
The user is responsible for maintining consistency among core packages, as ``core_specs``
bypasses the hierarchy that allows Lmod to safely switch between coherent software stacks.
bypasses the hierarchy that allows LMod to safely switch between coherent software stacks.
.. warning::
Deep hierarchies and ``lmod spider``
For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
See `this discussion on the Lmod project <https://github.com/TACC/Lmod/issues/114>`_.
See `this discussion on the LMod project <https://github.com/TACC/Lmod/issues/114>`_.
""""""""""""""""""""""
Select default modules
@@ -529,7 +534,7 @@ installed to ``/spack/prefix/foo``, if ``foo`` installs executables to
update ``MANPATH``.
The default list of environment variables in this config section
includes ``PATH``, ``MANPATH``, ``ACLOCAL_PATH``, ``PKG_CONFIG_PATH``
inludes ``PATH``, ``MANPATH``, ``ACLOCAL_PATH``, ``PKG_CONFIG_PATH``
and ``CMAKE_PREFIX_PATH``, as well as ``DYLD_FALLBACK_LIBRARY_PATH``
on macOS. On Linux however, the corresponding ``LD_LIBRARY_PATH``
variable is *not* set, because it affects the behavior of
@@ -629,9 +634,8 @@ by its dependency; when the dependency is autoloaded, the executable will be in
PATH. Similarly for scripting languages such as Python, packages and their dependencies
have to be loaded together.
Autoloading is enabled by default for Lmod and Environment Modules. The former
has builtin support for through the ``depends_on`` function. The latter uses
``module load`` statement to load and track dependencies.
Autoloading is enabled by default for LMod, as it has great builtin support for through
the ``depends_on`` function. For Environment Modules it is disabled by default.
Autoloading can also be enabled conditionally:
@@ -651,14 +655,12 @@ The allowed values for the ``autoload`` statement are either ``none``,
``direct`` or ``all``.
.. note::
Tcl prerequisites
TCL prerequisites
In the ``tcl`` section of the configuration file it is possible to use
the ``prerequisites`` directive that accepts the same values as
``autoload``. It will produce module files that have a ``prereq``
statement, which autoloads dependencies on Environment Modules when its
``auto_handling`` configuration option is enabled. If Environment Modules
is installed with Spack, ``auto_handling`` is enabled by default starting
version 4.2. Otherwise it is enabled by default since version 5.0.
statement, which can be used to autoload dependencies in some versions
of Environment Modules.
------------------------
Maintaining Module Files

File diff suppressed because it is too large.


@@ -9,32 +9,27 @@
CI Pipelines
============
Spack provides commands that support generating and running automated build pipelines in CI instances. At the highest
level it works like this: provide a spack environment describing the set of packages you care about, and include a
description of how those packages should be mapped to Gitlab runners. Spack can then generate a ``.gitlab-ci.yml``
file containing job descriptions for all your packages that can be run by a properly configured CI instance. When
run, the generated pipeline will build and deploy binaries, and it can optionally report to a CDash instance
Spack provides commands that support generating and running automated build
pipelines designed for Gitlab CI. At the highest level it works like this:
provide a spack environment describing the set of packages you care about,
and include within that environment file a description of how those packages
should be mapped to Gitlab runners. Spack can then generate a ``.gitlab-ci.yml``
file containing job descriptions for all your packages that can be run by a
properly configured Gitlab CI instance. When run, the generated pipeline will
build and deploy binaries, and it can optionally report to a CDash instance
regarding the health of the builds as they evolve over time.
------------------------------
Getting started with pipelines
------------------------------
To get started with automated build pipelines a Gitlab instance with version ``>= 12.9``
(more about Gitlab CI `here <https://about.gitlab.com/product/continuous-integration/>`_)
with at least one `runner <https://docs.gitlab.com/runner/>`_ configured is required. This
can be done quickly by setting up a local Gitlab instance.
It is fairly straightforward to get started with automated build pipelines. At
a minimum, you'll need to set up a Gitlab instance (more about Gitlab CI
`here <https://about.gitlab.com/product/continuous-integration/>`_) and configure
at least one `runner <https://docs.gitlab.com/runner/>`_. Then the basic steps
for setting up a build pipeline are as follows:
It is possible to set up pipelines on gitlab.com, but the builds there are limited to
60 minutes and generic hardware. It is possible to
`hook up <https://about.gitlab.com/blog/2018/04/24/getting-started-gitlab-ci-gcp>`_
Gitlab to Google Kubernetes Engine (`GKE <https://cloud.google.com/kubernetes-engine/>`_)
or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though those
topics are outside the scope of this document.
After setting up a Gitlab instance for running CI, the basic steps for setting up a build pipeline are as follows:
#. Create a repository in the Gitlab instance with CI and a runner enabled.
#. Create a repository on your gitlab instance
#. Add a ``spack.yaml`` at the root containing your pipeline environment
#. Add a ``.gitlab-ci.yml`` at the root containing two jobs (one to generate
the pipeline dynamically, and one to run the generated jobs).
@@ -45,6 +40,13 @@ See the :ref:`functional_example` section for a minimal working example. See al
the :ref:`custom_Workflow` section for a link to an example of a custom workflow
based on spack pipelines.
While it is possible to set up pipelines on gitlab.com, as illustrated above, the
builds there are limited to 60 minutes and generic hardware. It is also possible to
`hook up <https://about.gitlab.com/blog/2018/04/24/getting-started-gitlab-ci-gcp>`_
Gitlab to Google Kubernetes Engine (`GKE <https://cloud.google.com/kubernetes-engine/>`_)
or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though those
topics are outside the scope of this document.
Spack's pipelines are now making use of the
`trigger <https://docs.gitlab.com/ee/ci/yaml/#trigger>`_ syntax to run
dynamically generated
@@ -130,35 +132,29 @@ And here's the spack environment built by the pipeline represented as a
mirrors: { "mirror": "s3://spack-public/mirror" }
ci:
gitlab-ci:
before_script:
- git clone ${SPACK_REPO}
- pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
- . "./spack/share/spack/setup-env.sh"
script:
- pushd ${SPACK_CONCRETE_ENV_DIR} && spack env activate --without-view . && popd
- spack -d ci rebuild
mappings:
- match: ["os=ubuntu18.04"]
runner-attributes:
image:
name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
entrypoint: [""]
tags:
- docker
enable-artifacts-buildcache: True
rebuild-index: False
pipeline-gen:
- any-job:
before_script:
- git clone ${SPACK_REPO}
- pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
- . "./spack/share/spack/setup-env.sh"
- build-job:
tags: [docker]
image:
name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
entrypoint: [""]
The elements of this file important to spack ci pipelines are described in more
detail below, but there are a couple of things to note about the above working
example:
.. note::
There is no ``script`` attribute specified for here. The reason for this is
Spack CI will automatically generate reasonable default scripts. More
detail on what is in these scripts can be found below.
Also notice the ``before_script`` section. It is required when using any of the
default scripts to source the ``setup-env.sh`` script in order to inform
the default scripts where to find the ``spack`` executable.
Normally ``enable-artifacts-buildcache`` is not recommended in production as it
results in large binary artifacts getting transferred back and forth between
gitlab and the runners. But in this example on gitlab.com where there is no
@@ -178,7 +174,7 @@ during subsequent pipeline runs.
With the addition of reproducible builds (#22887) a previously working
pipeline will require some changes:
* In the build-jobs, the environment location changed.
* In the build jobs (``runner-attributes``), the environment location changed.
This will typically show as a ``KeyError`` in the failing job. Be sure to
point to ``${SPACK_CONCRETE_ENV_DIR}``.
@@ -200,9 +196,9 @@ ci pipelines. These commands are covered in more detail in this section.
.. _cmd-spack-ci:
^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^
``spack ci``
^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^
Super-command for functionality related to generating pipelines and executing
pipeline jobs.
@@ -231,7 +227,7 @@ Using ``--prune-dag`` or ``--no-prune-dag`` configures whether or not jobs are
generated for specs that are already up to date on the mirror. If enabling
DAG pruning using ``--prune-dag``, more information may be required in your
``spack.yaml`` file, see the :ref:`noop_jobs` section below regarding
``noop-job``.
``service-job-attributes``.
The optional ``--check-index-only`` argument can be used to speed up pipeline
generation by telling spack to consider only remote buildcache indices when
@@ -267,11 +263,11 @@ generated by jobs in the pipeline.
.. _cmd-spack-ci-rebuild:
^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^
``spack ci rebuild``
^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^
The purpose of ``spack ci rebuild`` is to take an assigned
The purpose of ``spack ci rebuild`` is straightforward: take its assigned
spec and ensure a binary of a successful build exists on the target mirror.
If the binary does not already exist, it is built from source and pushed
to the mirror. The associated stand-alone tests are optionally run against
@@ -284,7 +280,7 @@ directory. The script is run in a job to install the spec from source. The
resulting binary package is pushed to the mirror. If ``cdash`` is configured
for the environment, then the build results will be uploaded to the site.
Environment variables and values in the ``ci::pipeline-gen`` section of the
Environment variables and values in the ``gitlab-ci`` section of the
``spack.yaml`` environment file provide inputs to this process. The
two main sources of environment variables are variables written into
``.gitlab-ci.yml`` by ``spack ci generate`` and the GitLab CI runtime.
@@ -302,23 +298,21 @@ A snippet from an example ``spack.yaml`` file illustrating use of this
option *and* specification of a package with broken tests is given below.
The inclusion of a spec for building ``gptune`` is not shown here. Note
that ``--tests`` is passed to ``spack ci rebuild`` as part of the
``build-job`` script.
``gitlab-ci`` script.
.. code-block:: yaml
ci:
pipeline-gen:
- build-job
script:
- . "./share/spack/setup-env.sh"
- spack --version
- cd ${SPACK_CONCRETE_ENV_DIR}
- spack env activate --without-view .
- spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'"
- mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
- if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi
- if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
- spack -d ci rebuild --tests > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2)
gitlab-ci:
script:
- . "./share/spack/setup-env.sh"
- spack --version
- cd ${SPACK_CONCRETE_ENV_DIR}
- spack env activate --without-view .
- spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'"
- mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
- if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi
- if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
- spack -d ci rebuild --tests > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2)
broken-tests-packages:
- gptune
@@ -360,31 +354,113 @@ arguments you can pass to ``spack ci reproduce-build`` in order to reproduce
a particular build locally.
------------------------------------
Job Types
A pipeline-enabled spack environment
------------------------------------
^^^^^^^^^^^^^^^
Rebuild (build)
^^^^^^^^^^^^^^^
Here's an example of a spack environment file that has been enhanced with
sections describing a build pipeline:
Rebuild jobs, denoted as ``build-job``'s in the ``pipeline-gen`` list, are jobs
associated with concrete specs that have been marked for rebuild. By default a simple
script for doing rebuild is generated, but may be modified as needed.
.. code-block:: yaml
The default script does three main steps, change directories to the pipelines concrete
environment, activate the concrete environment, and run the ``spack ci rebuild`` command:
spack:
definitions:
- pkgs:
- readline@7.0
- compilers:
- '%gcc@5.5.0'
- oses:
- os=ubuntu18.04
- os=centos7
specs:
- matrix:
- [$pkgs]
- [$compilers]
- [$oses]
mirrors:
cloud_gitlab: https://mirror.spack.io
gitlab-ci:
mappings:
- match:
- os=ubuntu18.04
runner-attributes:
tags:
- spack-kube
image: spack/ubuntu-bionic
- match:
- os=centos7
runner-attributes:
tags:
- spack-kube
image: spack/centos7
cdash:
build-group: Release Testing
url: https://cdash.spack.io
project: Spack
site: Spack AWS Gitlab Instance
.. code-block:: bash
Hopefully, the ``definitions``, ``specs``, ``mirrors``, etc. sections are already
familiar, as they are part of spack :ref:`environments`. So let's take a more
in-depth look some of the pipeline-related sections in that environment file
that might not be as familiar.
cd ${concrete_environment_dir}
spack env activate --without-view .
spack ci rebuild
The ``gitlab-ci`` section is used to configure how the pipeline workload should be
generated, mainly how the jobs for building specs should be assigned to the
configured runners on your instance. Each entry within the list of ``mappings``
corresponds to a known gitlab runner, where the ``match`` section is used
in assigning a release spec to one of the runners, and the ``runner-attributes``
section is used to configure the spec/job for that particular runner.
Both the top-level ``gitlab-ci`` section as well as each ``runner-attributes``
section can also contain the following keys: ``image``, ``tags``, ``variables``,
``before_script``, ``script``, and ``after_script``. If any of these keys are
provided at the ``gitlab-ci`` level, they will be used as the defaults for any
``runner-attributes``, unless they are overridden in those sections. Specifying
any of these keys at the ``runner-attributes`` level generally overrides the
keys specified at the higher level, with a couple exceptions. Any ``variables``
specified at both levels result in those dictionaries getting merged in the
resulting generated job, and any duplicate variable names get assigned the value
provided in the specific ``runner-attributes``. If ``tags`` are specified both
at the ``gitlab-ci`` level as well as the ``runner-attributes`` level, then the
lists of tags are combined, and any duplicates are removed.
See the section below on using a custom spack for an example of how these keys
could be used.
There are other pipeline options you can configure within the ``gitlab-ci`` section
as well.
The ``bootstrap`` section allows you to specify lists of specs from
your ``definitions`` that should be staged ahead of the environment's ``specs`` (this
section is described in more detail below). The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``).
The optional ``broken-specs-url`` key tells Spack to check against a list of
specs that are known to be currently broken in ``develop``. If any such specs
are found, the ``spack ci generate`` command will fail with an error message
informing the user what broken specs were encountered. This allows the pipeline
to fail early and avoid wasting compute resources attempting to build packages
that will not succeed.
The optional ``cdash`` section provides information that will be used by the
``spack ci generate`` command (invoked by ``spack ci start``) for reporting
to CDash. All the jobs generated from this environment will belong to a
"build group" within CDash that can be tracked over time. As the release
progresses, this build group may have jobs added or removed. The url, project,
and site are used to specify the CDash instance to which build results should
be reported.
Take a look at the
`schema <https://github.com/spack/spack/blob/develop/lib/spack/spack/schema/gitlab_ci.py>`_
for the gitlab-ci section of the spack environment file, to see precisely what
syntax is allowed there.
.. _rebuild_index:
^^^^^^^^^^^^^^^^^^^^^^
Update Index (reindex)
^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Note about rebuilding buildcache index
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, while a pipeline job may rebuild a package, create a buildcache
entry, and push it to the mirror, it does not automatically re-generate the
@@ -399,44 +475,21 @@ not correctly reflect the mirror's contents at the end of a pipeline.
To make sure the buildcache index is up to date at the end of your pipeline,
spack generates a job to update the buildcache index of the target mirror
at the end of each pipeline by default. You can disable this behavior by
adding ``rebuild-index: False`` inside the ``ci`` section of your
spack environment.
Reindex jobs do not allow modifying the ``script`` attribute since it is automatically
generated using the target mirror listed in the ``mirrors::mirror`` configuration.
^^^^^^^^^^^^^^^^^
Signing (signing)
^^^^^^^^^^^^^^^^^
This job is run after all of the rebuild jobs are completed and is intended to be used
to sign the package binaries built by a protected CI run. Signing jobs are generated
only if a signing job ``script`` is specified and the spack CI job type is protected.
Note, if an ``any-job`` section contains a script, this will not implicitly create a
``signing`` job, a signing job may only exist if it is explicitly specified in the
configuration with a ``script`` attribute. Specifying a signing job without a script
does not create a signing job and the job configuration attributes will be ignored.
Signing jobs are always assigned the runner tags ``aws``, ``protected``, and ``notary``.
^^^^^^^^^^^^^^^^^
Cleanup (cleanup)
^^^^^^^^^^^^^^^^^
When using ``temporary-storage-url-prefix`` the cleanup job will destroy the mirror
created for the associated Gitlab pipeline. Cleanup jobs do not allow modifying the
script, but do expect that the spack command is in the path and require a
``before_script`` to be specified that sources the ``setup-env.sh`` script.
adding ``rebuild-index: False`` inside the ``gitlab-ci`` section of your
spack environment. Spack will assign the job any runner attributes found
on the ``service-job-attributes``, if you have provided that in your
``spack.yaml``.
.. _noop_jobs:
^^^^^^^^^^^^
No Op (noop)
^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^
Note about "no-op" jobs
^^^^^^^^^^^^^^^^^^^^^^^
If no specs in an environment need to be rebuilt during a given pipeline run
(meaning all are already up to date on the mirror), a single successful job
(a NO-OP) is still generated to avoid an empty pipeline (which GitLab
considers to be an error). The ``noop-job*`` sections
considers to be an error). An optional ``service-job-attributes`` section
can be added to your ``spack.yaml`` where you can provide ``tags`` and
``image`` or ``variables`` for the generated NO-OP job. This section also
supports providing ``before_script``, ``script``, and ``after_script``, in
@@ -446,100 +499,51 @@ Following is an example of this section added to a ``spack.yaml``:
.. code-block:: yaml
spack:
ci:
pipeline-gen:
- noop-job:
tags: ['custom', 'tag']
image:
name: 'some.image.registry/custom-image:latest'
entrypoint: ['/bin/bash']
script::
- echo "Custom message in a custom script"
spack:
specs:
- openmpi
mirrors:
cloud_gitlab: https://mirror.spack.io
gitlab-ci:
mappings:
- match:
- os=centos8
runner-attributes:
tags:
- custom
- tag
image: spack/centos7
service-job-attributes:
tags: ['custom', 'tag']
image:
name: 'some.image.registry/custom-image:latest'
entrypoint: ['/bin/bash']
script:
- echo "Custom message in a custom script"
The example above illustrates how you can provide the attributes used to run
the NO-OP job in the case of an empty pipeline. The only field for the NO-OP
job that might be generated for you is ``script``, but that will only happen
if you do not provide one yourself. Notice in this example the ``script``
uses the ``::`` notation to prescribe override behavior. Without this, the
``echo`` command would have been prepended to the automatically generated script
rather than replacing it.
if you do not provide one yourself.
------------------------------------
ci.yaml
------------------------------------
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Assignment of specs to runners
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here's an example of a spack configuration file describing a build pipeline:
The ``mappings`` section corresponds to a list of runners. During assignment
of specs to runners, the list is traversed in order looking for matches; the
first runner that matches a release spec is assigned to build that spec. The
``match`` section within each runner mapping section is a list of specs, and
if any of those specs match the release spec (the ``spec.satisfies()`` method
is used), then that runner is considered a match.
.. code-block:: yaml
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuration of specs/jobs for a runner
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   ci:
     target: gitlab
     rebuild_index: True
     broken-specs-url: https://broken.specs.url
     broken-tests-packages:
     - gptune
     pipeline-gen:
     - submapping:
       - match:
         - os=ubuntu18.04
         build-job:
           tags:
           - spack-kube
           image: spack/ubuntu-bionic
       - match:
         - os=centos7
         build-job:
           tags:
           - spack-kube
           image: spack/centos7

   cdash:
     build-group: Release Testing
     url: https://cdash.spack.io
     project: Spack
     site: Spack AWS Gitlab Instance
The ``ci`` config section is used to configure how the pipeline workload should be
generated, mainly how the jobs for building specs should be assigned to the
configured runners on your instance. The main section for configuring pipelines
is ``pipeline-gen``, which is a list of job attribute sections that are merged,
using the same rules as Spack configs (:ref:`config-scope-precedence`), from the bottom up.
Sections are applied in this order to stay consistent with how Spack orders scope precedence when merging lists.
There are two main section types: ``<type>-job`` sections and ``submapping``
sections.
^^^^^^^^^^^^^^^^^^^^^^
Job Attribute Sections
^^^^^^^^^^^^^^^^^^^^^^
Each type of job may have attributes added or removed via sections in the ``pipeline-gen``
list. Job-type-specific attributes may be specified using the keys ``<type>-job`` to
add attributes to all jobs of type ``<type>``, or ``<type>-job-remove`` to remove attributes
from jobs of type ``<type>``. Each section may only contain one type of job attribute specification, i.e.,
``build-job`` and ``noop-job`` may not coexist, but ``build-job`` and ``build-job-remove`` may.
.. note::
The ``*-remove`` specifications are applied before the additive attribute specifications.
For example, if both ``build-job`` and ``build-job-remove`` are listed in the same
``pipeline-gen`` section, any value named in both will still exist in the merged ``build-job``
after the section is applied, because it is removed first and then added back.
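A hedged sketch of that interaction (the tag name is a placeholder); because removal happens
first, the tag listed in both subsections survives in the merged ``build-job``:

.. code-block:: yaml

   spack:
     ci:
       pipeline-gen:
       - build-job-remove:
           tags: [spack-kube]
         build-job:
           tags: [spack-kube]   # removed first, then added back, so it remains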
All of the attributes specified are forwarded to the generated CI jobs; however, special
treatment is applied to the attributes ``tags``, ``image``, ``variables``, ``script``,
``before_script``, and ``after_script`` as they are components recognized explicitly by the
Spack CI generator. For the ``tags`` attribute, Spack will remove reserved tags
(:ref:`reserved_tags`) from all jobs specified in the config. In some cases, such as for
``signing`` jobs, reserved tags will be added back based on the type of CI that is being run.
Once a runner has been chosen to build a release spec, the ``build-job*``
sections provide information determining details of the job in the context of
the runner. At least one of the ``build-job*`` sections must contain a ``tags`` key, which
Once a runner has been chosen to build a release spec, the ``runner-attributes``
section provides information determining details of the job in the context of
the runner. The ``runner-attributes`` section must have a ``tags`` key, which
is a list containing at least one tag used to select the runner from among the
runners known to the gitlab instance. For Docker executor type runners, the
``image`` key is used to specify the Docker image used to build the release spec
@@ -550,7 +554,7 @@ information on to the runner that it needs to do its work (e.g. scheduler
parameters, etc.). Any ``variables`` provided here will be added, verbatim, to
each job.
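For instance, a hedged sketch passing scheduler-style parameters through ``variables`` (the
variable names and values are placeholders):

.. code-block:: yaml

   spack:
     ci:
       pipeline-gen:
       - build-job:
           tags: [spack-kube]
           variables:
             MY_SCHEDULER_ACCOUNT: dev-account   # placeholder
             MY_EXTRA_FLAG: "--fast"             # placeholder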
The ``build-job`` section also allows users to supply custom ``script``,
The ``runner-attributes`` section also allows users to supply custom ``script``,
``before_script``, and ``after_script`` sections to be applied to every job
scheduled on that runner. This allows users to do any custom preparation or
cleanup tasks that fit their particular workflow, as well as completely
@@ -561,45 +565,46 @@ environment directory is located within your ``--artifacts_root`` (or if not
provided, within your ``$CI_PROJECT_DIR``), activates that environment for
you, and invokes ``spack ci rebuild``.
Sections that specify scripts (``script``, ``before_script``, ``after_script``) are all
read as lists of commands or lists of lists of commands. It is recommended to write scripts
as lists of lists if scripts will be composed via merging. The default behavior of merging
lists will remove duplicate commands and potentially apply unwanted reordering, whereas
merging lists of lists will preserve the local ordering and never remove duplicate
commands. When the commands are written to the CI target script, all lists are expanded and
flattened into a single list.
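A minimal sketch of the list-of-lists form (the commands are taken from the build-job example
later in this document):

.. code-block:: yaml

   spack:
     ci:
       pipeline-gen:
       - build-job:
           script:
           - - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
             - spack -d ci rebuild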
.. _staging_algorithm:
^^^^^^^^^^^^^^^^^^^
Submapping Sections
^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Summary of ``.gitlab-ci.yml`` generation algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A special case of attribute specification is the ``submapping`` section, which may be used
to apply job attributes to build jobs based on the package spec associated with the rebuild
job. A submapping is specified as a list of spec ``match`` lists associated with
``build-job``/``build-job-remove`` sections. There are two options for ``match_behavior``:
either ``first`` or ``merge`` may be specified. In either case, the ``submapping`` list is
processed from the bottom up, and each ``match`` list is searched for a string that
satisfies the check ``spec.satisfies({match_item})`` for each concrete spec.
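As a hedged sketch (the tag and variable names are placeholders), a submapping entry can also
match on a package name rather than a platform property:

.. code-block:: yaml

   spack:
     ci:
       pipeline-gen:
       - submapping:
         - match:
           - openmpi
           build-job:
             tags: [spack-kube]
             variables:
               MY_EXTRA_CPUS: "16"   # placeholder variable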
All specs yielded by the matrix (or all the specs in the environment) have their
dependencies computed, and the entire resulting set of specs are staged together
before being run through the ``gitlab-ci/mappings`` entries, where each staged
spec is assigned a runner. "Staging" is the name given to the process of
figuring out in what order the specs should be built, taking into consideration
Gitlab CI rules about jobs/stages. In the staging process the goal is to maximize
the number of jobs in any stage of the pipeline, while ensuring that the jobs in
any stage only depend on jobs in previous stages (since those jobs are guaranteed
to have completed already). As a runner is determined for a job, the information
in the ``runner-attributes`` is used to populate various parts of the job
description that will be used by Gitlab CI. Once all the jobs have been assigned
a runner, the ``.gitlab-ci.yml`` is written to disk.
In the case of ``match_behavior: first``, the first ``match`` section in the list of
``submappings`` that contains a string that satisfies the spec will apply its
``build-job*`` attributes to the rebuild job associated with that spec. This is the
default behavior and is the method used when no ``match_behavior`` is specified.
The short example provided above would result in the ``readline``, ``ncurses``,
and ``pkgconf`` packages getting staged and built on the runner chosen by the
``spack-k8s`` tag. In this example, spack assumes the runner is a Docker executor
type runner, and thus certain jobs will be run in the ``centos7`` container,
and others in the ``ubuntu-18.04`` container. The resulting ``.gitlab-ci.yml``
will contain 6 jobs in three stages. Once the jobs have been generated, the
presence of a ``SPACK_CDASH_AUTH_TOKEN`` environment variable during the
``spack ci generate`` command would result in all of the jobs being put in a
build group on CDash called "Release Testing" (that group will be created if
it didn't already exist).
In the case of ``match_behavior: merge``, all of the ``match`` sections in the list of
``submappings`` that contain a string that satisfies the spec will have their associated
``build-job*`` attributes applied to the rebuild job associated with that spec. Again,
the attributes are merged starting from the bottom match and going up to the top match.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional compiler bootstrapping
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In the case that no match is found in a submapping section, no additional attributes will be applied.
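A hedged sketch of the ``merge`` behavior (assuming ``match_behavior`` sits alongside
``submapping`` in the same ``pipeline-gen`` entry; the variable name is a placeholder): a spec
such as ``openmpi os=centos7`` satisfies both match lists below, so the two ``build-job``
sections are merged from the bottom up.

.. code-block:: yaml

   spack:
     ci:
       pipeline-gen:
       - match_behavior: merge
         submapping:
         - match:
           - os=centos7
           build-job:
             tags: [spack-kube]
             image: spack/centos7
         - match:
           - openmpi
           build-job:
             variables:
               MY_EXTRA_CPUS: "16"   # placeholder variable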
^^^^^^^^^^^^^
Bootstrapping
^^^^^^^^^^^^^
The ``bootstrap`` section allows you to specify lists of specs from
your ``definitions`` that should be staged ahead of the environment's ``specs``. At the moment
Spack pipelines also have support for bootstrapping compilers on systems that
may not already have the desired compilers installed. The idea here is that
you can specify a list of things to bootstrap in your ``definitions``, and
spack will guarantee those will be installed in a phase of the pipeline before
your release specs, so that you can rely on those packages being available in
the binary mirror when you need them later on in the pipeline. At the moment
the only viable use-case for bootstrapping is to install compilers.
Here's an example of what bootstrapping some compilers might look like:
@@ -675,86 +680,6 @@ environment/stack file, and in that case no bootstrapping will be done (only the
specs will be staged for building) and the runners will be expected to already
have all needed compilers installed and configured for spack to use.
^^^^^^^^^^^^^^^^^^^
Pipeline Buildcache
^^^^^^^^^^^^^^^^^^^
The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``).
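A minimal sketch, assuming the key sits at the top level of the pipeline configuration
section shown here:

.. code-block:: yaml

   spack:
     ci:
       enable-artifacts-buildcache: True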
^^^^^^^^^^^^^^^^
Broken Specs URL
^^^^^^^^^^^^^^^^
The optional ``broken-specs-url`` key tells Spack to check against a list of
specs that are known to be currently broken in ``develop``. If any such specs
are found, the ``spack ci generate`` command will fail with an error message
informing the user what broken specs were encountered. This allows the pipeline
to fail early and avoid wasting compute resources attempting to build packages
that will not succeed.
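Echoing the broader example above, a minimal sketch:

.. code-block:: yaml

   spack:
     ci:
       broken-specs-url: https://broken.specs.url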
^^^^^
CDash
^^^^^
The optional ``cdash`` section provides information that will be used by the
``spack ci generate`` command (invoked by ``spack ci start``) for reporting
to CDash. All the jobs generated from this environment will belong to a
"build group" within CDash that can be tracked over time. As the release
progresses, this build group may have jobs added or removed. The url, project,
and site are used to specify the CDash instance to which build results should
be reported.
Take a look at the
`schema <https://github.com/spack/spack/blob/develop/lib/spack/spack/schema/ci.py>`_
for the gitlab-ci section of the spack environment file, to see precisely what
syntax is allowed there.
.. _reserved_tags:
^^^^^^^^^^^^^
Reserved Tags
^^^^^^^^^^^^^
Spack has a subset of tags (``public``, ``protected``, and ``notary``) that it reserves
for classifying runners that may require special permissions or access. The tags
``public`` and ``protected`` are used to distinguish between runners that use public
permissions and runners with protected permissions. The ``notary`` tag is a special tag
that is used to indicate runners that have access to the highly protected information
used for signing binaries using the ``signing`` job.
.. _staging_algorithm:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Summary of ``.gitlab-ci.yml`` generation algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All specs yielded by the matrix (or all the specs in the environment) have their
dependencies computed, and the entire resulting set of specs are staged together
before being run through the ``ci/pipeline-gen`` entries, where each staged
spec is assigned a runner. "Staging" is the name given to the process of
figuring out in what order the specs should be built, taking into consideration
Gitlab CI rules about jobs/stages. In the staging process the goal is to maximize
the number of jobs in any stage of the pipeline, while ensuring that the jobs in
any stage only depend on jobs in previous stages (since those jobs are guaranteed
to have completed already). As a runner is determined for a job, the information
in the merged ``any-job*`` and ``build-job*`` sections is used to populate various parts of the job
description that will be used by the target CI pipelines. Once all the jobs have been assigned
a runner, the ``.gitlab-ci.yml`` is written to disk.
The short example provided above would result in the ``readline``, ``ncurses``,
and ``pkgconf`` packages getting staged and built on the runner chosen by the
``spack-k8s`` tag. In this example, spack assumes the runner is a Docker executor
type runner, and thus certain jobs will be run in the ``centos7`` container,
and others in the ``ubuntu-18.04`` container. The resulting ``.gitlab-ci.yml``
will contain 6 jobs in three stages. Once the jobs have been generated, the
presence of a ``SPACK_CDASH_AUTH_TOKEN`` environment variable during the
``spack ci generate`` command would result in all of the jobs being put in a
build group on CDash called "Release Testing" (that group will be created if
it didn't already exist).
-------------------------------------
Using a custom spack in your pipeline
-------------------------------------
@@ -801,21 +726,23 @@ generated by ``spack ci generate``. You also want your generated rebuild jobs
   spack:
     ...
     ci:
       pipeline-gen:
       - build-job:
           tags:
           - spack-kube
           image: spack/ubuntu-bionic
           before_script:
           - git clone ${SPACK_REPO}
           - pushd spack && git checkout ${SPACK_REF} && popd
           - . "./spack/share/spack/setup-env.sh"
           script:
           - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
           - spack -d ci rebuild
           after_script:
           - rm -rf ./spack
     gitlab-ci:
       mappings:
       - match:
         - os=ubuntu18.04
         runner-attributes:
           tags:
           - spack-kube
           image: spack/ubuntu-bionic
           before_script:
           - git clone ${SPACK_REPO}
           - pushd spack && git checkout ${SPACK_REF} && popd
           - . "./spack/share/spack/setup-env.sh"
           script:
           - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
           - spack -d ci rebuild
           after_script:
           - rm -rf ./spack
Now all of the generated rebuild jobs will use the same shell script to clone
spack before running their actual workload.
@@ -904,4 +831,3 @@ verify binary packages (when installing or creating buildcaches). You could
also have already trusted a key spack knows about, or if no key is present anywhere,
spack will install specs using ``--no-check-signature`` and create buildcaches
using ``-u`` (for unsigned binaries).
lib/spack/env/cc
View File
@@ -427,48 +427,6 @@ isystem_include_dirs_list=""
libs_list=""
other_args_list=""
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
parse_Wl() {
# drop -Wl
shift
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
rp="$1"
wl_expect_rpath=no
else
rp=""
case "$1" in
-rpath=*)
rp="${1#-rpath=}"
;;
--rpath=*)
rp="${1#--rpath=}"
;;
-rpath|--rpath)
wl_expect_rpath=yes
;;
"$dtags_to_strip")
;;
*)
append other_args_list "-Wl,$1"
;;
esac
fi
if [ -n "$rp" ]; then
if system_dir "$rp"; then
append system_rpath_dirs_list "$rp"
else
append rpath_dirs_list "$rp"
fi
fi
shift
done
# By lack of local variables, always set this to empty string.
rp=""
}
while [ $# -ne 0 ]; do
@@ -568,9 +526,54 @@ while [ $# -ne 0 ]; do
append other_args_list "-l$arg"
;;
-Wl,*)
IFS=,
parse_Wl $1
unset IFS
arg="${1#-Wl,}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
case "$arg" in
-rpath=*) rp="${arg#-rpath=}" ;;
--rpath=*) rp="${arg#--rpath=}" ;;
-rpath,*) rp="${arg#-rpath,}" ;;
--rpath,*) rp="${arg#--rpath,}" ;;
-rpath|--rpath)
shift; arg="$1"
case "$arg" in
-Wl,*)
rp="${arg#-Wl,}"
;;
*)
die "-Wl,-rpath was not followed by -Wl,*"
;;
esac
;;
"$dtags_to_strip")
: # We want to remove explicitly this flag
;;
*)
append other_args_list "-Wl,$arg"
;;
esac
;;
-Xlinker,*)
arg="${1#-Xlinker,}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
case "$arg" in
-rpath=*) rp="${arg#-rpath=}" ;;
--rpath=*) rp="${arg#--rpath=}" ;;
-rpath|--rpath)
shift; arg="$1"
case "$arg" in
-Xlinker,*)
rp="${arg#-Xlinker,}"
;;
*)
die "-Xlinker,-rpath was not followed by -Xlinker,*"
;;
esac
;;
*)
append other_args_list "-Xlinker,$arg"
;;
esac
;;
-Xlinker)
if [ "$2" = "-rpath" ]; then
View File
@@ -16,16 +16,19 @@
import sys
import tempfile
from contextlib import contextmanager
from sys import platform as _platform
from typing import Callable, List, Match, Optional, Tuple, Union
from llnl.util import tty
from llnl.util.lang import dedupe, memoized
from llnl.util.symlink import islink, symlink
from spack.util.executable import Executable, which
from spack.util.executable import CommandNotFoundError, Executable, which
from spack.util.path import path_to_os_path, system_path_filter
if sys.platform != "win32":
is_windows = _platform == "win32"
if not is_windows:
import grp
import pwd
else:
@@ -81,77 +84,9 @@
"visit_directory_tree",
]
if sys.version_info < (3, 7, 4):
# monkeypatch shutil.copystat to fix PermissionError when copying read-only
# files on Lustre when using Python < 3.7.4
def copystat(src, dst, follow_symlinks=True):
"""Copy file metadata
Copy the permission bits, last access time, last modification time, and
flags from `src` to `dst`. On Linux, copystat() also copies the "extended
attributes" where possible. The file contents, owner, and group are
unaffected. `src` and `dst` are path names given as strings.
If the optional flag `follow_symlinks` is not set, symlinks aren't
followed if and only if both `src` and `dst` are symlinks.
"""
def _nop(args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if sys.version_info >= (3, 3):
if fn in os.supports_follow_symlinks: # novermin
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns), follow_symlinks=follow)
# We must copy extended attributes before the file is (potentially)
# chmod()'ed read-only, otherwise setxattr() will error with -EACCES.
shutil._copyxattr(src, dst, follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
# if we got a NotImplementedError, it's because
# * follow_symlinks=False,
# * lchown() is unavailable, and
# * either
# * fchownat() is unavailable or
# * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# (it returned ENOSUP.)
# therefore we're out of options--we simply cannot chown the
# symlink. give up, suppress the error.
# (which is what shutil always did in this circumstance.)
pass
if hasattr(st, "st_flags"):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in "EOPNOTSUPP", "ENOTSUP":
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
shutil.copystat = copystat
def getuid():
if sys.platform == "win32":
if is_windows:
import ctypes
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
@@ -164,7 +99,7 @@ def getuid():
@system_path_filter
def rename(src, dst):
# On Windows, os.rename will fail if the destination file already exists
if sys.platform == "win32":
if is_windows:
# Windows path existence checks will sometimes fail on junctions/links/symlinks
# so check for that case
if os.path.exists(dst) or os.path.islink(dst):
@@ -182,7 +117,13 @@ def path_contains_subdirectory(path, root):
@memoized
def file_command(*args):
"""Creates entry point to `file` system command with provided arguments"""
file_cmd = which("file", required=True)
try:
file_cmd = which("file", required=True)
except CommandNotFoundError as e:
if is_windows:
raise CommandNotFoundError("`file` utility is not available on Windows")
else:
raise e
for arg in args:
file_cmd.add_default_arg(arg)
return file_cmd
@@ -193,11 +134,7 @@ def _get_mime_type():
"""Generate method to call `file` system command to aquire mime type
for a specified path
"""
if sys.platform == "win32":
# -h option (no-dereference) does not exist in Windows
return file_command("-b", "--mime-type")
else:
return file_command("-b", "-h", "--mime-type")
return file_command("-b", "-h", "--mime-type")
@memoized
@@ -333,6 +270,7 @@ def groupid_to_group(x):
regex = re.escape(regex)
filenames = path_to_os_path(*filenames)
for filename in filenames:
msg = 'FILTER FILE: {0} [replacing "{1}"]'
tty.debug(msg.format(filename, regex))
@@ -548,7 +486,7 @@ def get_owner_uid(path, err_msg=None):
else:
p_stat = os.stat(path)
if sys.platform != "win32":
if _platform != "win32":
owner_uid = p_stat.st_uid
else:
sid = win32security.GetFileSecurity(
@@ -581,7 +519,7 @@ def group_ids(uid=None):
Returns:
(list of int): gids of groups the user is a member of
"""
if sys.platform == "win32":
if is_windows:
tty.warn("Function is not supported on Windows")
return []
@@ -601,7 +539,7 @@ def group_ids(uid=None):
@system_path_filter(arg_slice=slice(1))
def chgrp(path, group, follow_symlinks=True):
"""Implement the bash chgrp function on a single path"""
if sys.platform == "win32":
if is_windows:
raise OSError("Function 'chgrp' is not supported on Windows")
if isinstance(group, str):
@@ -1128,7 +1066,7 @@ def open_if_filename(str_or_file, mode="r"):
@system_path_filter
def touch(path):
"""Creates an empty file at the specified path."""
if sys.platform == "win32":
if is_windows:
perms = os.O_WRONLY | os.O_CREAT
else:
perms = os.O_WRONLY | os.O_CREAT | os.O_NONBLOCK | os.O_NOCTTY
@@ -1190,7 +1128,7 @@ def temp_cwd():
yield tmp_dir
finally:
kwargs = {}
if sys.platform == "win32":
if is_windows:
kwargs["ignore_errors"] = False
kwargs["onerror"] = readonly_file_handler(ignore_errors=True)
shutil.rmtree(tmp_dir, **kwargs)
@@ -1284,6 +1222,7 @@ def traverse_tree(
# target is relative to the link, then that may not resolve properly
# relative to our cwd - see resolve_link_target_relative_to_the_link
if os.path.isdir(source_child) and (follow_links or not os.path.islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
@@ -1435,7 +1374,7 @@ def visit_directory_tree(root, visitor, rel_path="", depth=0):
try:
isdir = f.is_dir()
except OSError as e:
if sys.platform == "win32" and hasattr(e, "winerror") and e.winerror == 5 and islink:
if is_windows and hasattr(e, "winerror") and e.winerror == 5 and islink:
# if path is a symlink, determine destination and
# evaluate file vs directory
link_target = resolve_link_target_relative_to_the_link(f)
@@ -1544,11 +1483,11 @@ def readonly_file_handler(ignore_errors=False):
"""
def error_remove_readonly(func, path, exc):
if sys.platform != "win32":
if not is_windows:
raise RuntimeError("This method should only be invoked on Windows")
excvalue = exc[1]
if (
sys.platform == "win32"
is_windows
and func in (os.rmdir, os.remove, os.unlink)
and excvalue.errno == errno.EACCES
):
@@ -1578,7 +1517,7 @@ def remove_linked_tree(path):
# Windows readonly files cannot be removed by Python
# directly.
if sys.platform == "win32":
if is_windows:
kwargs["ignore_errors"] = False
kwargs["onerror"] = readonly_file_handler(ignore_errors=True)
@@ -1725,6 +1664,7 @@ def find(root, files, recursive=True):
@system_path_filter
def _find_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict. The idea is that
# we want to poke the filesystem as little as possible, but still maintain
# stability in the order of the answer. Thus we are recording each library
@@ -2092,7 +2032,7 @@ def names(self):
# on non Windows platform
# Windows valid library extensions are:
# ['.dll', '.lib']
valid_exts = [".dll", ".lib"] if sys.platform == "win32" else [".dylib", ".so", ".a"]
valid_exts = [".dll", ".lib"] if is_windows else [".dylib", ".so", ".a"]
for ext in valid_exts:
i = name.rfind(ext)
if i != -1:
@@ -2240,7 +2180,7 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
message = message.format(find_libraries.__name__, type(libraries))
raise TypeError(message)
if sys.platform == "win32":
if is_windows:
static_ext = "lib"
# For linking (runtime=False) you need the .lib files regardless of
# whether you are doing a shared or static link
@@ -2272,7 +2212,7 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
# finally search all of root recursively. The search stops when the first
# match is found.
common_lib_dirs = ["lib", "lib64"]
if sys.platform == "win32":
if is_windows:
common_lib_dirs.extend(["bin", "Lib"])
for subdir in common_lib_dirs:
@@ -2407,7 +2347,7 @@ def _link(self, path, dest_dir):
# For py2 compatibility, we have to catch the specific Windows error code
# associate with trying to create a file that already exists (winerror 183)
except OSError as e:
if sys.platform == "win32" and (e.winerror == 183 or e.errno == errno.EEXIST):
if e.winerror == 183:
# We have either already symlinked or we are encountering a naming clash
# either way, we don't want to overwrite existing libraries
already_linked = islink(dest_file)
@@ -2695,28 +2635,3 @@ def temporary_dir(
yield tmp_dir
finally:
remove_directory_contents(tmp_dir)
def filesummary(path, print_bytes=16) -> Tuple[int, bytes]:
"""Create a small summary of the given file. Does not error
when file does not exist.
Args:
print_bytes (int): Number of bytes to print from start/end of file
Returns:
Tuple of size and byte string containing first n .. last n bytes.
Size is 0 if file cannot be read."""
try:
n = print_bytes
with open(path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if size <= 2 * n:
short_contents = f.read(2 * n)
else:
short_contents = f.read(n)
f.seek(-n, 2)
short_contents += b"..." + f.read(n)
return size, short_contents
except OSError:
return 0, b""
View File
@@ -198,7 +198,7 @@ def _memoized_function(*args, **kwargs):
except TypeError as e:
# TypeError is raised when indexing into a dict if the key is unhashable.
raise UnhashableArguments(
"args + kwargs '{}' was not hashable for function '{}'".format(key, func.__name__)
"args + kwargs '{}' was not hashable for function '{}'".format(key, func.__name__),
) from e
return _memoized_function
@@ -237,7 +237,6 @@ def decorator_with_or_without_args(decorator):
@decorator
"""
# See https://stackoverflow.com/questions/653368 for more on this
@functools.wraps(decorator)
def new_dec(*args, **kwargs):
@@ -991,7 +990,8 @@ def enum(**kwargs):
def stable_partition(
input_iterable: Iterable, predicate_fn: Callable[[Any], bool]
input_iterable: Iterable,
predicate_fn: Callable[[Any], bool],
) -> Tuple[List[Any], List[Any]]:
"""Partition the input iterable according to a custom predicate.
@@ -1104,7 +1104,11 @@ def __enter__(self):
def __exit__(self, exc_type, exc_value, tb):
if exc_value is not None:
self._handler._receive_forwarded(self._context, exc_value, traceback.format_tb(tb))
self._handler._receive_forwarded(
self._context,
exc_value,
traceback.format_tb(tb),
)
# Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
View File
@@ -75,7 +75,7 @@ def __init__(self, ignore=None):
# so that we have a fast lookup and can run mkdir in order.
self.directories = OrderedDict()
# Files to link. Maps dst_rel to (src_root, src_rel)
# Files to link. Maps dst_rel to (src_rel, src_root)
self.files = OrderedDict()
def before_visit_dir(self, root, rel_path, depth):
@@ -430,11 +430,6 @@ class MergeConflictError(Exception):
pass
class ConflictingSpecsError(MergeConflictError):
def __init__(self, spec_1, spec_2):
super(MergeConflictError, self).__init__(spec_1, spec_2)
class SingleMergeConflictError(MergeConflictError):
def __init__(self, path):
super(MergeConflictError, self).__init__("Package merge blocked by file: %s" % path)
View File
@@ -18,7 +18,7 @@ class Barrier:
Python 2 doesn't have multiprocessing barriers so we implement this.
See https://greenteapress.com/semaphores/LittleBookOfSemaphores.pdf, p. 41.
See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41.
"""
def __init__(self, n, timeout=None):
View File
@@ -5,13 +5,15 @@
import errno
import os
import shutil
import sys
import tempfile
from os.path import exists, join
from sys import platform as _platform
from llnl.util import lang
if sys.platform == "win32":
is_windows = _platform == "win32"
if is_windows:
from win32file import CreateHardLink
@@ -21,7 +23,7 @@ def symlink(real_path, link_path):
On Windows, use junctions if os.symlink fails.
"""
if sys.platform != "win32":
if not is_windows:
os.symlink(real_path, link_path)
elif _win32_can_symlink():
# Windows requires target_is_directory=True when the target is a dir.
@@ -30,15 +32,9 @@ def symlink(real_path, link_path):
try:
# Try to use junctions
_win32_junction(real_path, link_path)
except OSError as e:
if e.errno == errno.EEXIST:
# EEXIST error indicates that file we're trying to "link"
# is already present, don't bother trying to copy which will also fail
# just raise
raise
else:
# If all else fails, fall back to copying files
shutil.copyfile(real_path, link_path)
except OSError:
# If all else fails, fall back to copying files
shutil.copyfile(real_path, link_path)
def islink(path):
@@ -103,7 +99,7 @@ def _win32_is_junction(path):
if os.path.islink(path):
return False
if sys.platform == "win32":
if is_windows:
import ctypes.wintypes
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
View File
@@ -108,6 +108,7 @@ class SuppressOutput:
"""Class for disabling output in a scope using 'with' keyword"""
def __init__(self, msg_enabled=True, warn_enabled=True, error_enabled=True):
self._msg_enabled_initial = _msg_enabled
self._warn_enabled_initial = _warn_enabled
self._error_enabled_initial = _error_enabled
View File
@@ -11,7 +11,6 @@
import io
import os
import sys
from typing import IO, Any, List, Optional
from llnl.util.tty import terminal_size
from llnl.util.tty.color import cextra, clen
@@ -98,16 +97,7 @@ def config_uniform_cols(elts, console_width, padding, cols=0):
return config
def colify(
elts: List[Any],
cols: int = 0,
output: Optional[IO] = None,
indent: int = 0,
padding: int = 2,
tty: Optional[bool] = None,
method: str = "variable",
console_cols: Optional[int] = None,
):
def colify(elts, **options):
"""Takes a list of elements as input and finds a good columnization
of them, similar to how gnu ls does. This supports both
uniform-width and variable-width (tighter) columns.
@@ -116,21 +106,31 @@ def colify(
using ``str()``.
Keyword Arguments:
output: A file object to write to. Default is ``sys.stdout``
indent: Optionally indent all columns by some number of spaces
padding: Spaces between columns. Default is 2
width: Width of the output. Default is 80 if tty not detected
cols: Force number of columns. Default is to size to terminal, or
output (typing.IO): A file object to write to. Default is ``sys.stdout``
indent (int): Optionally indent all columns by some number of spaces
padding (int): Spaces between columns. Default is 2
width (int): Width of the output. Default is 80 if tty not detected
cols (int): Force number of columns. Default is to size to terminal, or
single-column if no tty
tty: Whether to attempt to write to a tty. Default is to autodetect a
tty (bool): Whether to attempt to write to a tty. Default is to autodetect a
tty. Set to False to force single-column output
method: Method to use to fit columns. Options are variable or uniform.
method (str): Method to use to fit columns. Options are variable or uniform.
Variable-width columns are tighter, uniform columns are all the same width
and fit less data on the screen
console_cols: number of columns on this console (default: autodetect)
"""
if output is None:
output = sys.stdout
# Get keyword arguments or set defaults
cols = options.pop("cols", 0)
output = options.pop("output", sys.stdout)
indent = options.pop("indent", 0)
padding = options.pop("padding", 2)
tty = options.pop("tty", None)
method = options.pop("method", "variable")
console_cols = options.pop("width", None)
if options:
raise TypeError(
"'%s' is an invalid keyword argument for this function." % next(options.iterkeys())
)
# elts needs to be an array of strings so we can count the elements
elts = [str(elt) for elt in elts]
@@ -153,11 +153,10 @@ def colify(
cols = 1
# Specify the number of character columns to use.
if console_cols is None:
if not console_cols:
console_rows, console_cols = terminal_size()
elif not isinstance(console_cols, int):
elif type(console_cols) != int:
raise ValueError("Number of columns must be an int")
console_cols = max(1, console_cols - indent)
# Choose a method. Variable-width columns vs uniform-width.
@@ -193,13 +192,7 @@ def colify(
return (config.cols, tuple(config.widths))
def colify_table(
table: List[List[Any]],
output: Optional[IO] = None,
indent: int = 0,
padding: int = 2,
console_cols: Optional[int] = None,
):
def colify_table(table, **options):
"""Version of ``colify()`` for data expressed in rows, (list of lists).
Same as regular colify but:
@@ -225,38 +218,20 @@ def transpose():
for row in table:
yield row[i]
colify(
transpose(),
cols=columns, # this is always the number of cols in the table
tty=True, # don't reduce to 1 column for non-tty
output=output,
indent=indent,
padding=padding,
console_cols=console_cols,
)
if "cols" in options:
raise ValueError("Cannot override columsn in colify_table.")
options["cols"] = columns
# don't reduce to 1 column for non-tty
options["tty"] = True
colify(transpose(), **options)
def colified(
elts: List[Any],
cols: int = 0,
output: Optional[IO] = None,
indent: int = 0,
padding: int = 2,
tty: Optional[bool] = None,
method: str = "variable",
console_cols: Optional[int] = None,
):
def colified(elts, **options):
"""Invokes the ``colify()`` function but returns the result as a string
instead of writing it to an output string."""
sio = io.StringIO()
colify(
elts,
cols=cols,
output=sio,
indent=indent,
padding=padding,
tty=tty,
method=method,
console_cols=console_cols,
)
options["output"] = sio
colify(elts, **options)
return sio.getvalue()
View File
@@ -161,7 +161,10 @@ def _is_background(self):
def _get_canon_echo_flags(self):
"""Get current termios canonical and echo settings."""
cfg = termios.tcgetattr(self.stream)
return (bool(cfg[3] & termios.ICANON), bool(cfg[3] & termios.ECHO))
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
)
def _enable_keyboard_input(self):
"""Disable canonical input and echoing on ``self.stream``."""
View File
@@ -77,7 +77,10 @@ def __init__(self, pid, controller_fd, timeout=1, sleep_time=1e-1, debug=False):
def get_canon_echo_attrs(self):
"""Get echo and canon attributes of the terminal of controller_fd."""
cfg = termios.tcgetattr(self.controller_fd)
return (bool(cfg[3] & termios.ICANON), bool(cfg[3] & termios.ECHO))
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
)
def horizontal_line(self, name):
"""Labled horizontal line for debugging."""
@@ -89,7 +92,11 @@ def status(self):
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write(
"canon: %s, echo: %s\n" % ("on" if canon else "off", "on" if echo else "off")
"canon: %s, echo: %s\n"
% (
"on" if canon else "off",
"on" if echo else "off",
)
)
sys.stderr.write("input: %s\n" % self.input_on())
sys.stderr.write("bg: %s\n" % self.background())
View File
@@ -25,7 +25,7 @@ def architecture_compatible(self, target, constraint):
return (
not target.architecture
or not constraint.architecture
or target.architecture.intersects(constraint.architecture)
or target.architecture.satisfies(constraint.architecture)
)
@memoized
@@ -104,7 +104,7 @@ def compiler_compatible(self, parent, child, **kwargs):
for cversion in child.compiler.versions:
# For a few compilers use specialized comparisons.
# Otherwise match on version match.
if pversion.intersects(cversion):
if pversion.satisfies(cversion):
return True
elif parent.compiler.name == "gcc" and self._gcc_compiler_compare(
pversion, cversion
View File
@@ -321,7 +321,8 @@ def _check_patch_urls(pkgs, error_cls):
errors.append(
error_cls(
"patch URL in package {0} must end with {1}".format(
pkg_cls.name, full_index_arg
pkg_cls.name,
full_index_arg,
),
[patch.url],
)
@@ -695,11 +696,8 @@ def _ensure_variant_defaults_are_parsable(pkgs, error_cls):
try:
variant.validate_or_raise(vspec, pkg_cls=pkg_cls)
except spack.variant.InvalidVariantValueError:
error_msg = (
"The default value of the variant '{}' in package '{}' failed validation"
)
question = "Is it among the allowed values?"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), [question]))
error_msg = "The variant '{}' default value in package '{}' cannot be validated"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), []))
return errors
@@ -724,7 +722,7 @@ def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls
dependency_pkg_cls = None
try:
dependency_pkg_cls = spack.repo.path.get_pkg_class(s.name)
assert any(v.intersects(s.versions) for v in list(dependency_pkg_cls.versions))
assert any(v.satisfies(s.versions) for v in list(dependency_pkg_cls.versions))
except Exception:
summary = (
"{0}: dependency on {1} cannot be satisfied " "by known versions of {1.name}"
View File
@@ -6,8 +6,6 @@
import codecs
import collections
import hashlib
import io
import itertools
import json
import multiprocessing.pool
import os
@@ -22,8 +20,7 @@
import urllib.parse
import urllib.request
import warnings
from contextlib import closing, contextmanager
from gzip import GzipFile
from contextlib import closing
from urllib.error import HTTPError, URLError
import ruamel.yaml as yaml
@@ -42,10 +39,7 @@
import spack.platforms
import spack.relocate as relocate
import spack.repo
import spack.stage
import spack.store
import spack.traverse as traverse
import spack.util.crypto
import spack.util.file_cache as file_cache
import spack.util.gpg
import spack.util.spack_json as sjson
@@ -53,7 +47,7 @@
import spack.util.url as url_util
import spack.util.web as web_util
from spack.caches import misc_cache_location
from spack.relocate_text import utf8_paths_to_single_binary_regex
from spack.relocate import utf8_paths_to_single_binary_regex
from spack.spec import Spec
from spack.stage import Stage
from spack.util.executable import which
@@ -215,7 +209,10 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):
break
else:
self._mirrors_for_spec[dag_hash].append(
{"mirror_url": mirror_url, "spec": indexed_spec}
{
"mirror_url": mirror_url,
"spec": indexed_spec,
}
)
finally:
shutil.rmtree(tmpdir)
@@ -297,9 +294,10 @@ def update_spec(self, spec, found_list):
cur_entry["spec"] = new_entry["spec"]
break
else:
current_list.append(
{"mirror_url": new_entry["mirror_url"], "spec": new_entry["spec"]}
)
current_list.append = {
"mirror_url": new_entry["mirror_url"],
"spec": new_entry["spec"],
}
def update(self, with_cooldown=False):
"""Make sure local cache of buildcache index files is up to date.
@@ -366,7 +364,8 @@ def update(self, with_cooldown=False):
# May need to fetch the index and update the local caches
try:
needs_regen = self._fetch_and_cache_index(
cached_mirror_url, cache_entry=cache_entry
cached_mirror_url,
cache_entry=cache_entry,
)
self._last_fetch_times[cached_mirror_url] = (now, True)
all_methods_failed = False
@@ -558,12 +557,7 @@ class NoChecksumException(spack.error.SpackError):
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super(NoChecksumException, self).__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
pass
class NewLayoutException(spack.error.SpackError):
@@ -743,31 +737,34 @@ def get_buildfile_manifest(spec):
return data
def prefixes_to_hashes(spec):
return {
str(s.prefix): s.dag_hash()
for s in itertools.chain(
spec.traverse(root=True, deptype="link"), spec.dependencies(deptype="run")
)
}
def get_buildinfo_dict(spec, rel=False):
"""Create metadata for a tarball"""
def write_buildinfo_file(spec, workdir, rel=False):
"""
Create a cache file containing information
required for the relocation
"""
manifest = get_buildfile_manifest(spec)
return {
"sbang_install_path": spack.hooks.sbang.sbang_install_path(),
"relative_rpaths": rel,
"buildpath": spack.store.layout.root,
"spackprefix": spack.paths.prefix,
"relative_prefix": os.path.relpath(spec.prefix, spack.store.layout.root),
"relocate_textfiles": manifest["text_to_relocate"],
"relocate_binaries": manifest["binary_to_relocate"],
"relocate_links": manifest["link_to_relocate"],
"hardlinks_deduped": manifest["hardlinks_deduped"],
"prefix_to_hash": prefixes_to_hashes(spec),
}
prefix_to_hash = dict()
prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
deps = spack.build_environment.get_rpath_deps(spec.package)
for d in deps + spec.dependencies(deptype="run"):
prefix_to_hash[str(d.prefix)] = d.dag_hash()
# Create buildinfo data and write it to disk
buildinfo = {}
buildinfo["sbang_install_path"] = spack.hooks.sbang.sbang_install_path()
buildinfo["relative_rpaths"] = rel
buildinfo["buildpath"] = spack.store.layout.root
buildinfo["spackprefix"] = spack.paths.prefix
buildinfo["relative_prefix"] = os.path.relpath(spec.prefix, spack.store.layout.root)
buildinfo["relocate_textfiles"] = manifest["text_to_relocate"]
buildinfo["relocate_binaries"] = manifest["binary_to_relocate"]
buildinfo["relocate_links"] = manifest["link_to_relocate"]
buildinfo["hardlinks_deduped"] = manifest["hardlinks_deduped"]
buildinfo["prefix_to_hash"] = prefix_to_hash
filename = buildinfo_file_name(workdir)
with open(filename, "w") as outfile:
outfile.write(syaml.dump(buildinfo, default_flow_style=True))
def tarball_directory_name(spec):
@@ -1140,68 +1137,6 @@ def generate_key_index(key_prefix, tmpdir=None):
shutil.rmtree(tmpdir)
@contextmanager
def gzip_compressed_tarfile(path):
"""Create a reproducible, compressed tarfile"""
# Create gzip compressed tarball of the install prefix
# 1) Use explicit empty filename and mtime 0 for gzip header reproducibility.
# If the filename="" is dropped, Python will use fileobj.name instead.
# This should effectively mimic `gzip --no-name`.
# 2) On AMD Ryzen 3700X and an SSD disk, we have the following on compression speed:
# compresslevel=6 gzip default: llvm takes 4mins, roughly 2.1GB
# compresslevel=9 python default: llvm takes 12mins, roughly 2.1GB
# So we follow gzip.
with open(path, "wb") as fileobj, closing(
GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=fileobj)
) as gzip_file, tarfile.TarFile(name="", mode="w", fileobj=gzip_file) as tar:
yield tar
def deterministic_tarinfo(tarinfo: tarfile.TarInfo):
# We only add files, symlinks, hardlinks, and directories
# No character devices, block devices and FIFOs should ever enter a tarball.
if tarinfo.isdev():
return None
# For distribution, it makes no sense to user/group data; since (a) they don't exist
# on other machines, and (b) they lead to surprises as `tar x` run as root will change
# ownership if it can. We want to extract as the current user. By setting owner to root,
# root will extract as root, and non-privileged user will extract as themselves.
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.uname = ""
tarinfo.gname = ""
# Reset mtime to epoch time, our prefixes are not truly immutable, so files may get
# touched; as long as the content does not change, this ensures we get stable tarballs.
tarinfo.mtime = 0
# Normalize mode
if tarinfo.isfile() or tarinfo.islnk():
# If user can execute, use 0o755; else 0o644
# This is to avoid potentially unsafe world writable & exeutable files that may get
# extracted when Python or tar is run with privileges
tarinfo.mode = 0o644 if tarinfo.mode & 0o100 == 0 else 0o755
else: # symbolic link and directories
tarinfo.mode = 0o755
return tarinfo
def tar_add_metadata(tar: tarfile.TarFile, path: str, data: dict):
# Serialize buildinfo for the tarball
bstring = syaml.dump(data, default_flow_style=True).encode("utf-8")
tarinfo = tarfile.TarInfo(name=path)
tarinfo.size = len(bstring)
tar.addfile(deterministic_tarinfo(tarinfo), io.BytesIO(bstring))
def _do_create_tarball(tarfile_path, binaries_dir, pkg_dir, buildinfo):
with gzip_compressed_tarfile(tarfile_path) as tar:
tar.add(name=binaries_dir, arcname=pkg_dir, filter=deterministic_tarinfo)
tar_add_metadata(tar, buildinfo_file_name(pkg_dir), buildinfo)
def _build_tarball(
spec,
out_url,
@@ -1219,37 +1154,15 @@ def _build_tarball(
if not spec.concrete:
raise ValueError("spec must be concrete to build tarball")
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
_build_tarball_in_stage_dir(
spec,
out_url,
stage_dir=tmpdir,
force=force,
relative=relative,
unsigned=unsigned,
allow_root=allow_root,
key=key,
regenerate_index=regenerate_index,
)
# set up some paths
tmpdir = tempfile.mkdtemp()
cache_prefix = build_cache_prefix(tmpdir)
def _build_tarball_in_stage_dir(
spec,
out_url,
stage_dir,
force=False,
relative=False,
unsigned=False,
allow_root=False,
key=None,
regenerate_index=False,
):
cache_prefix = build_cache_prefix(stage_dir)
tarfile_name = tarball_name(spec, ".spack")
tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
tarfile_path = os.path.join(tarfile_dir, tarfile_name)
spackfile_path = os.path.join(cache_prefix, tarball_path_name(spec, ".spack"))
remote_spackfile_path = url_util.join(out_url, os.path.relpath(spackfile_path, stage_dir))
remote_spackfile_path = url_util.join(out_url, os.path.relpath(spackfile_path, tmpdir))
mkdirp(tarfile_dir)
if web_util.url_exists(remote_spackfile_path):
@@ -1268,7 +1181,7 @@ def _build_tarball_in_stage_dir(
signed_specfile_path = "{0}.sig".format(specfile_path)
remote_specfile_path = url_util.join(
out_url, os.path.relpath(specfile_path, os.path.realpath(stage_dir))
out_url, os.path.relpath(specfile_path, os.path.realpath(tmpdir))
)
remote_signed_specfile_path = "{0}.sig".format(remote_specfile_path)
@@ -1283,41 +1196,50 @@ def _build_tarball_in_stage_dir(
):
raise NoOverwriteException(url_util.format(remote_specfile_path))
pkg_dir = os.path.basename(spec.prefix.rstrip(os.path.sep))
workdir = os.path.join(stage_dir, pkg_dir)
# TODO: We generally don't want to mutate any files, but when using relative
# mode, Spack unfortunately *does* mutate rpaths and links ahead of time.
# For now, we only make a full copy of the spec prefix when in relative mode.
if relative:
# tarfile is used because it preserves hardlink etc best.
binaries_dir = workdir
temp_tarfile_name = tarball_name(spec, ".tar")
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, "w")) as tar:
tar.add(name="%s" % spec.prefix, arcname=".")
with closing(tarfile.open(temp_tarfile_path, "r")) as tar:
tar.extractall(workdir)
os.remove(temp_tarfile_path)
else:
binaries_dir = spec.prefix
# make a copy of the install directory to work with
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
# install_tree copies hardlinks
# create a temporary tarfile from prefix and exract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, ".tar")
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, "w")) as tar:
tar.add(name="%s" % spec.prefix, arcname=".")
with closing(tarfile.open(temp_tarfile_path, "r")) as tar:
tar.extractall(workdir)
os.remove(temp_tarfile_path)
# create info for later relocation and create tar
buildinfo = get_buildinfo_dict(spec, relative)
write_buildinfo_file(spec, workdir, relative)
# optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
if relative:
make_package_relative(workdir, spec, buildinfo, allow_root)
elif not allow_root:
ensure_package_relocatable(buildinfo, binaries_dir)
_do_create_tarball(tarfile_path, binaries_dir, pkg_dir, buildinfo)
try:
make_package_relative(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
else:
try:
check_package_relocatable(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
# create gzip compressed tarball of the install prefix
# On AMD Ryzen 3700X and an SSD disk, we have the following on compression speed:
# compresslevel=6 gzip default: llvm takes 4mins, roughly 2.1GB
# compresslevel=9 python default: llvm takes 12mins, roughly 2.1GB
# So we follow gzip.
with closing(tarfile.open(tarfile_path, "w:gz", compresslevel=6)) as tar:
tar.add(name="%s" % workdir, arcname="%s" % os.path.basename(spec.prefix))
# remove copy of install directory
if relative:
shutil.rmtree(workdir)
shutil.rmtree(workdir)
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
@@ -1343,11 +1265,7 @@ def _build_tarball_in_stage_dir(
spec_dict["buildinfo"] = buildinfo
with open(specfile_path, "w") as outfile:
# Note: when using gpg clear sign, we need to avoid long lines (19995 chars).
# If lines are longer, they are truncated without error. Thanks GPG!
# So, here we still add newlines, but no indent, so save on file size and
# line length.
json.dump(spec_dict, outfile, indent=0, separators=(",", ":"))
outfile.write(sjson.dump(spec_dict))
# sign the tarball and spec file with gpg
if not unsigned:
@@ -1364,61 +1282,73 @@ def _build_tarball_in_stage_dir(
tty.debug('Buildcache for "{0}" written to \n {1}'.format(spec, remote_spackfile_path))
# push the key to the build cache's _pgp directory so it can be
# imported
if not unsigned:
push_keys(out_url, keys=[key], regenerate_index=regenerate_index, tmpdir=stage_dir)
try:
# push the key to the build cache's _pgp directory so it can be
# imported
if not unsigned:
push_keys(out_url, keys=[key], regenerate_index=regenerate_index, tmpdir=tmpdir)
# create an index.json for the build_cache directory so specs can be
# found
if regenerate_index:
generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir)))
# create an index.json for the build_cache directory so specs can be
# found
if regenerate_index:
generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, tmpdir)))
finally:
shutil.rmtree(tmpdir)
return None
def nodes_to_be_packaged(specs, root=True, dependencies=True):
def nodes_to_be_packaged(specs, include_root=True, include_dependencies=True):
"""Return the list of nodes to be packaged, given a list of specs.
Args:
specs (List[spack.spec.Spec]): list of root specs to be processed
root (bool): include the root of each spec in the nodes
dependencies (bool): include the dependencies of each
include_root (bool): include the root of each spec in the nodes
include_dependencies (bool): include the dependencies of each
spec in the nodes
"""
if not root and not dependencies:
return []
elif dependencies:
nodes = traverse.traverse_nodes(specs, root=root, deptype="all")
else:
nodes = set(specs)
if not include_root and not include_dependencies:
return set()
# Limit to installed non-externals.
packageable = lambda n: not n.external and n.installed
def skip_node(current_node):
if current_node.external or current_node.virtual:
return True
return spack.store.db.query_one(current_node) is None
# Mass install check
with spack.store.db.read_transaction():
return list(filter(packageable, nodes))
expanded_set = set()
for current_spec in specs:
if not include_dependencies:
nodes = [current_spec]
else:
nodes = [
n
for n in current_spec.traverse(
order="post", root=include_root, deptype=("link", "run")
)
]
for node in nodes:
if not skip_node(node):
expanded_set.add(node)
return expanded_set
def push(specs, push_url, include_root: bool = True, include_dependencies: bool = True, **kwargs):
def push(specs, push_url, specs_kwargs=None, **kwargs):
"""Create a binary package for each of the specs passed as input and push them
to a given push URL.
Args:
specs (List[spack.spec.Spec]): installed specs to be packaged
push_url (str): url where to push the binary package
include_root (bool): include the root of each spec in the nodes
include_dependencies (bool): include the dependencies of each
spec in the nodes
specs_kwargs (dict): dictionary with two possible boolean keys, "include_root"
and "include_dependencies", which determine which part of each spec is
packaged and pushed to the mirror
**kwargs: TODO
"""
# Be explicit about the argument type
if type(include_root) != bool or type(include_dependencies) != bool:
raise ValueError("Expected include_root/include_dependencies to be True/False")
nodes = nodes_to_be_packaged(specs, root=include_root, dependencies=include_dependencies)
specs_kwargs = specs_kwargs or {"include_root": True, "include_dependencies": True}
nodes = nodes_to_be_packaged(specs, **specs_kwargs)
# TODO: This seems to be an easy target for task
# TODO: distribution using a parallel pool
@@ -1605,12 +1535,13 @@ def download_tarball(spec, unsigned=False, mirrors_for_spec=None):
return None
def make_package_relative(workdir, spec, buildinfo, allow_root):
def make_package_relative(workdir, spec, allow_root):
"""
Change paths in binaries to relative paths. Change absolute symlinks
to relative symlinks.
"""
prefix = spec.prefix
buildinfo = read_buildinfo_file(workdir)
old_layout_root = buildinfo["buildpath"]
orig_path_names = list()
cur_path_names = list()
@@ -1634,10 +1565,16 @@ def make_package_relative(workdir, spec, buildinfo, allow_root):
relocate.make_link_relative(cur_path_names, orig_path_names)
def ensure_package_relocatable(buildinfo, binaries_dir):
"""Check if package binaries are relocatable."""
binaries = [os.path.join(binaries_dir, f) for f in buildinfo["relocate_binaries"]]
relocate.ensure_binaries_are_relocatable(binaries)
def check_package_relocatable(workdir, spec, allow_root):
"""
Check if package binaries are relocatable.
Change links to placeholder links.
"""
buildinfo = read_buildinfo_file(workdir)
cur_path_names = list()
for filename in buildinfo["relocate_binaries"]:
cur_path_names.append(os.path.join(workdir, filename))
allow_root or relocate.ensure_binaries_are_relocatable(cur_path_names)
def dedupe_hardlinks_if_necessary(root, buildinfo):
@@ -1793,16 +1730,16 @@ def is_backup_file(file):
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names, prefix_to_prefix_text)
relocate.unsafe_relocate_text(text_names, prefix_to_prefix_text)
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate, prefix_to_prefix_bin)
relocate.unsafe_relocate_text_bin(files_to_relocate, prefix_to_prefix_bin)
# If we are installing back to the same location
# relocate the sbang location if the spack directory changed
else:
if old_spack_prefix != new_spack_prefix:
relocate.relocate_text(text_names, prefix_to_prefix_text)
relocate.unsafe_relocate_text(text_names, prefix_to_prefix_text)
def _extract_inner_tarball(spec, filename, extract_to, unsigned, remote_checksum):
@@ -1840,15 +1777,14 @@ def _extract_inner_tarball(spec, filename, extract_to, unsigned, remote_checksum
raise UnsignedPackageException(
"To install unsigned packages, use the --no-check-signature option."
)
# compute the sha256 checksum of the tarball
# get the sha256 checksum of the tarball
local_checksum = checksum_tarball(tarfile_path)
expected = remote_checksum["hash"]
# if the checksums don't match don't install
if local_checksum != expected:
size, contents = fsys.filesummary(tarfile_path)
raise NoChecksumException(tarfile_path, size, contents, "sha256", expected, local_checksum)
if local_checksum != remote_checksum["hash"]:
raise NoChecksumException(
"Package tarball failed checksum verification.\n" "It cannot be installed."
)
return tarfile_path
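Both sides of the checksum hunks above boil down to comparing a locally computed sha256 digest of the downloaded tarball against the hash recorded in the mirror metadata. A minimal sketch of that comparison, assuming a streaming hashlib helper (the helper name and chunk size are illustrative, not Spack's checksum_tarball):

    import hashlib

    def sha256_of_file(path, chunk_size=65536):
        # Stream the file so large tarballs do not have to fit in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # if sha256_of_file(tarfile_path) != remote_checksum["hash"]:
    #     raise NoChecksumException(...)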
@@ -1906,14 +1842,12 @@ def extract_tarball(spec, download_result, allow_root=False, unsigned=False, for
# compute the sha256 checksum of the tarball
local_checksum = checksum_tarball(tarfile_path)
expected = bchecksum["hash"]
# if the checksums don't match don't install
if local_checksum != expected:
size, contents = fsys.filesummary(tarfile_path)
if local_checksum != bchecksum["hash"]:
_delete_staged_downloads(download_result)
raise NoChecksumException(
tarfile_path, size, contents, "sha256", expected, local_checksum
"Package tarball failed checksum verification.\n" "It cannot be installed."
)
new_relative_prefix = str(os.path.relpath(spec.prefix, spack.store.layout.root))
@@ -2004,11 +1938,8 @@ def install_root_node(spec, allow_root, unsigned=False, force=False, sha256=None
tarball_path = download_result["tarball_stage"].save_filename
msg = msg.format(tarball_path, sha256)
if not checker.check(tarball_path):
size, contents = fsys.filesummary(tarball_path)
_delete_staged_downloads(download_result)
raise NoChecksumException(
tarball_path, size, contents, checker.hash_name, sha256, checker.sum
)
raise spack.binary_distribution.NoChecksumException(msg)
tty.debug("Verified SHA256 checksum of the build cache")
# don't print long padded paths while extracting/relocating binaries
@@ -2082,7 +2013,12 @@ def try_direct_fetch(spec, mirrors=None):
fetched_spec = Spec.from_json(specfile_contents)
fetched_spec._mark_concrete()
found_specs.append({"mirror_url": mirror.fetch_url, "spec": fetched_spec})
found_specs.append(
{
"mirror_url": mirror.fetch_url,
"spec": fetched_spec,
}
)
return found_specs
@@ -2384,7 +2320,11 @@ def download_single_spec(concrete_spec, destination, mirror_url=None):
local_tarball_path = os.path.join(destination, tarball_dir_name)
files_to_fetch = [
{"url": [tarball_path_name], "path": local_tarball_path, "required": True},
{
"url": [tarball_path_name],
"path": local_tarball_path,
"required": True,
},
{
"url": [
tarball_name(concrete_spec, ".spec.json.sig"),
@@ -2505,7 +2445,12 @@ def conditional_fetch(self):
response.headers.get("Etag", None) or response.headers.get("etag", None)
)
return FetchIndexResult(etag=etag, hash=computed_hash, data=result, fresh=False)
return FetchIndexResult(
etag=etag,
hash=computed_hash,
data=result,
fresh=False,
)
class EtagIndexFetcher:

View File

@@ -5,7 +5,11 @@
"""Function and classes needed to bootstrap Spack itself."""
from .config import ensure_bootstrap_configuration, is_bootstrapping
from .core import all_core_root_specs, ensure_core_dependencies, ensure_patchelf_in_path_or_raise
from .core import (
all_core_root_specs,
ensure_core_dependencies,
ensure_patchelf_in_path_or_raise,
)
from .environment import BootstrapEnvironment, ensure_environment_dependencies
from .status import status_message

View File

@@ -59,7 +59,10 @@ def _try_import_from_store(module, query_spec, query_info=None):
# to be picked up and used, possibly depending on something in the store, first
# allows the bootstrap version to work when an incompatible version is in
# sys.path
orders = [module_paths + sys.path, sys.path + module_paths]
orders = [
module_paths + sys.path,
sys.path + module_paths,
]
for path in orders:
sys.path = path
try:

View File

@@ -53,7 +53,12 @@
import spack.util.url
import spack.version
from ._common import _executables_in_store, _python_import, _root_spec, _try_import_from_store
from ._common import (
_executables_in_store,
_python_import,
_root_spec,
_try_import_from_store,
)
from .config import spack_python_interpreter, spec_for_current_python
#: Name of the file containing metadata about the bootstrapping source
@@ -208,7 +213,7 @@ def _install_and_test(self, abstract_spec, bincache_platform, bincache_data, tes
# This will be None for things that don't depend on python
python_spec = item.get("python", None)
# Skip specs which are not compatible
if not abstract_spec.intersects(candidate_spec):
if not abstract_spec.satisfies(candidate_spec):
continue
if python_spec is not None and python_spec not in abstract_spec:

View File

@@ -171,7 +171,7 @@ def mypy_root_spec():
def black_root_spec():
"""Return the root spec used to bootstrap black"""
return _root_spec("py-black@:23.1.0")
return _root_spec("py-black")
def flake8_root_spec():

View File

@@ -69,13 +69,13 @@
from spack.installer import InstallError
from spack.util.cpus import cpus_available
from spack.util.environment import (
SYSTEM_DIRS,
EnvironmentModifications,
env_flag,
filter_system_paths,
get_path,
inspect_path,
is_system_path,
system_dirs,
validate,
)
from spack.util.executable import Executable
@@ -397,7 +397,7 @@ def set_compiler_environment_variables(pkg, env):
env.set("SPACK_COMPILER_SPEC", str(spec.compiler))
env.set("SPACK_SYSTEM_DIRS", ":".join(SYSTEM_DIRS))
env.set("SPACK_SYSTEM_DIRS", ":".join(system_dirs))
compiler.setup_custom_environment(pkg, env)
@@ -485,13 +485,7 @@ def update_compiler_args_for_dep(dep):
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
# In some circumstances (particularly for externals) finding
# libraries packages can be time consuming, so indicate that
# we are performing this operation (and also report when it
# finishes).
tty.debug("Collecting libraries for {0}".format(dep.name))
dep_link_dirs.extend(query.libs.directories)
tty.debug("Libraries for {0} have been collected.".format(dep.name))
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
@@ -778,9 +772,7 @@ def setup_package(pkg, dirty, context="build"):
set_compiler_environment_variables(pkg, env_mods)
set_wrapper_variables(pkg, env_mods)
tty.debug("setup_package: grabbing modifications from dependencies")
env_mods.extend(modifications_from_dependencies(pkg.spec, context, custom_mods_only=False))
tty.debug("setup_package: collected all modifications from dependencies")
# architecture specific setup
platform = spack.platforms.by_name(pkg.spec.architecture.platform)
@@ -788,7 +780,6 @@ def setup_package(pkg, dirty, context="build"):
platform.setup_platform_environment(pkg, env_mods)
if context == "build":
tty.debug("setup_package: setup build environment for root")
builder = spack.builder.create(pkg)
builder.setup_build_environment(env_mods)
@@ -799,7 +790,6 @@ def setup_package(pkg, dirty, context="build"):
" includes and omit it when invoked with '--cflags'."
)
elif context == "test":
tty.debug("setup_package: setup test environment for root")
env_mods.extend(
inspect_path(
pkg.spec.prefix,
@@ -816,7 +806,6 @@ def setup_package(pkg, dirty, context="build"):
# Load modules on an already clean environment, just before applying Spack's
# own environment modifications. This ensures Spack controls CC/CXX/... variables.
if need_compiler:
tty.debug("setup_package: loading compiler modules")
for mod in pkg.compiler.modules:
load_module(mod)
@@ -954,7 +943,6 @@ def default_modifications_for_dep(dep):
_make_runnable(dep, env)
def add_modifications_for_dep(dep):
tty.debug("Adding env modifications for {0}".format(dep.name))
# Some callers of this function only want the custom modifications.
# For callers that want both custom and default modifications, we want
# to perform the default modifications here (this groups custom
@@ -980,7 +968,6 @@ def add_modifications_for_dep(dep):
builder.setup_dependent_build_environment(env, spec)
else:
dpkg.setup_dependent_run_environment(env, spec)
tty.debug("Added env modifications for {0}".format(dep.name))
# Note that we want to perform environment modifications in a fixed order.
# The Spec.traverse method provides this: i.e. in addition to
@@ -1029,6 +1016,7 @@ def get_cmake_prefix_path(pkg):
def _setup_pkg_and_run(
serialized_pkg, function, kwargs, child_pipe, input_multiprocess_fd, jsfd1, jsfd2
):
context = kwargs.get("context", "build")
try:

View File

@@ -110,7 +110,11 @@ class AutotoolsBuilder(BaseBuilder):
phases = ("autoreconf", "configure", "build", "install")
#: Names associated with package methods in the old build-system format
legacy_methods = ("configure_args", "check", "installcheck")
legacy_methods = (
"configure_args",
"check",
"installcheck",
)
#: Names associated with package attributes in the old build-system format
legacy_attributes = (

View File

@@ -31,6 +31,7 @@ def cmake_cache_option(name, boolean_value, comment=""):
class CachedCMakeBuilder(CMakeBuilder):
#: Phases of a Cached CMake package
#: Note: the initconfig phase is used for developer builds as a final phase to stop on
phases: Tuple[str, ...] = ("initconfig", "cmake", "build", "install")

View File

@@ -8,7 +8,7 @@
import platform
import re
import sys
from typing import List, Optional, Tuple
from typing import List, Tuple
import llnl.util.filesystem as fs
@@ -16,7 +16,7 @@
import spack.builder
import spack.package_base
import spack.util.path
from spack.directives import build_system, conflicts, depends_on, variant
from spack.directives import build_system, depends_on, variant
from spack.multimethod import when
from ._checks import BaseBuilder, execute_build_time_tests
@@ -35,43 +35,6 @@ def _extract_primary_generator(generator):
return primary_generator
def generator(*names: str, default: Optional[str] = None):
"""The build system generator to use.
See ``cmake --help`` for a list of valid generators.
Currently, "Unix Makefiles" and "Ninja" are the only generators
that Spack supports. Defaults to "Unix Makefiles".
See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
for more information.
Args:
names: allowed generators for this package
default: default generator
"""
allowed_values = ("make", "ninja")
if any(x not in allowed_values for x in names):
msg = "only 'make' and 'ninja' are allowed for CMake's 'generator' directive"
raise ValueError(msg)
default = default or names[0]
not_used = [x for x in allowed_values if x not in names]
def _values(x):
return x in allowed_values
_values.__doc__ = f"{','.join(names)}"
variant(
"generator",
default=default,
values=_values,
description="the build system generator to use",
)
for x in not_used:
conflicts(f"generator={x}")
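For context, a recipe opts into this directive simply by calling it in the class body, exactly as the CMakePackage base class below does. A hypothetical recipe (the package name is made up, and this only runs inside a Spack package repository) might declare:

    class Mylib(CMakePackage):
        """Example recipe showing the generator directive."""
        generator("ninja", "make", default="ninja")

which defines the "generator" variant with values make/ninja, defaults it to ninja, and adds a conflict for every allowed value that was not listed.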
class CMakePackage(spack.package_base.PackageBase):
"""Specialized class for packages built using CMake
@@ -104,15 +67,8 @@ class CMakePackage(spack.package_base.PackageBase):
when="^cmake@3.9:",
description="CMake interprocedural optimization",
)
if sys.platform == "win32":
generator("ninja")
else:
generator("ninja", "make", default="make")
depends_on("cmake", type="build")
depends_on("gmake", type="build", when="generator=make")
depends_on("ninja", type="build", when="generator=ninja")
depends_on("ninja", type="build", when="platform=windows")
def flags_to_build_system_args(self, flags):
"""Return a list of all command line arguments to pass the specified
@@ -182,6 +138,18 @@ class CMakeBuilder(BaseBuilder):
| :py:meth:`~.CMakeBuilder.build_directory` | Directory where to |
| | build the package |
+-----------------------------------------------+--------------------+
The generator used by CMake can be specified by providing the ``generator``
attribute. Per
https://cmake.org/cmake/help/git-master/manual/cmake-generators.7.html,
the format is: [<secondary-generator> - ]<primary_generator>.
The full list of primary and secondary generators supported by CMake may be found
in the documentation for the version of CMake used; however, at this time Spack
supports only the primary generators "Unix Makefiles" and "Ninja." Spack's CMake
support is agnostic with respect to primary generators. Spack will generate a
runtime error if the generator string does not follow the prescribed format, or if
the primary generator is not supported.
"""
#: Phases of a CMake package
@@ -192,6 +160,7 @@ class CMakeBuilder(BaseBuilder):
#: Names associated with package attributes in the old build-system format
legacy_attributes: Tuple[str, ...] = (
"generator",
"build_targets",
"install_targets",
"build_time_test_callbacks",
@@ -202,6 +171,16 @@ class CMakeBuilder(BaseBuilder):
"build_directory",
)
#: The build system generator to use.
#:
#: See ``cmake --help`` for a list of valid generators.
#: Currently, "Unix Makefiles" and "Ninja" are the only generators
#: that Spack supports. Defaults to "Unix Makefiles".
#:
#: See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
#: for more information.
generator = "Ninja" if sys.platform == "win32" else "Unix Makefiles"
#: Targets to be used during the build phase
build_targets: List[str] = []
#: Targets to be used during the install phase
@@ -223,20 +202,12 @@ def root_cmakelists_dir(self):
"""
return self.pkg.stage.source_path
@property
def generator(self):
if self.spec.satisfies("generator=make"):
return "Unix Makefiles"
if self.spec.satisfies("generator=ninja"):
return "Ninja"
msg = f'{self.spec.format()} has an unsupported value for the "generator" variant'
raise ValueError(msg)
@property
def std_cmake_args(self):
"""Standard cmake arguments provided as a property for
convenience of package writers
"""
# standard CMake arguments
std_cmake_args = CMakeBuilder.std_args(self.pkg, generator=self.generator)
std_cmake_args += getattr(self.pkg, "cmake_flag_args", [])
return std_cmake_args
@@ -281,7 +252,10 @@ def std_args(pkg, generator=None):
if platform.mac_ver()[0]:
args.extend(
[define("CMAKE_FIND_FRAMEWORK", "LAST"), define("CMAKE_FIND_APPBUNDLE", "LAST")]
[
define("CMAKE_FIND_FRAMEWORK", "LAST"),
define("CMAKE_FIND_APPBUNDLE", "LAST"),
]
)
# Set up CMake rpath

View File

@@ -8,7 +8,7 @@
import spack.directives
import spack.package_base
from ._checks import BaseBuilder, apply_macos_rpath_fixups, execute_install_time_tests
from ._checks import BaseBuilder, apply_macos_rpath_fixups
class Package(spack.package_base.PackageBase):
@@ -38,13 +38,7 @@ class GenericBuilder(BaseBuilder):
legacy_methods: Tuple[str, ...] = ()
#: Names associated with package attributes in the old build-system format
legacy_attributes: Tuple[str, ...] = ("archive_files", "install_time_test_callbacks")
#: Callback names for post-install phase tests
install_time_test_callbacks = []
legacy_attributes: Tuple[str, ...] = ("archive_files",)
# On macOS, force rpaths for shared library IDs and remove duplicate rpaths
spack.builder.run_after("install", when="platform=darwin")(apply_macos_rpath_fixups)
# unconditionally perform any post-install phase tests
spack.builder.run_after("install")(execute_install_time_tests)

View File

@@ -857,7 +857,10 @@ def scalapack_libs(self):
raise_lib_error("Cannot find a BLACS library for the given MPI.")
int_suff = "_" + self.intel64_int_suffix
scalapack_libnames = ["libmkl_scalapack" + int_suff, blacs_lib + int_suff]
scalapack_libnames = [
"libmkl_scalapack" + int_suff,
blacs_lib + int_suff,
]
sca_libs = find_libraries(
scalapack_libnames, root=self.component_lib_dir("mkl"), shared=("+shared" in self.spec)
)
@@ -1158,7 +1161,9 @@ def _determine_license_type(self):
#
# Ideally, we just tell the installer to look around on the system.
# Thankfully, we neither need to care nor emulate where it looks:
license_type = {"ACTIVATION_TYPE": "exist_lic"}
license_type = {
"ACTIVATION_TYPE": "exist_lic",
}
# However (and only), if the spack-internal Intel license file has been
# populated beyond its templated explanatory comments, proffer it to

View File

@@ -68,7 +68,10 @@ def unpack(self, pkg, spec, prefix):
@staticmethod
def _generate_tree_line(name, prefix):
return """{{ name = "{name}", root = "{prefix}" }};""".format(name=name, prefix=prefix)
return """{{ name = "{name}", root = "{prefix}" }};""".format(
name=name,
prefix=prefix,
)
def generate_luarocks_config(self, pkg, spec, prefix):
spec = self.pkg.spec

View File

@@ -120,7 +120,6 @@ def std_meson_args(self):
of package writers.
"""
# standard Meson arguments
std_meson_args = MesonBuilder.std_args(self.pkg)
std_meson_args += getattr(self, "meson_flag_args", [])
return std_meson_args
@@ -183,10 +182,7 @@ def meson_args(self):
def meson(self, pkg, spec, prefix):
"""Run ``meson`` in the build directory"""
options = []
if self.spec["meson"].satisfies("@0.64:"):
options.append("setup")
options.append(os.path.abspath(self.root_mesonlists_dir))
options = [os.path.abspath(self.root_mesonlists_dir)]
options += self.std_meson_args
options += self.meson_args()
with fs.working_dir(self.build_directory, create=True):

View File

@@ -77,7 +77,7 @@ def toolchain_version(self):
Override this method to select a specific version of the toolchain or change
selection heuristics.
Default is whatever version of msvc has been selected by concretization"""
return "v" + self.pkg.compiler.platform_toolset_ver
return self.compiler.msvc_version
@property
def std_msbuild_args(self):

View File

@@ -92,7 +92,7 @@ def makefile_root(self):
This path is relative to the root of the extracted tarball,
not to the ``build_directory``. Defaults to the current directory.
"""
return self.stage.source_path
return self.stage.source_dir
@property
def nmakefile_name(self):

View File

@@ -37,7 +37,11 @@ class IntelOneApiPackage(Package):
conflicts(c, msg="This package is only available for x86_64 and Linux")
# Add variant to toggle environment modifications from vars.sh
variant("envmods", default=True, description="Toggles environment modifications")
variant(
"envmods",
default=True,
description="Toggles environment modifications",
)
@staticmethod
def update_description(cls):

View File

@@ -21,7 +21,7 @@
import spack.package_base
import spack.spec
import spack.store
from spack.directives import build_system, depends_on, extends, maintainers
from spack.directives import build_system, depends_on, extends
from spack.error import NoHeadersError, NoLibrariesError, SpecError
from spack.version import Version
@@ -29,7 +29,7 @@
class PythonExtension(spack.package_base.PackageBase):
maintainers("adamjstewart", "pradyunsg")
maintainers = ["adamjstewart"]
@property
def import_modules(self):
@@ -113,9 +113,6 @@ def view_file_conflicts(self, view, merge_map):
return conflicts
def add_files_to_view(self, view, merge_map, skip_if_exists=True):
if not self.extendee_spec:
return super().add_files_to_view(view, merge_map, skip_if_exists)
bin_dir = self.spec.prefix.bin
python_prefix = self.extendee_spec.prefix
python_is_external = self.extendee_spec.external
@@ -187,6 +184,8 @@ class PythonPackage(PythonExtension):
#: Package name, version, and extension on PyPI
pypi: Optional[str] = None
maintainers = ["adamjstewart", "pradyunsg"]
# To be used in UI queries that require to know which
# build-system class we are using
build_system_class = "PythonPackage"
@@ -268,7 +267,7 @@ def update_external_dependencies(self, extendee_spec=None):
python.external_path = self.spec.external_path
python._mark_concrete()
self.spec.add_dependency_edge(python, deptypes=("build", "link", "run"))
self.spec.add_dependency_edge(python, ("build", "link", "run"))
def get_external_python_for_prefix(self):
"""

View File

@@ -7,7 +7,7 @@
import llnl.util.lang as lang
from spack.directives import extends, maintainers
from spack.directives import extends
from .generic import GenericBuilder, Package
@@ -71,7 +71,7 @@ class RPackage(Package):
GenericBuilder = RBuilder
maintainers("glennpj")
maintainers = ["glennpj"]
#: This attribute is used in UI queries that need to know the build
#: system base class

View File

@@ -11,7 +11,7 @@
import spack.builder
from spack.build_environment import SPACK_NO_PARALLEL_MAKE, determine_number_of_jobs
from spack.directives import build_system, extends, maintainers
from spack.directives import build_system, extends
from spack.package_base import PackageBase
from spack.util.environment import env_flag
from spack.util.executable import Executable, ProcessError
@@ -23,7 +23,7 @@ class RacketPackage(PackageBase):
"""
#: Package name, version, and extension on PyPI
maintainers("elfprince13")
maintainers = ["elfprince13"]
# To be used in UI queries that require to know which
# build-system class we are using
build_system_class = "RacketPackage"

View File

@@ -138,7 +138,7 @@ class ROCmPackage(PackageBase):
depends_on("llvm-amdgpu", when="+rocm")
depends_on("hsa-rocr-dev", when="+rocm")
depends_on("hip +rocm", when="+rocm")
depends_on("hip", when="+rocm")
conflicts("^blt@:0.3.6", when="+rocm")

View File

@@ -7,7 +7,7 @@
import spack.builder
import spack.package_base
from spack.directives import build_system, extends, maintainers
from spack.directives import build_system, extends
from ._checks import BaseBuilder
@@ -15,7 +15,7 @@
class RubyPackage(spack.package_base.PackageBase):
"""Specialized class for building Ruby gems."""
maintainers("Kerilk")
maintainers = ["Kerilk"]
#: This attribute is used in UI queries that need to know the build
#: system base class

View File

@@ -61,7 +61,10 @@ def import_modules(self):
list: list of strings of module names
"""
modules = []
root = os.path.join(self.prefix, self.spec["python"].package.platlib)
root = os.path.join(
self.prefix,
self.spec["python"].package.platlib,
)
# Some Python libraries are packages: collections of modules
# distributed in directories containing __init__.py files

View File

@@ -38,12 +38,13 @@
import spack.util.spack_yaml as syaml
import spack.util.url as url_util
import spack.util.web as web_util
from spack import traverse
from spack.error import SpackError
from spack.reporters import CDash, CDashConfiguration
from spack.reporters.cdash import build_stamp as cdash_build_stamp
JOB_RETRY_CONDITIONS = ["always"]
JOB_RETRY_CONDITIONS = [
"always",
]
TEMP_STORAGE_MIRROR_NAME = "ci_temporary_mirror"
SPACK_RESERVED_TAGS = ["public", "protected", "notary"]
@@ -128,7 +129,10 @@ def _remove_reserved_tags(tags):
def _get_spec_string(spec):
format_elements = ["{name}{@version}", "{%compiler}"]
format_elements = [
"{name}{@version}",
"{%compiler}",
]
if spec.architecture:
format_elements.append(" {arch=architecture}")
@@ -324,7 +328,12 @@ def _compute_spec_deps(spec_list, check_index_only=False, mirrors_to_check=None)
dependencies = []
def append_dep(s, d):
dependencies.append({"spec": s, "depends": d})
dependencies.append(
{
"spec": s,
"depends": d,
}
)
for spec in spec_list:
for s in spec.traverse(deptype=all):
@@ -337,7 +346,10 @@ def append_dep(s, d):
)
skey = _spec_deps_key(s)
spec_labels[skey] = {"spec": s, "needs_rebuild": not up_to_date_mirrors}
spec_labels[skey] = {
"spec": s,
"needs_rebuild": not up_to_date_mirrors,
}
for d in s.dependencies(deptype=all):
dkey = _spec_deps_key(d)
@@ -356,13 +368,76 @@ def append_dep(s, d):
}
)
deps_json_obj = {"specs": specs, "dependencies": dependencies}
deps_json_obj = {
"specs": specs,
"dependencies": dependencies,
}
return deps_json_obj
def _spec_matches(spec, match_string):
return spec.intersects(match_string)
return spec.satisfies(match_string)
def _remove_attributes(src_dict, dest_dict):
if "tags" in src_dict and "tags" in dest_dict:
# For 'tags', we remove any tags that are listed for removal
for tag in src_dict["tags"]:
while tag in dest_dict["tags"]:
dest_dict["tags"].remove(tag)
def _copy_attributes(attrs_list, src_dict, dest_dict):
for runner_attr in attrs_list:
if runner_attr in src_dict:
if runner_attr in dest_dict and runner_attr == "tags":
# For 'tags', we combine the lists of tags, while
# avoiding duplicates
for tag in src_dict[runner_attr]:
if tag not in dest_dict[runner_attr]:
dest_dict[runner_attr].append(tag)
elif runner_attr in dest_dict and runner_attr == "variables":
# For 'variables', we merge the dictionaries. Any conflicts
# (i.e. 'runner-attributes' has same variable key as the
# higher level) we resolve by keeping the more specific
# 'runner-attributes' version.
for src_key, src_val in src_dict[runner_attr].items():
dest_dict[runner_attr][src_key] = copy.deepcopy(src_dict[runner_attr][src_key])
else:
dest_dict[runner_attr] = copy.deepcopy(src_dict[runner_attr])
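A usage sketch of _copy_attributes as defined above (the sample dictionaries are made up): tags are unioned without duplicates, variables are merged with the source winning on conflicts, and any other attribute is copied wholesale.

    src = {"tags": ["x86_64", "large"], "variables": {"SPACK_TARGET": "x86_64"}}
    dest = {"tags": ["large"], "variables": {"SPACK_TARGET": "generic", "EXTRA": "1"}}
    _copy_attributes(["tags", "variables"], src, dest)
    # dest["tags"]      -> ["large", "x86_64"]
    # dest["variables"] -> {"SPACK_TARGET": "x86_64", "EXTRA": "1"}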
def _find_matching_config(spec, gitlab_ci):
runner_attributes = {}
overridable_attrs = [
"image",
"tags",
"variables",
"before_script",
"script",
"after_script",
]
_copy_attributes(overridable_attrs, gitlab_ci, runner_attributes)
matched = False
only_first = gitlab_ci.get("match_behavior", "first") == "first"
for ci_mapping in gitlab_ci["mappings"]:
for match_string in ci_mapping["match"]:
if _spec_matches(spec, match_string):
matched = True
if "remove-attributes" in ci_mapping:
_remove_attributes(ci_mapping["remove-attributes"], runner_attributes)
if "runner-attributes" in ci_mapping:
_copy_attributes(
overridable_attrs, ci_mapping["runner-attributes"], runner_attributes
)
break
if matched and only_first:
break
return runner_attributes if matched else None
def _format_job_needs(
@@ -438,28 +513,16 @@ def compute_affected_packages(rev1="HEAD^", rev2="HEAD"):
return spack.repo.get_all_package_diffs("ARC", rev1=rev1, rev2=rev2)
def get_spec_filter_list(env, affected_pkgs, dependent_traverse_depth=None):
def get_spec_filter_list(env, affected_pkgs):
"""Given a list of package names and an active/concretized
environment, return the set of all concrete specs from the
environment that could have been affected by changing the
list of packages.
If a ``dependent_traverse_depth`` is given, it is used to limit
upward (in the parent direction) traversal of specs of touched
packages. E.g. if 1 is provided, then only direct dependents
of touched package specs are traversed to produce specs that
could have been affected by changing the package, while if 0 is
provided, only the changed specs themselves are traversed. If ``None``
is given, upward traversal of touched package specs is done all
the way to the environment roots. Providing a negative number
results in no traversals at all, yielding an empty set.
Arguments:
env (spack.environment.Environment): Active concrete environment
affected_pkgs (List[str]): Affected package names
dependent_traverse_depth: Optional integer to limit dependent
traversal, or None to disable the limit.
Returns:
@@ -472,237 +535,17 @@ def get_spec_filter_list(env, affected_pkgs, dependent_traverse_depth=None):
tty.debug("All concrete environment specs:")
for s in all_concrete_specs:
tty.debug(" {0}/{1}".format(s.name, s.dag_hash()[:7]))
affected_pkgs = frozenset(affected_pkgs)
env_matches = [s for s in all_concrete_specs if s.name in affected_pkgs]
env_matches = [s for s in all_concrete_specs if s.name in frozenset(affected_pkgs)]
visited = set()
dag_hash = lambda s: s.dag_hash()
for depth, parent in traverse.traverse_nodes(
env_matches, direction="parents", key=dag_hash, depth=True, order="breadth"
):
if dependent_traverse_depth is not None and depth > dependent_traverse_depth:
break
affected_specs.update(parent.traverse(direction="children", visited=visited, key=dag_hash))
for match in env_matches:
for parent in match.traverse(direction="parents", key=dag_hash):
affected_specs.update(
parent.traverse(direction="children", visited=visited, key=dag_hash)
)
return affected_specs
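The pruning described in the docstring above amounts to a breadth-first walk toward the environment roots that stops once the configured depth is exceeded. A self-contained sketch of that idea (this is not Spack's traverse module; the parents-mapping shape is an assumption):

    from collections import deque

    def limited_parent_walk(start_nodes, parents, max_depth=None):
        # Yields the start nodes and their ancestors, pruned at max_depth;
        # max_depth=None walks all the way to the roots, a negative value yields nothing.
        seen = set(start_nodes)
        queue = deque((n, 0) for n in start_nodes)
        while queue:
            node, depth = queue.popleft()
            if max_depth is not None and depth > max_depth:
                continue
            yield node
            for parent in parents.get(node, ()):
                if parent not in seen:
                    seen.add(parent)
                    queue.append((parent, depth + 1))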
def _build_jobs(phases, staged_phases):
for phase in phases:
phase_name = phase["name"]
spec_labels, dependencies, stages = staged_phases[phase_name]
for stage_jobs in stages:
for spec_label in stage_jobs:
spec_record = spec_labels[spec_label]
release_spec = spec_record["spec"]
release_spec_dag_hash = release_spec.dag_hash()
yield release_spec, release_spec_dag_hash
def _noop(x):
return x
def _unpack_script(script_section, op=_noop):
script = []
for cmd in script_section:
if isinstance(cmd, list):
for subcmd in cmd:
script.append(op(subcmd))
else:
script.append(op(cmd))
return script
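A quick illustration of _unpack_script (the commands are made up): nested lists are flattened and every command is passed through op, which the generator below uses to substitute placeholders such as {env_dir}.

    section = ["cd {env_dir}", ["spack env activate --without-view .", "spack ci rebuild"]]
    script = _unpack_script(section, op=lambda cmd: cmd.replace("{env_dir}", "/tmp/concrete_env"))
    # script -> ["cd /tmp/concrete_env", "spack env activate --without-view .", "spack ci rebuild"]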
class SpackCI:
"""Spack CI object used to generate intermediate representation
used by the CI generator(s).
"""
def __init__(self, ci_config, phases, staged_phases):
"""Given the information from the ci section of the config
and the job phases, set up the metadata needed for generating the Spack
CI IR.
"""
self.ci_config = ci_config
self.named_jobs = ["any", "build", "cleanup", "noop", "reindex", "signing"]
self.ir = {
"jobs": {},
"temporary-storage-url-prefix": self.ci_config.get(
"temporary-storage-url-prefix", None
),
"enable-artifacts-buildcache": self.ci_config.get(
"enable-artifacts-buildcache", False
),
"bootstrap": self.ci_config.get(
"bootstrap", []
), # This is deprecated and should be removed
"rebuild-index": self.ci_config.get("rebuild-index", True),
"broken-specs-url": self.ci_config.get("broken-specs-url", None),
"broken-tests-packages": self.ci_config.get("broken-tests-packages", []),
"target": self.ci_config.get("target", "gitlab"),
}
jobs = self.ir["jobs"]
for spec, dag_hash in _build_jobs(phases, staged_phases):
jobs[dag_hash] = self.__init_job(spec)
for name in self.named_jobs:
# Skip the special named jobs
if name not in ["any", "build"]:
jobs[name] = self.__init_job("")
def __init_job(self, spec):
"""Initialize job object"""
return {"spec": spec, "attributes": {}}
def __is_named(self, section):
"""Check if a pipeline-gen configuration section is for a named job,
and if so return the name, otherwise return None.
"""
for _name in self.named_jobs:
keys = ["{0}-job".format(_name), "{0}-job-remove".format(_name)]
if any([key for key in keys if key in section]):
return _name
return None
@staticmethod
def __job_name(name, suffix=""):
"""Compute the name of a named job with appropriate suffix.
Valid suffixes are '-remove', the empty string, or None.
"""
assert type(name) == str
jname = name
if suffix:
jname = "{0}-job{1}".format(name, suffix)
else:
jname = "{0}-job".format(name)
return jname
def __apply_submapping(self, dest, spec, section):
"""Apply submapping setion to the IR dict"""
matched = False
only_first = section.get("match_behavior", "first") == "first"
for match_attrs in reversed(section["submapping"]):
attrs = cfg.InternalConfigScope._process_dict_keyname_overrides(match_attrs)
for match_string in match_attrs["match"]:
if _spec_matches(spec, match_string):
matched = True
if "build-job-remove" in match_attrs:
spack.config.remove_yaml(dest, attrs["build-job-remove"])
if "build-job" in match_attrs:
spack.config.merge_yaml(dest, attrs["build-job"])
break
if matched and only_first:
break
return dest
# Generate IR from the configs
def generate_ir(self):
"""Generate the IR from the Spack CI configurations."""
jobs = self.ir["jobs"]
# Implicit job defaults
defaults = [
{
"build-job": {
"script": [
"cd {env_dir}",
"spack env activate --without-view .",
"spack ci rebuild",
]
}
},
{"noop-job": {"script": ['echo "All specs already up to date, nothing to rebuild."']}},
]
# Job overrides
overrides = [
# Reindex script
{
"reindex-job": {
"script:": [
"spack buildcache update-index --keys --mirror-url {index_target_mirror}"
]
}
},
# Cleanup script
{
"cleanup-job": {
"script:": [
"spack -d mirror destroy --mirror-url {mirror_prefix}/$CI_PIPELINE_ID"
]
}
},
# Add signing job tags
{"signing-job": {"tags": ["aws", "protected", "notary"]}},
# Remove reserved tags
{"any-job-remove": {"tags": SPACK_RESERVED_TAGS}},
]
pipeline_gen = overrides + self.ci_config.get("pipeline-gen", []) + defaults
for section in reversed(pipeline_gen):
name = self.__is_named(section)
has_submapping = "submapping" in section
section = cfg.InternalConfigScope._process_dict_keyname_overrides(section)
if name:
remove_job_name = self.__job_name(name, suffix="-remove")
merge_job_name = self.__job_name(name)
do_remove = remove_job_name in section
do_merge = merge_job_name in section
def _apply_section(dest, src):
if do_remove:
dest = spack.config.remove_yaml(dest, src[remove_job_name])
if do_merge:
dest = copy.copy(spack.config.merge_yaml(dest, src[merge_job_name]))
if name == "build":
# Apply attributes to all build jobs
for _, job in jobs.items():
if job["spec"]:
_apply_section(job["attributes"], section)
elif name == "any":
# Apply section attributes to all jobs
for _, job in jobs.items():
_apply_section(job["attributes"], section)
else:
# Create a signing job if there is script and the job hasn't
# been initialized yet
if name == "signing" and name not in jobs:
if "signing-job" in section:
if "script" not in section["signing-job"]:
continue
else:
jobs[name] = self.__init_job("")
# Apply attributes to named job
_apply_section(jobs[name]["attributes"], section)
elif has_submapping:
# Apply section jobs with specs to match
for _, job in jobs.items():
if job["spec"]:
job["attributes"] = self.__apply_submapping(
job["attributes"], job["spec"], section
)
for _, job in jobs.items():
if job["spec"]:
job["spec"] = job["spec"].name
return self.ir
def generate_gitlab_ci_yaml(
env,
print_summary,
@@ -752,32 +595,14 @@ def generate_gitlab_ci_yaml(
yaml_root = ev.config_dict(env.yaml)
# Get the joined "ci" config with all of the current scopes resolved
ci_config = cfg.get("ci")
if "gitlab-ci" not in yaml_root:
tty.die('Environment yaml does not have "gitlab-ci" section')
if not ci_config:
tty.die('Environment yaml does not have "ci" section')
gitlab_ci = yaml_root["gitlab-ci"]
# Default target is gitlab...and only target is gitlab
if "target" in ci_config and ci_config["target"] != "gitlab":
tty.die('Spack CI module only generates target "gitlab"')
cdash_config = cfg.get("cdash")
cdash_handler = CDashHandler(cdash_config) if "build-group" in cdash_config else None
cdash_handler = CDashHandler(yaml_root.get("cdash")) if "cdash" in yaml_root else None
build_group = cdash_handler.build_group if cdash_handler else None
dependent_depth = os.environ.get("SPACK_PRUNE_UNTOUCHED_DEPENDENT_DEPTH", None)
if dependent_depth is not None:
try:
dependent_depth = int(dependent_depth)
except (TypeError, ValueError):
tty.warn(
f"Unrecognized value ({dependent_depth}) "
"provided for SPACK_PRUNE_UNTOUCHED_DEPENDENT_DEPTH, "
"ignoring it."
)
dependent_depth = None
prune_untouched_packages = False
spack_prune_untouched = os.environ.get("SPACK_PRUNE_UNTOUCHED", None)
if spack_prune_untouched is not None and spack_prune_untouched.lower() == "true":
@@ -793,9 +618,7 @@ def generate_gitlab_ci_yaml(
tty.debug("affected pkgs:")
for p in affected_pkgs:
tty.debug(" {0}".format(p))
affected_specs = get_spec_filter_list(
env, affected_pkgs, dependent_traverse_depth=dependent_depth
)
affected_specs = get_spec_filter_list(env, affected_pkgs)
tty.debug("all affected specs:")
for s in affected_specs:
tty.debug(" {0}/{1}".format(s.name, s.dag_hash()[:7]))
@@ -837,39 +660,53 @@ def generate_gitlab_ci_yaml(
# trying to build.
broken_specs_url = ""
known_broken_specs_encountered = []
if "broken-specs-url" in ci_config:
broken_specs_url = ci_config["broken-specs-url"]
if "broken-specs-url" in gitlab_ci:
broken_specs_url = gitlab_ci["broken-specs-url"]
enable_artifacts_buildcache = False
if "enable-artifacts-buildcache" in ci_config:
enable_artifacts_buildcache = ci_config["enable-artifacts-buildcache"]
if "enable-artifacts-buildcache" in gitlab_ci:
enable_artifacts_buildcache = gitlab_ci["enable-artifacts-buildcache"]
rebuild_index_enabled = True
if "rebuild-index" in ci_config and ci_config["rebuild-index"] is False:
if "rebuild-index" in gitlab_ci and gitlab_ci["rebuild-index"] is False:
rebuild_index_enabled = False
temp_storage_url_prefix = None
if "temporary-storage-url-prefix" in ci_config:
temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
if "temporary-storage-url-prefix" in gitlab_ci:
temp_storage_url_prefix = gitlab_ci["temporary-storage-url-prefix"]
bootstrap_specs = []
phases = []
if "bootstrap" in ci_config:
for phase in ci_config["bootstrap"]:
if "bootstrap" in gitlab_ci:
for phase in gitlab_ci["bootstrap"]:
try:
phase_name = phase.get("name")
strip_compilers = phase.get("compiler-agnostic")
except AttributeError:
phase_name = phase
strip_compilers = False
phases.append({"name": phase_name, "strip-compilers": strip_compilers})
phases.append(
{
"name": phase_name,
"strip-compilers": strip_compilers,
}
)
for bs in env.spec_lists[phase_name]:
bootstrap_specs.append(
{"spec": bs, "phase-name": phase_name, "strip-compilers": strip_compilers}
{
"spec": bs,
"phase-name": phase_name,
"strip-compilers": strip_compilers,
}
)
phases.append({"name": "specs", "strip-compilers": False})
phases.append(
{
"name": "specs",
"strip-compilers": False,
}
)
# If a remote mirror override (alternate buildcache destination) was
# specified, add it here in case it has already built hashes we might
@@ -920,27 +757,6 @@ def generate_gitlab_ci_yaml(
shutil.copyfile(env.manifest_path, os.path.join(concrete_env_dir, "spack.yaml"))
shutil.copyfile(env.lock_path, os.path.join(concrete_env_dir, "spack.lock"))
with open(env.manifest_path, "r") as env_fd:
env_yaml_root = syaml.load(env_fd)
# Add config scopes to environment
env_includes = env_yaml_root["spack"].get("include", [])
cli_scopes = [
os.path.abspath(s.path)
for s in cfg.scopes().values()
if type(s) == cfg.ImmutableConfigScope
and s.path not in env_includes
and os.path.exists(s.path)
]
include_scopes = []
for scope in cli_scopes:
if scope not in include_scopes and scope not in env_includes:
include_scopes.insert(0, scope)
env_includes.extend(include_scopes)
env_yaml_root["spack"]["include"] = env_includes
with open(os.path.join(concrete_env_dir, "spack.yaml"), "w") as fd:
fd.write(syaml.dump_config(env_yaml_root, default_flow_style=False))
job_log_dir = os.path.join(pipeline_artifacts_dir, "logs")
job_repro_dir = os.path.join(pipeline_artifacts_dir, "reproduction")
job_test_dir = os.path.join(pipeline_artifacts_dir, "tests")
@@ -952,7 +768,7 @@ def generate_gitlab_ci_yaml(
# generation job and the rebuild jobs. This can happen when gitlab
# checks out the project into a runner-specific directory, for example,
# and different runners are picked for generate and rebuild jobs.
ci_project_dir = os.environ.get("CI_PROJECT_DIR", os.getcwd())
ci_project_dir = os.environ.get("CI_PROJECT_DIR")
rel_artifacts_root = os.path.relpath(pipeline_artifacts_dir, ci_project_dir)
rel_concrete_env_dir = os.path.relpath(concrete_env_dir, ci_project_dir)
rel_job_log_dir = os.path.relpath(job_log_dir, ci_project_dir)
@@ -966,7 +782,7 @@ def generate_gitlab_ci_yaml(
try:
bindist.binary_index.update()
except bindist.FetchCacheError as e:
tty.warn(e)
tty.error(e)
staged_phases = {}
try:
@@ -1023,9 +839,6 @@ def generate_gitlab_ci_yaml(
else:
broken_spec_urls = web_util.list_url(broken_specs_url)
spack_ci = SpackCI(ci_config, phases, staged_phases)
spack_ci_ir = spack_ci.generate_ir()
before_script, after_script = None, None
for phase in phases:
phase_name = phase["name"]
@@ -1053,7 +866,7 @@ def generate_gitlab_ci_yaml(
spec_record["needs_rebuild"] = False
continue
runner_attribs = spack_ci_ir["jobs"][release_spec_dag_hash]["attributes"]
runner_attribs = _find_matching_config(release_spec, gitlab_ci)
if not runner_attribs:
tty.warn("No match found for {0}, skipping it".format(release_spec))
@@ -1084,21 +897,23 @@ def generate_gitlab_ci_yaml(
except AttributeError:
image_name = build_image
if "script" not in runner_attribs:
raise AttributeError
job_script = ["spack env activate --without-view ."]
def main_script_replacements(cmd):
return cmd.replace("{env_dir}", concrete_env_dir)
if artifacts_root:
job_script.insert(0, "cd {0}".format(concrete_env_dir))
job_script = _unpack_script(runner_attribs["script"], op=main_script_replacements)
job_script.extend(["spack ci rebuild"])
if "script" in runner_attribs:
job_script = [s for s in runner_attribs["script"]]
before_script = None
if "before_script" in runner_attribs:
before_script = _unpack_script(runner_attribs["before_script"])
before_script = [s for s in runner_attribs["before_script"]]
after_script = None
if "after_script" in runner_attribs:
after_script = _unpack_script(runner_attribs["after_script"])
after_script = [s for s in runner_attribs["after_script"]]
osname = str(release_spec.architecture)
job_name = get_job_name(
@@ -1160,7 +975,7 @@ def main_script_replacements(cmd):
bs_arch = c_spec.architecture
bs_arch_family = bs_arch.target.microarchitecture.family
if (
c_spec.intersects(compiler_pkg_spec)
c_spec.satisfies(compiler_pkg_spec)
and bs_arch_family == spec_arch_family
):
# We found the bootstrap compiler this release spec
@@ -1294,9 +1109,15 @@ def main_script_replacements(cmd):
"variables": variables,
"script": job_script,
"tags": tags,
"artifacts": {"paths": artifact_paths, "when": "always"},
"artifacts": {
"paths": artifact_paths,
"when": "always",
},
"needs": sorted(job_dependencies, key=lambda d: d["job"]),
"retry": {"max": 2, "when": JOB_RETRY_CONDITIONS},
"retry": {
"max": 2,
"when": JOB_RETRY_CONDITIONS,
},
"interruptible": True,
}
@@ -1314,7 +1135,10 @@ def main_script_replacements(cmd):
if image_name:
job_object["image"] = image_name
if image_entry is not None:
job_object["image"] = {"name": image_name, "entrypoint": image_entry}
job_object["image"] = {
"name": image_name,
"entrypoint": image_entry,
}
output_object[job_name] = job_object
job_id += 1
@@ -1342,9 +1166,26 @@ def main_script_replacements(cmd):
else:
tty.warn("Unable to populate buildgroup without CDash credentials")
service_job_config = None
if "service-job-attributes" in gitlab_ci:
service_job_config = gitlab_ci["service-job-attributes"]
default_attrs = [
"image",
"tags",
"variables",
"before_script",
# 'script',
"after_script",
]
service_job_retries = {
"max": 2,
"when": ["runner_system_failure", "stuck_or_timeout_failure", "script_failure"],
"when": [
"runner_system_failure",
"stuck_or_timeout_failure",
"script_failure",
],
}
if job_id > 0:
@@ -1353,29 +1194,55 @@ def main_script_replacements(cmd):
# schedule a job to clean up the temporary storage location
# associated with this pipeline.
stage_names.append("cleanup-temp-storage")
cleanup_job = copy.deepcopy(spack_ci_ir["jobs"]["cleanup"]["attributes"])
cleanup_job = {}
if service_job_config:
_copy_attributes(default_attrs, service_job_config, cleanup_job)
if "tags" in cleanup_job:
service_tags = _remove_reserved_tags(cleanup_job["tags"])
cleanup_job["tags"] = service_tags
cleanup_job["stage"] = "cleanup-temp-storage"
cleanup_job["script"] = [
"spack -d mirror destroy --mirror-url {0}/$CI_PIPELINE_ID".format(
temp_storage_url_prefix
)
]
cleanup_job["when"] = "always"
cleanup_job["retry"] = service_job_retries
cleanup_job["interruptible"] = True
cleanup_job["script"] = _unpack_script(
cleanup_job["script"],
op=lambda cmd: cmd.replace("mirror_prefix", temp_storage_url_prefix),
)
output_object["cleanup"] = cleanup_job
if (
"script" in spack_ci_ir["jobs"]["signing"]["attributes"]
"signing-job-attributes" in gitlab_ci
and spack_pipeline_type == "spack_protected_branch"
):
# External signing: generate a job to check and sign binary pkgs
stage_names.append("stage-sign-pkgs")
signing_job = spack_ci_ir["jobs"]["signing"]["attributes"]
signing_job_config = gitlab_ci["signing-job-attributes"]
signing_job = {}
signing_job["script"] = _unpack_script(signing_job["script"])
signing_job_attrs_to_copy = [
"image",
"tags",
"variables",
"before_script",
"script",
"after_script",
]
_copy_attributes(signing_job_attrs_to_copy, signing_job_config, signing_job)
signing_job_tags = []
if "tags" in signing_job:
signing_job_tags = _remove_reserved_tags(signing_job["tags"])
for tag in ["aws", "protected", "notary"]:
if tag not in signing_job_tags:
signing_job_tags.append(tag)
signing_job["tags"] = signing_job_tags
signing_job["stage"] = "stage-sign-pkgs"
signing_job["when"] = "always"
@@ -1387,17 +1254,23 @@ def main_script_replacements(cmd):
if rebuild_index_enabled:
# Add a final job to regenerate the index
stage_names.append("stage-rebuild-index")
final_job = spack_ci_ir["jobs"]["reindex"]["attributes"]
final_job = {}
if service_job_config:
_copy_attributes(default_attrs, service_job_config, final_job)
if "tags" in final_job:
service_tags = _remove_reserved_tags(final_job["tags"])
final_job["tags"] = service_tags
index_target_mirror = mirror_urls[0]
if remote_mirror_override:
index_target_mirror = remote_mirror_override
final_job["stage"] = "stage-rebuild-index"
final_job["script"] = _unpack_script(
final_job["script"],
op=lambda cmd: cmd.replace("{index_target_mirror}", index_target_mirror),
)
final_job["stage"] = "stage-rebuild-index"
final_job["script"] = [
"spack buildcache update-index --keys --mirror-url {0}".format(index_target_mirror)
]
final_job["when"] = "always"
final_job["retry"] = service_job_retries
final_job["interruptible"] = True
@@ -1478,7 +1351,15 @@ def main_script_replacements(cmd):
else:
# No jobs were generated
tty.debug("No specs to rebuild, generating no-op job")
noop_job = spack_ci_ir["jobs"]["noop"]["attributes"]
noop_job = {}
if service_job_config:
_copy_attributes(default_attrs, service_job_config, noop_job)
if "script" not in noop_job:
noop_job["script"] = [
'echo "All specs already up to date, nothing to rebuild."',
]
noop_job["retry"] = service_job_retries
@@ -1492,7 +1373,7 @@ def main_script_replacements(cmd):
sys.exit(1)
with open(output_file, "w") as outf:
outf.write(syaml.dump(sorted_output, default_flow_style=True))
outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
def _url_encode_string(input_string):
@@ -1608,8 +1489,9 @@ def _push_mirror_contents(env, specfile_path, sign_binaries, mirror_url):
hashes = env.all_hashes() if env else None
matches = spack.store.specfile_matches(specfile_path, hashes=hashes)
push_url = spack.mirror.Mirror.from_url(mirror_url).push_url
spec_kwargs = {"include_root": True, "include_dependencies": False}
kwargs = {"force": True, "allow_root": True, "unsigned": unsigned}
bindist.push(matches, push_url, include_root=True, include_dependencies=False, **kwargs)
bindist.push(matches, push_url, spec_kwargs, **kwargs)
def push_mirror_contents(env, specfile_path, mirror_url, sign_binaries):
@@ -1675,7 +1557,7 @@ def copy_files_to_artifacts(src, artifacts_dir):
msg = ("Unable to copy files ({0}) to artifacts {1} due to " "exception: {2}").format(
src, artifacts_dir, str(err)
)
tty.warn(msg)
tty.error(msg)
def copy_stage_logs_to_artifacts(job_spec, job_log_dir):
@@ -1738,7 +1620,9 @@ def download_and_extract_artifacts(url, work_dir):
"""
tty.msg("Fetching artifacts from: {0}\n".format(url))
headers = {"Content-Type": "application/zip"}
headers = {
"Content-Type": "application/zip",
}
token = os.environ.get("GITLAB_PRIVATE_TOKEN", None)
if token:
@@ -1895,7 +1779,6 @@ def reproduce_ci_job(url, work_dir):
function is a set of printed instructions for running docker and then
commands to run to reproduce the build once inside the container.
"""
work_dir = os.path.realpath(work_dir)
download_and_extract_artifacts(url, work_dir)
lock_file = fs.find(work_dir, "spack.lock")[0]
@@ -2060,9 +1943,7 @@ def reproduce_ci_job(url, work_dir):
if job_image:
inst_list.append("\nRun the following command:\n\n")
inst_list.append(
" $ docker run --rm --name spack_reproducer -v {0}:{1}:Z -ti {2}\n".format(
work_dir, mount_as_dir, job_image
)
" $ docker run --rm -v {0}:{1} -ti {2}\n".format(work_dir, mount_as_dir, job_image)
)
inst_list.append("\nOnce inside the container:\n\n")
else:
@@ -2113,16 +1994,13 @@ def process_command(name, commands, repro_dir):
# Create a string [command 1] && [command 2] && ... && [command n] with commands
# quoted using double quotes.
args_to_string = lambda args: " ".join('"{}"'.format(arg) for arg in args)
full_command = " \n ".join(map(args_to_string, commands))
full_command = " && ".join(map(args_to_string, commands))
# Write the command to a shell script
script = "{0}.sh".format(name)
with open(script, "w") as fd:
fd.write("#!/bin/sh\n\n")
fd.write("\n# spack {0} command\n".format(name))
fd.write("set -e\n")
if os.environ.get("SPACK_VERBOSE_SCRIPT"):
fd.write("set -x\n")
fd.write(full_command)
fd.write("\n")
@@ -2203,7 +2081,10 @@ def write_broken_spec(url, pkg_name, stack_name, job_url, pipeline_url, spec_dic
with open(file_path, "w") as fd:
fd.write(syaml.dump(broken_spec_details))
web_util.push_to_url(
file_path, url, keep_original=False, extra_args={"ContentType": "text/plain"}
file_path,
url,
keep_original=False,
extra_args={"ContentType": "text/plain"},
)
except Exception as err:
# If there is an S3 error (e.g., access denied or connection
@@ -2281,7 +2162,14 @@ def run_standalone_tests(**kwargs):
tty.error("Reproduction directory is required for stand-alone tests")
return
test_args = ["spack", "--color=always", "--backtrace", "--verbose", "test", "run"]
test_args = [
"spack",
"--color=always",
"--backtrace",
"--verbose",
"test",
"run",
]
if fail_fast:
test_args.append("--fail-fast")
@@ -2431,9 +2319,19 @@ def populate_buildgroup(self, job_names):
opener = build_opener(HTTPHandler)
parent_group_id = self.create_buildgroup(opener, headers, url, self.build_group, "Daily")
parent_group_id = self.create_buildgroup(
opener,
headers,
url,
self.build_group,
"Daily",
)
group_id = self.create_buildgroup(
opener, headers, url, "Latest {0}".format(self.build_group), "Latest"
opener,
headers,
url,
"Latest {0}".format(self.build_group),
"Latest",
)
if not parent_group_id or not group_id:
@@ -2443,9 +2341,13 @@ def populate_buildgroup(self, job_names):
data = {
"dynamiclist": [
{"match": name, "parentgroupid": parent_group_id, "site": self.site}
{
"match": name,
"parentgroupid": parent_group_id,
"site": self.site,
}
for name in job_names
]
],
}
enc_data = json.dumps(data).encode("utf-8")

View File

@@ -43,6 +43,7 @@ def matches(obj, proto):
return all((key in obj and matches(obj[key], val)) for key, val in proto.items())
if isinstance(obj, collections.abc.Sequence) and not isinstance(obj, str):
if not (isinstance(proto, collections.abc.Sequence) and not isinstance(proto, str)):
return False

View File

@@ -161,7 +161,9 @@ class _UnquotedFlags(object):
"""
flags_arg_pattern = re.compile(
r'^({0})=([^\'"].*)$'.format("|".join(spack.spec.FlagMap.valid_compiler_flags()))
r'^({0})=([^\'"].*)$'.format(
"|".join(spack.spec.FlagMap.valid_compiler_flags()),
)
)
def __init__(self, all_unquoted_flag_pairs: List[Tuple[Match[str], str]]):
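For reference, the pattern above only matches flag assignments whose value does not start with a quote. A reduced reproduction with a hand-picked subset of flag names (the real list comes from FlagMap.valid_compiler_flags()):

    import re

    pattern = re.compile(r'^({0})=([^\'"].*)$'.format("|".join(["cflags", "cxxflags", "ldflags"])))
    m = pattern.match("cflags=-O3 -g")
    # m.group(1) -> "cflags", m.group(2) -> "-O3 -g"
    assert pattern.match('cflags="-O3"') is None  # quoted values are left alone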
@@ -225,6 +227,7 @@ def parse_specs(args, **kwargs):
return specs
except spack.error.SpecError as e:
msg = e.message
if e.long_message:
msg += e.long_message

View File

@@ -53,6 +53,7 @@ def packages(parser, args):
def packages_https(parser, args):
# Since packages takes a long time, --all is required without name
if not args.check_all and not args.name:
tty.die("Please specify one or more packages to audit, or --all.")

View File

@@ -5,7 +5,7 @@
import spack.cmd.common.env_utility as env_utility
description = (
"run a command in a spec's install environment, or dump its environment to screen or file"
"run a command in a spec's install environment, " "or dump its environment to screen or file"
)
section = "build"
level = "long"

View File

@@ -103,7 +103,9 @@ def setup_parser(subparser):
help="Regenerate buildcache index after building package(s)",
)
create.add_argument(
"--spec-file", default=None, help="Create buildcache entry for spec from json or yaml file"
"--spec-file",
default=None,
help="Create buildcache entry for spec from json or yaml file",
)
create.add_argument(
"--only",
@@ -400,7 +402,7 @@ def _matching_specs(specs, spec_file):
return spack.store.find(constraints, hashes=hashes)
if env:
return [concrete for _, concrete in env.concretized_specs()]
return [env.specs_by_hash[h] for h in env.concretized_order]
tty.die(
"build cache file creation requires at least one"
@@ -459,6 +461,10 @@ def create_fn(args):
msg = "Pushing binary packages to {0}/build_cache".format(url)
tty.msg(msg)
specs_kwargs = {
"include_root": "package" in args.things_to_install,
"include_dependencies": "dependencies" in args.things_to_install,
}
kwargs = {
"key": args.key,
"force": args.force,
@@ -467,13 +473,7 @@ def create_fn(args):
"allow_root": args.allow_root,
"regenerate_index": args.rebuild_index,
}
bindist.push(
matches,
url,
include_root="package" in args.things_to_install,
include_dependencies="dependencies" in args.things_to_install,
**kwargs,
)
bindist.push(matches, url, specs_kwargs, **kwargs)
def install_fn(args):
@@ -498,11 +498,11 @@ def list_fn(args):
if not args.allarch:
arch = spack.spec.Spec.default_arch()
specs = [s for s in specs if s.intersects(arch)]
specs = [s for s in specs if s.satisfies(arch)]
if args.specs:
constraints = set(args.specs)
specs = [s for s in specs if any(s.intersects(c) for c in constraints)]
specs = [s for s in specs if any(s.satisfies(c) for c in constraints)]
if sys.stdout.isatty():
builds = len(specs)
tty.msg("%s." % plural(builds, "cached build"))

View File

@@ -20,7 +20,9 @@ def setup_parser(subparser):
help="name of the list to remove specs from",
)
subparser.add_argument(
"--match-spec", dest="match_spec", help="if name is ambiguous, supply a spec to match"
"--match-spec",
dest="match_spec",
help="if name is ambiguous, supply a spec to match",
)
subparser.add_argument(
"-a",

View File

@@ -33,6 +33,12 @@ def deindent(desc):
return desc.replace(" ", "")
def get_env_var(variable_name):
if variable_name in os.environ:
return os.environ.get(variable_name)
return None
def setup_parser(subparser):
setup_parser.parser = subparser
subparsers = subparser.add_subparsers(help="CI sub-commands")
@@ -249,9 +255,10 @@ def ci_rebuild(args):
# Make sure the environment is "gitlab-enabled", or else there's nothing
# to do.
ci_config = cfg.get("ci")
if not ci_config:
tty.die("spack ci rebuild requires an env containing ci cfg")
yaml_root = ev.config_dict(env.yaml)
gitlab_ci = yaml_root["gitlab-ci"] if "gitlab-ci" in yaml_root else None
if not gitlab_ci:
tty.die("spack ci rebuild requires an env containing gitlab-ci cfg")
tty.msg(
"SPACK_BUILDCACHE_DESTINATION={0}".format(
@@ -262,27 +269,27 @@ def ci_rebuild(args):
# Grab the environment variables we need. These either come from the
# pipeline generation step ("spack ci generate"), where they were written
# out as variables, or else provided by GitLab itself.
pipeline_artifacts_dir = os.environ.get("SPACK_ARTIFACTS_ROOT")
job_log_dir = os.environ.get("SPACK_JOB_LOG_DIR")
job_test_dir = os.environ.get("SPACK_JOB_TEST_DIR")
repro_dir = os.environ.get("SPACK_JOB_REPRO_DIR")
local_mirror_dir = os.environ.get("SPACK_LOCAL_MIRROR_DIR")
concrete_env_dir = os.environ.get("SPACK_CONCRETE_ENV_DIR")
ci_pipeline_id = os.environ.get("CI_PIPELINE_ID")
ci_job_name = os.environ.get("CI_JOB_NAME")
signing_key = os.environ.get("SPACK_SIGNING_KEY")
job_spec_pkg_name = os.environ.get("SPACK_JOB_SPEC_PKG_NAME")
job_spec_dag_hash = os.environ.get("SPACK_JOB_SPEC_DAG_HASH")
compiler_action = os.environ.get("SPACK_COMPILER_ACTION")
spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE")
remote_mirror_override = os.environ.get("SPACK_REMOTE_MIRROR_OVERRIDE")
remote_mirror_url = os.environ.get("SPACK_REMOTE_MIRROR_URL")
spack_ci_stack_name = os.environ.get("SPACK_CI_STACK_NAME")
shared_pr_mirror_url = os.environ.get("SPACK_CI_SHARED_PR_MIRROR_URL")
rebuild_everything = os.environ.get("SPACK_REBUILD_EVERYTHING")
pipeline_artifacts_dir = get_env_var("SPACK_ARTIFACTS_ROOT")
job_log_dir = get_env_var("SPACK_JOB_LOG_DIR")
job_test_dir = get_env_var("SPACK_JOB_TEST_DIR")
repro_dir = get_env_var("SPACK_JOB_REPRO_DIR")
local_mirror_dir = get_env_var("SPACK_LOCAL_MIRROR_DIR")
concrete_env_dir = get_env_var("SPACK_CONCRETE_ENV_DIR")
ci_pipeline_id = get_env_var("CI_PIPELINE_ID")
ci_job_name = get_env_var("CI_JOB_NAME")
signing_key = get_env_var("SPACK_SIGNING_KEY")
job_spec_pkg_name = get_env_var("SPACK_JOB_SPEC_PKG_NAME")
job_spec_dag_hash = get_env_var("SPACK_JOB_SPEC_DAG_HASH")
compiler_action = get_env_var("SPACK_COMPILER_ACTION")
spack_pipeline_type = get_env_var("SPACK_PIPELINE_TYPE")
remote_mirror_override = get_env_var("SPACK_REMOTE_MIRROR_OVERRIDE")
remote_mirror_url = get_env_var("SPACK_REMOTE_MIRROR_URL")
spack_ci_stack_name = get_env_var("SPACK_CI_STACK_NAME")
shared_pr_mirror_url = get_env_var("SPACK_CI_SHARED_PR_MIRROR_URL")
rebuild_everything = get_env_var("SPACK_REBUILD_EVERYTHING")
# Construct absolute paths relative to current $CI_PROJECT_DIR
ci_project_dir = os.environ.get("CI_PROJECT_DIR")
ci_project_dir = get_env_var("CI_PROJECT_DIR")
pipeline_artifacts_dir = os.path.join(ci_project_dir, pipeline_artifacts_dir)
job_log_dir = os.path.join(ci_project_dir, job_log_dir)
job_test_dir = os.path.join(ci_project_dir, job_test_dir)
@@ -299,10 +306,8 @@ def ci_rebuild(args):
# Query the environment manifest to find out whether we're reporting to a
# CDash instance, and if so, gather some information from the manifest to
# support that task.
cdash_config = cfg.get("cdash")
cdash_handler = None
if "build-group" in cdash_config:
cdash_handler = spack_ci.CDashHandler(cdash_config)
cdash_handler = spack_ci.CDashHandler(yaml_root.get("cdash")) if "cdash" in yaml_root else None
if cdash_handler:
tty.debug("cdash url = {0}".format(cdash_handler.url))
tty.debug("cdash project = {0}".format(cdash_handler.project))
tty.debug("cdash project_enc = {0}".format(cdash_handler.project_enc))
@@ -335,13 +340,13 @@ def ci_rebuild(args):
pipeline_mirror_url = None
temp_storage_url_prefix = None
if "temporary-storage-url-prefix" in ci_config:
temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
if "temporary-storage-url-prefix" in gitlab_ci:
temp_storage_url_prefix = gitlab_ci["temporary-storage-url-prefix"]
pipeline_mirror_url = url_util.join(temp_storage_url_prefix, ci_pipeline_id)
enable_artifacts_mirror = False
if "enable-artifacts-buildcache" in ci_config:
enable_artifacts_mirror = ci_config["enable-artifacts-buildcache"]
if "enable-artifacts-buildcache" in gitlab_ci:
enable_artifacts_mirror = gitlab_ci["enable-artifacts-buildcache"]
if enable_artifacts_mirror or (
spack_is_pr_pipeline and not enable_artifacts_mirror and not temp_storage_url_prefix
):
@@ -525,28 +530,40 @@ def ci_rebuild(args):
if not verify_binaries:
install_args.append("--no-check-signature")
cdash_args = []
if cdash_handler:
# Add additional arguments to `spack install` for CDash reporting.
cdash_args.extend(cdash_handler.args())
slash_hash = "/{}".format(job_spec.dag_hash())
# Arguments when installing dependencies from cache
deps_install_args = install_args
# Arguments when installing the root from sources
root_install_args = install_args + [
"--keep-stage",
"--only=package",
"--use-buildcache=package:never,dependencies:only",
slash_hash,
]
if cdash_handler:
# Add additional arguments to `spack install` for CDash reporting.
root_install_args.extend(cdash_handler.args())
root_install_args.append(slash_hash)
# ["x", "y"] -> "'x' 'y'"
args_to_string = lambda args: " ".join("'{}'".format(arg) for arg in args)
commands = [
# apparently there's a race when spack bootstraps? do it up front once
[SPACK_COMMAND, "-e", env.path, "bootstrap", "now"],
[
SPACK_COMMAND,
"-e",
env.path,
"bootstrap",
"now",
],
[
SPACK_COMMAND,
"-e",
env.path,
"config",
"add",
"config:db_lock_timeout:120", # 2 minutes for processes to fight for a db lock
],
[
SPACK_COMMAND,
"-e",
@@ -588,8 +605,8 @@ def ci_rebuild(args):
# avoid wasting compute cycles attempting to build those hashes.
if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
tty.debug("Install failed on develop")
if "broken-specs-url" in ci_config:
broken_specs_url = ci_config["broken-specs-url"]
if "broken-specs-url" in gitlab_ci:
broken_specs_url = gitlab_ci["broken-specs-url"]
dev_fail_hash = job_spec.dag_hash()
broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
tty.msg("Reporting broken develop build as: {0}".format(broken_spec_path))
@@ -597,8 +614,8 @@ def ci_rebuild(args):
broken_spec_path,
job_spec_pkg_name,
spack_ci_stack_name,
os.environ.get("CI_JOB_URL"),
os.environ.get("CI_PIPELINE_URL"),
get_env_var("CI_JOB_URL"),
get_env_var("CI_PIPELINE_URL"),
job_spec.to_dict(hash=ht.dag_hash),
)
@@ -610,14 +627,17 @@ def ci_rebuild(args):
# the package, run them and copy the output. Failures of any kind should
# *not* terminate the build process or preclude creating the build cache.
broken_tests = (
"broken-tests-packages" in ci_config
and job_spec.name in ci_config["broken-tests-packages"]
"broken-tests-packages" in gitlab_ci
and job_spec.name in gitlab_ci["broken-tests-packages"]
)
reports_dir = fs.join_path(os.getcwd(), "cdash_report")
if args.tests and broken_tests:
tty.warn("Unable to run stand-alone tests since listed in " "ci's 'broken-tests-packages'")
tty.warn(
"Unable to run stand-alone tests since listed in "
"gitlab-ci's 'broken-tests-packages'"
)
if cdash_handler:
msg = "Package is listed in ci's broken-tests-packages"
msg = "Package is listed in gitlab-ci's broken-tests-packages"
cdash_handler.report_skipped(job_spec, reports_dir, reason=msg)
cdash_handler.copy_test_results(reports_dir, job_test_dir)
elif args.tests:
@@ -680,8 +700,8 @@ def ci_rebuild(args):
# If this is a develop pipeline, check if the spec that we just built is
# on the broken-specs list. If so, remove it.
if spack_is_develop_pipeline and "broken-specs-url" in ci_config:
broken_specs_url = ci_config["broken-specs-url"]
if spack_is_develop_pipeline and "broken-specs-url" in gitlab_ci:
broken_specs_url = gitlab_ci["broken-specs-url"]
just_built_hash = job_spec.dag_hash()
broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
if web_util.url_exists(broken_spec_path):
@@ -698,9 +718,9 @@ def ci_rebuild(args):
else:
tty.debug("spack install exited non-zero, will not create buildcache")
api_root_url = os.environ.get("CI_API_V4_URL")
ci_project_id = os.environ.get("CI_PROJECT_ID")
ci_job_id = os.environ.get("CI_JOB_ID")
api_root_url = get_env_var("CI_API_V4_URL")
ci_project_id = get_env_var("CI_PROJECT_ID")
ci_job_id = get_env_var("CI_JOB_ID")
repro_job_url = "{0}/projects/{1}/jobs/{2}/artifacts".format(
api_root_url, ci_project_id, ci_job_id


@@ -13,7 +13,11 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.argparsewriter import ArgparseCompletionWriter, ArgparseRstWriter, ArgparseWriter
from llnl.util.argparsewriter import (
ArgparseCompletionWriter,
ArgparseRstWriter,
ArgparseWriter,
)
from llnl.util.tty.colify import colify
import spack.cmd
@@ -38,7 +42,7 @@
"format": "bash",
"header": os.path.join(spack.paths.share_path, "bash", "spack-completion.in"),
"update": os.path.join(spack.paths.share_path, "spack-completion.bash"),
}
},
}


@@ -6,7 +6,6 @@
import argparse
import os.path
import textwrap
from llnl.util.lang import stable_partition
@@ -416,40 +415,6 @@ def add_cdash_args(subparser, add_help):
cdash_subgroup.add_argument("--cdash-buildstamp", default=None, help=cdash_help["buildstamp"])
def print_cdash_help():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(
"""\
environment variables:
SPACK_CDASH_AUTH_TOKEN
authentication token to present to CDash
"""
),
)
add_cdash_args(parser, True)
parser.print_help()
def sanitize_reporter_options(namespace: argparse.Namespace):
"""Sanitize options that affect generation and configuration of reports, like
CDash or JUnit.
Args:
namespace: options parsed from cli
"""
has_any_cdash_option = (
namespace.cdash_upload_url or namespace.cdash_build or namespace.cdash_site
)
if namespace.log_format == "junit" and has_any_cdash_option:
raise argparse.ArgumentTypeError("cannot pass any cdash option when --log-format=junit")
# If any CDash option is passed, assume --log-format=cdash is implied
if namespace.log_format is None and has_any_cdash_option:
namespace.log_format = "cdash"
namespace.reporter = _cdash_reporter(namespace)
class ConfigSetAction(argparse.Action):
"""Generic action for setting spack config options from CLI.
@@ -514,15 +479,7 @@ def add_concretizer_args(subparser):
dest="concretizer:reuse",
const=True,
default=None,
help="reuse installed packages/buildcaches when possible",
)
subgroup.add_argument(
"--reuse-deps",
action=ConfigSetAction,
dest="concretizer:reuse",
const="dependencies",
default=None,
help="reuse installed dependencies only",
help="reuse installed dependencies/buildcaches when possible",
)


@@ -12,11 +12,7 @@
import spack.build_environment as build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.error
import spack.paths
import spack.spec
import spack.store
from spack import traverse
from spack.util.environment import dump_environment, pickle_environment
@@ -42,41 +38,6 @@ def setup_parser(subparser):
)
class AreDepsInstalledVisitor:
def __init__(self, context="build"):
if context not in ("build", "test"):
raise ValueError("context can only be build or test")
if context == "build":
self.direct_deps = ("build", "link", "run")
else:
self.direct_deps = ("build", "test", "link", "run")
self.has_uninstalled_deps = False
def accept(self, item):
# The root may be installed or uninstalled.
if item.depth == 0:
return True
# Early exit after we've seen an uninstalled dep.
if self.has_uninstalled_deps:
return False
spec = item.edge.spec
if not spec.external and not spec.installed:
self.has_uninstalled_deps = True
return False
return True
def neighbors(self, item):
# Direct deps: follow build & test edges.
# Transitive deps: follow link / run.
deptypes = self.direct_deps if item.depth == 0 else ("link", "run")
return item.edge.spec.edges_to_dependencies(deptype=deptypes)
def emulate_env_utility(cmd_name, context, args):
if not args.spec:
tty.die("spack %s requires a spec." % cmd_name)
@@ -104,27 +65,6 @@ def emulate_env_utility(cmd_name, context, args):
spec = spack.cmd.matching_spec_from_env(spec)
# Require that dependencies are installed.
visitor = AreDepsInstalledVisitor(context=context)
# Mass install check needs read transaction.
with spack.store.db.read_transaction():
traverse.traverse_breadth_first_with_visitor([spec], traverse.CoverNodesVisitor(visitor))
if visitor.has_uninstalled_deps:
raise spack.error.SpackError(
f"Not all dependencies of {spec.name} are installed. "
f"Cannot setup {context} environment:",
spec.tree(
status_fn=spack.spec.Spec.install_status,
hashlen=7,
hashes=True,
# This shows more than necessary, but we cannot dynamically change deptypes
# in Spec.tree(...).
deptypes="all" if context == "build" else ("build", "test", "link", "run"),
),
)
build_environment.setup_package(spec.package, args.dirty, context)
if args.dump:


@@ -408,7 +408,13 @@ def config_prefer_upstream(args):
pkgs = {}
for spec in pref_specs:
# Collect all the upstream compilers and versions for this package.
pkg = pkgs.get(spec.name, {"version": [], "compiler": []})
pkg = pkgs.get(
spec.name,
{
"version": [],
"compiler": [],
},
)
pkgs[spec.name] = pkg
# We have no existing variant if this is our first added version.


@@ -16,10 +16,19 @@
import spack.stage
import spack.util.web
from spack.spec import Spec
from spack.url import UndetectableNameError, UndetectableVersionError, parse_name, parse_version
from spack.url import (
UndetectableNameError,
UndetectableVersionError,
parse_name,
parse_version,
)
from spack.util.editor import editor
from spack.util.executable import ProcessError, which
from spack.util.naming import mod_to_class, simplify_name, valid_fully_qualified_module_name
from spack.util.naming import (
mod_to_class,
simplify_name,
valid_fully_qualified_module_name,
)
description = "create a new package file"
section = "packaging"
@@ -61,7 +70,7 @@ class {class_name}({base_class_name}):
# FIXME: Add a list of GitHub accounts to
# notify when the package is updated.
# maintainers("github_user1", "github_user2")
# maintainers = ["github_user1", "github_user2"]
{versions}


@@ -96,5 +96,8 @@ def report(args):
def debug(parser, args):
action = {"create-db-tarball": create_db_tarball, "report": report}
action = {
"create-db-tarball": create_db_tarball,
"report": report,
}
action[args.debug_command](args)


@@ -33,7 +33,12 @@
level = "long"
# Arguments for display_specs when we find ambiguity
display_args = {"long": True, "show_flags": True, "variants": True, "indent": 4}
display_args = {
"long": True,
"show_flags": True,
"variants": True,
"indent": 4,
}
def setup_parser(sp):


@@ -46,14 +46,6 @@ def setup_parser(subparser):
)
def shift(asp_function):
"""Transforms ``attr("foo", "bar")`` into ``foo("bar")``."""
if not asp_function.args:
raise ValueError(f"Can't shift ASP function with no arguments: {str(asp_function)}")
first, *rest = asp_function.args
return asp.AspFunction(first, rest)
def compare_specs(a, b, to_string=False, color=None):
"""
Generate a comparison, including diffs (for each side) and an intersection.
@@ -79,13 +71,23 @@ def compare_specs(a, b, to_string=False, color=None):
# get facts for specs, making sure to include build dependencies of concrete
# specs and to descend into dependency hashes so we include all facts.
a_facts = set(
shift(func)
for func in setup.spec_clauses(a, body=True, expand_hashes=True, concrete_build_deps=True)
func.shift()
for func in setup.spec_clauses(
a,
body=True,
expand_hashes=True,
concrete_build_deps=True,
)
if func.name == "attr"
)
b_facts = set(
shift(func)
for func in setup.spec_clauses(b, body=True, expand_hashes=True, concrete_build_deps=True)
func.shift()
for func in setup.spec_clauses(
b,
body=True,
expand_hashes=True,
concrete_build_deps=True,
)
if func.name == "attr"
)
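Both sides of the hunk above perform the rewriting described in the removed shift() docstring: attr("foo", "bar") becomes foo("bar"), i.e. the first argument is promoted to the function name. A standalone sketch of that transformation, using plain (name, args) tuples instead of the real AspFunction class (the tuple representation is only for illustration):

def shift(name, args):
    # Promote the first argument to the new function name:
    # ("attr", ("node", "zlib")) -> ("node", ("zlib",))
    if not args:
        raise ValueError(f"can't shift a function with no arguments: {name}")
    first, *rest = args
    return first, tuple(rest)


# Example: shift("attr", ("version", "zlib", "1.2.13")) == ("version", ("zlib", "1.2.13"))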


@@ -148,7 +148,8 @@ def env_activate(args):
if not args.shell:
spack.cmd.common.shell_init_instructions(
"spack env activate", " eval `spack env activate {sh_arg} [...]`"
"spack env activate",
" eval `spack env activate {sh_arg} [...]`",
)
return 1
@@ -165,7 +166,7 @@ def env_activate(args):
short_name = os.path.basename(env_path)
ev.Environment(env).write(regenerate=False)
# Managed environment
# Named environment
elif ev.exists(env_name_or_dir) and not args.dir:
env_path = ev.root(env_name_or_dir)
short_name = env_name_or_dir
@@ -237,7 +238,8 @@ def env_deactivate_setup_parser(subparser):
def env_deactivate(args):
if not args.shell:
spack.cmd.common.shell_init_instructions(
"spack env deactivate", " eval `spack env deactivate {sh_arg}`"
"spack env deactivate",
" eval `spack env deactivate {sh_arg}`",
)
return 1


@@ -38,7 +38,11 @@ def setup_parser(subparser):
default=False,
help="packages with detected externals won't be built with Spack",
)
find_parser.add_argument("--exclude", action="append", help="packages to exclude from search")
find_parser.add_argument(
"--exclude",
action="append",
help="packages to exclude from search",
)
find_parser.add_argument(
"-p",
"--path",
@@ -183,6 +187,7 @@ def external_read_cray_manifest(args):
def _collect_and_consume_cray_manifest_files(
manifest_file=None, manifest_directory=None, dry_run=False, fail_on_error=False
):
manifest_files = []
if manifest_file:
manifest_files.append(manifest_file)


@@ -25,7 +25,10 @@ def setup_parser(subparser):
help="fetch only missing (not yet installed) dependencies",
)
subparser.add_argument(
"-D", "--dependencies", action="store_true", help="also fetch all dependencies"
"-D",
"--dependencies",
action="store_true",
help="also fetch all dependencies",
)
arguments.add_common_arguments(subparser, ["specs"])
subparser.epilog = (


@@ -9,7 +9,13 @@
import spack.config
import spack.environment as ev
import spack.store
from spack.graph import DAGWithDependencyTypes, SimpleDAG, graph_ascii, graph_dot, static_graph_dot
from spack.graph import (
DAGWithDependencyTypes,
SimpleDAG,
graph_ascii,
graph_dot,
static_graph_dot,
)
description = "generate graphs of package dependency relationships"
section = "basic"


@@ -39,14 +39,12 @@
compiler flags:
@g{cflags="flags"} cppflags, cflags, cxxflags,
fflags, ldflags, ldlibs
@g{==} propagate flags to package dependencies
variants:
@B{+variant} enable <variant>
@r{-variant} or @r{~variant} disable <variant>
@B{variant=value} set non-boolean <variant> to <value>
@B{variant=value1,value2,value3} set multi-value <variant> values
@B{++}, @r{--}, @r{~~}, @B{==} propagate variants to package dependencies
architecture variants:
@m{platform=platform} linux, darwin, cray, etc.
@@ -70,8 +68,6 @@
hdf5 @c{@1.8:} @g{%gcc} hdf5 1.8 or higher built with gcc
hdf5 @B{+mpi} hdf5 with mpi enabled
hdf5 @r{~mpi} hdf5 with mpi disabled
hdf5 @B{++mpi} hdf5 with mpi enabled and propagates
hdf5 @r{~~mpi} hdf5 with mpi disabled and propagates
hdf5 @B{+mpi} ^mpich hdf5 with mpi, using mpich
hdf5 @B{+mpi} ^openmpi@c{@1.7} hdf5 with mpi, using openmpi 1.7
boxlib @B{dim=2} boxlib built for 2 dimensions
@@ -82,7 +78,9 @@
"""
guides = {"spec": spec_guide}
guides = {
"spec": spec_guide,
}
def setup_parser(subparser):


@@ -283,7 +283,7 @@ def print_tests(pkg):
c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
if pkg.name in c_names:
v_names.extend(["c", "cxx", "fortran"])
if pkg.spec.intersects("llvm+clang"):
if pkg.spec.satisfies("llvm+clang"):
v_names.extend(["c", "cxx"])
# TODO Refactor END


@@ -7,6 +7,7 @@
import os
import shutil
import sys
import textwrap
from typing import List
import llnl.util.filesystem as fs
@@ -259,10 +260,165 @@ def default_log_file(spec):
def report_filename(args: argparse.Namespace, specs: List[spack.spec.Spec]) -> str:
"""Return the filename to be used for reporting to JUnit or CDash format."""
result = args.log_file or default_log_file(specs[0])
result = args.log_file or args.cdash_upload_url or default_log_file(specs[0])
return result
def install_specs(specs, install_kwargs, cli_args):
try:
if ev.active_environment():
install_specs_inside_environment(specs, install_kwargs, cli_args)
else:
install_specs_outside_environment(specs, install_kwargs)
except spack.build_environment.InstallError as e:
if cli_args.show_log_on_error:
e.print_context()
assert e.pkg, "Expected InstallError to include the associated package"
if not os.path.exists(e.pkg.build_log_path):
tty.error("'spack install' created no log.")
else:
sys.stderr.write("Full build log:\n")
with open(e.pkg.build_log_path) as log:
shutil.copyfileobj(log, sys.stderr)
raise
def install_specs_inside_environment(specs, install_kwargs, cli_args):
specs_to_install, specs_to_add = [], []
env = ev.active_environment()
for abstract, concrete in specs:
# This won't find specs added to the env since last
# concretize, therefore should we consider enforcing
# concretization of the env before allowing to install
# specs?
m_spec = env.matching_spec(abstract)
# If there is any ambiguity in the above call to matching_spec
# (i.e. if more than one spec in the environment matches), then
# SpackEnvironmentError is raised, with a message listing the
# matches. Getting to this point means there were either
# no matches or exactly one match.
if not m_spec and not cli_args.add:
msg = (
"Cannot install '{0}' because it is not in the current environment."
" You can add it to the environment with 'spack add {0}', or as part"
" of the install command with 'spack install --add {0}'"
).format(str(abstract))
tty.die(msg)
if not m_spec:
tty.debug("adding {0} as a root".format(abstract.name))
specs_to_add.append((abstract, concrete))
continue
tty.debug("exactly one match for {0} in env -> {1}".format(m_spec.name, m_spec.dag_hash()))
if m_spec in env.roots() or not cli_args.add:
# either the single match is a root spec (in which case
# the spec is not added to the env again), or the user did
# not specify --add (in which case it is assumed we are
# installing already-concretized specs in the env)
tty.debug("just install {0}".format(m_spec.name))
specs_to_install.append(m_spec)
else:
# the single match is not a root (i.e. it's a dependency),
# and --add was specified, so we'll add it as a
# root before installing
tty.debug("add {0} then install it".format(m_spec.name))
specs_to_add.append((abstract, concrete))
if specs_to_add:
tty.debug("Adding the following specs as roots:")
for abstract, concrete in specs_to_add:
tty.debug(" {0}".format(abstract.name))
with env.write_transaction():
specs_to_install.append(env.concretize_and_add(abstract, concrete))
env.write(regenerate=False)
# Install the validated list of cli specs
if specs_to_install:
tty.debug("Installing the following cli specs:")
for s in specs_to_install:
tty.debug(" {0}".format(s.name))
env.install_specs(specs_to_install, **install_kwargs)
def install_specs_outside_environment(specs, install_kwargs):
installs = [(concrete.package, install_kwargs) for _, concrete in specs]
builder = PackageInstaller(installs)
builder.install()
def print_cdash_help():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(
"""\
environment variables:
SPACK_CDASH_AUTH_TOKEN
authentication token to present to CDash
"""
),
)
arguments.add_cdash_args(parser, True)
parser.print_help()
def install_all_specs_from_active_environment(
install_kwargs, only_concrete, cli_test_arg, reporter_factory
):
"""Install all specs from the active environment
Args:
install_kwargs (dict): dictionary of options to be passed to the installer
only_concrete (bool): if true don't concretize the environment, but install
only the specs that are already concrete
cli_test_arg (bool or str): command line argument to select which test to run
reporter_factory: callable that produces the reporter object for the installations
"""
env = ev.active_environment()
if not env:
msg = "install requires a package argument or active environment"
if "spack.yaml" in os.listdir(os.getcwd()):
# There's a spack.yaml file in the working dir, the user may
# have intended to use that
msg += "\n\n"
msg += "Did you mean to install using the `spack.yaml`"
msg += " in this directory? Try: \n"
msg += " spack env activate .\n"
msg += " spack install\n"
msg += " OR\n"
msg += " spack --env . install"
tty.die(msg)
install_kwargs["tests"] = compute_tests_install_kwargs(env.user_specs, cli_test_arg)
if not only_concrete:
with env.write_transaction():
concretized_specs = env.concretize(tests=install_kwargs["tests"])
ev.display_specs(concretized_specs)
# save view regeneration for later, so that we only do it
# once, as it can be slow.
env.write(regenerate=False)
specs = env.all_specs()
if not specs:
msg = "{0} environment has no specs to install".format(env.name)
tty.msg(msg)
return
reporter = reporter_factory(specs) or lang.nullcontext()
tty.msg("Installing environment {0}".format(env.name))
with reporter:
env.install_all(**install_kwargs)
tty.debug("Regenerating environment views for {0}".format(env.name))
with env.write_transaction():
# write env to trigger view generation and modulefile
# generation
env.write()
def compute_tests_install_kwargs(specs, cli_test_arg):
"""Translate the test cli argument into the proper install argument"""
if cli_test_arg == "all":
@@ -272,6 +428,43 @@ def compute_tests_install_kwargs(specs, cli_test_arg):
return False
def specs_from_cli(args, install_kwargs):
"""Return abstract and concrete spec parsed from the command line."""
abstract_specs = spack.cmd.parse_specs(args.spec)
install_kwargs["tests"] = compute_tests_install_kwargs(abstract_specs, args.test)
try:
concrete_specs = spack.cmd.parse_specs(
args.spec, concretize=True, tests=install_kwargs["tests"]
)
except SpackError as e:
tty.debug(e)
if args.log_format is not None:
reporter = args.reporter()
reporter.concretization_report(report_filename(args, abstract_specs), e.message)
raise
return abstract_specs, concrete_specs
def concrete_specs_from_file(args):
"""Return the list of concrete specs read from files."""
result = []
for file in args.specfiles:
with open(file, "r") as f:
if file.endswith("yaml") or file.endswith("yml"):
s = spack.spec.Spec.from_yaml(f)
else:
s = spack.spec.Spec.from_json(f)
concretized = s.concretized()
if concretized.dag_hash() != s.dag_hash():
msg = 'skipped invalid file "{0}". '
msg += "The file does not contain a concrete spec."
tty.warn(msg.format(file))
continue
result.append(concretized)
return result
def require_user_confirmation_for_overwrite(concrete_specs, args):
if args.yes_to_all:
return
@@ -298,40 +491,12 @@ def require_user_confirmation_for_overwrite(concrete_specs, args):
tty.die("Reinstallation aborted.")
def _dump_log_on_error(e: spack.build_environment.InstallError):
e.print_context()
assert e.pkg, "Expected InstallError to include the associated package"
if not os.path.exists(e.pkg.build_log_path):
tty.error("'spack install' created no log.")
else:
sys.stderr.write("Full build log:\n")
with open(e.pkg.build_log_path, errors="replace") as log:
shutil.copyfileobj(log, sys.stderr)
def _die_require_env():
msg = "install requires a package argument or active environment"
if "spack.yaml" in os.listdir(os.getcwd()):
# There's a spack.yaml file in the working dir, the user may
# have intended to use that
msg += (
"\n\n"
"Did you mean to install using the `spack.yaml`"
" in this directory? Try: \n"
" spack env activate .\n"
" spack install\n"
" OR\n"
" spack --env . install"
)
tty.die(msg)
def install(parser, args):
# TODO: unify args.verbose?
tty.set_verbose(args.verbose or args.install_verbose)
if args.help_cdash:
arguments.print_cdash_help()
print_cdash_help()
return
if args.no_checksum:
@@ -340,150 +505,43 @@ def install(parser, args):
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
arguments.sanitize_reporter_options(args)
def reporter_factory(specs):
if args.log_format is None:
return lang.nullcontext()
return None
return spack.report.build_context_manager(
reporter=args.reporter(), filename=report_filename(args, specs=specs), specs=specs
context_manager = spack.report.build_context_manager(
reporter=args.reporter(),
filename=report_filename(args, specs=specs),
specs=specs,
)
return context_manager
install_kwargs = install_kwargs_from_args(args)
env = ev.active_environment()
if not env and not args.spec and not args.specfiles:
_die_require_env()
try:
if env:
install_with_active_env(env, args, install_kwargs, reporter_factory)
else:
install_without_active_env(args, install_kwargs, reporter_factory)
except spack.build_environment.InstallError as e:
if args.show_log_on_error:
_dump_log_on_error(e)
raise
def _maybe_add_and_concretize(args, env, specs):
"""Handle the overloaded spack install behavior of adding
and automatically concretizing specs"""
# Users can opt out of accidental concretizations with --only-concrete
if args.only_concrete:
if not args.spec and not args.specfiles:
# If there are no args but an active environment then install the packages from it.
install_all_specs_from_active_environment(
install_kwargs=install_kwargs,
only_concrete=args.only_concrete,
cli_test_arg=args.test,
reporter_factory=reporter_factory,
)
return
# Otherwise, we will modify the environment.
with env.write_transaction():
# `spack add` adds these specs.
if args.add:
for spec in specs:
env.add(spec)
# Specs from CLI
abstract_specs, concrete_specs = specs_from_cli(args, install_kwargs)
# `spack concretize`
tests = compute_tests_install_kwargs(env.user_specs, args.test)
concretized_specs = env.concretize(tests=tests)
ev.display_specs(concretized_specs)
# save view regeneration for later, so that we only do it
# once, as it can be slow.
env.write(regenerate=False)
def install_with_active_env(env: ev.Environment, args, install_kwargs, reporter_factory):
specs = spack.cmd.parse_specs(args.spec)
# The following two commands are equivalent:
# 1. `spack install --add x y z`
# 2. `spack add x y z && spack concretize && spack install --only-concrete`
# here we do the `add` and `concretize` part.
_maybe_add_and_concretize(args, env, specs)
# Now we're doing `spack install --only-concrete`.
if args.add or not specs:
specs_to_install = env.concrete_roots()
if not specs_to_install:
tty.msg(f"{env.name} environment has no specs to install")
return
# `spack install x y z` without --add is installing matching specs in the env.
else:
specs_to_install = env.all_matching_specs(*specs)
if not specs_to_install:
msg = (
"Cannot install '{0}' because no matching specs are in the current environment."
" You can add specs to the environment with 'spack add {0}', or as part"
" of the install command with 'spack install --add {0}'"
).format(" ".join(args.spec))
tty.die(msg)
install_kwargs["tests"] = compute_tests_install_kwargs(specs_to_install, args.test)
if args.overwrite:
require_user_confirmation_for_overwrite(specs_to_install, args)
install_kwargs["overwrite"] = [spec.dag_hash() for spec in specs_to_install]
try:
with reporter_factory(specs_to_install):
env.install_specs(specs_to_install, **install_kwargs)
finally:
# TODO: this is doing way too much to trigger
# views and modules to be generated.
with env.write_transaction():
env.write(regenerate=True)
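The comment in install_with_active_env above notes that `spack install --add x y z` is equivalent to `spack add x y z && spack concretize && spack install --only-concrete`. A hedged illustration of that equivalence using spack.main.SpackCommand, the callable wrapper used in Spack's command tests; x, y and z are placeholder package names:

from spack.main import SpackCommand

add = SpackCommand("add")
concretize = SpackCommand("concretize")
install = SpackCommand("install")

# One-shot form: add the specs to the active environment, concretize, install.
install("--add", "x", "y", "z")

# Split form: the same three steps issued as separate commands.
add("x", "y", "z")
concretize()
install("--only-concrete")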
def concrete_specs_from_cli(args, install_kwargs):
"""Return abstract and concrete spec parsed from the command line."""
abstract_specs = spack.cmd.parse_specs(args.spec)
install_kwargs["tests"] = compute_tests_install_kwargs(abstract_specs, args.test)
try:
concrete_specs = spack.cmd.parse_specs(
args.spec, concretize=True, tests=install_kwargs["tests"]
)
except SpackError as e:
tty.debug(e)
if args.log_format is not None:
reporter = args.reporter()
reporter.concretization_report(report_filename(args, abstract_specs), e.message)
raise
return concrete_specs
def concrete_specs_from_file(args):
"""Return the list of concrete specs read from files."""
result = []
for file in args.specfiles:
with open(file, "r") as f:
if file.endswith("yaml") or file.endswith("yml"):
s = spack.spec.Spec.from_yaml(f)
else:
s = spack.spec.Spec.from_json(f)
concretized = s.concretized()
if concretized.dag_hash() != s.dag_hash():
msg = 'skipped invalid file "{0}". '
msg += "The file does not contain a concrete spec."
tty.warn(msg.format(file))
continue
result.append(concretized)
return result
def install_without_active_env(args, install_kwargs, reporter_factory):
concrete_specs = concrete_specs_from_cli(args, install_kwargs) + concrete_specs_from_file(args)
# Concrete specs from YAML or JSON files
specs_from_file = concrete_specs_from_file(args)
abstract_specs.extend(specs_from_file)
concrete_specs.extend(specs_from_file)
if len(concrete_specs) == 0:
tty.die("The `spack install` command requires a spec to install.")
with reporter_factory(concrete_specs):
reporter = reporter_factory(concrete_specs) or lang.nullcontext()
with reporter:
if args.overwrite:
require_user_confirmation_for_overwrite(concrete_specs, args)
install_kwargs["overwrite"] = [spec.dag_hash() for spec in concrete_specs]
installs = [(s.package, install_kwargs) for s in concrete_specs]
builder = PackageInstaller(installs)
builder.install()
install_specs(zip(abstract_specs, concrete_specs), install_kwargs, args)


@@ -58,7 +58,10 @@
#: licensed files that can have LGPL language in them
#: so far, just this command -- so it can find LGPL things elsewhere
lgpl_exceptions = [r"lib/spack/spack/cmd/license.py", r"lib/spack/spack/test/cmd/license.py"]
lgpl_exceptions = [
r"lib/spack/spack/cmd/license.py",
r"lib/spack/spack/test/cmd/license.py",
]
def _all_spack_files(root=spack.paths.prefix):
@@ -126,6 +129,7 @@ def error_messages(self):
def _check_license(lines, path):
found = []
for line in lines:


@@ -98,7 +98,8 @@ def load(parser, args):
if not args.shell:
specs_str = " ".join(args.constraint) or "SPECS"
spack.cmd.common.shell_init_instructions(
"spack load", " eval `spack load {sh_arg} %s`" % specs_str
"spack load",
" eval `spack load {sh_arg} %s`" % specs_str,
)
return 1


@@ -95,7 +95,7 @@ def location(parser, args):
spack.cmd.require_active_env("location -e")
path = ev.active_environment().path
else:
# Get path of requested environment
# Get named environment path
if not ev.exists(args.location_env):
tty.die("no such environment: '%s'" % args.location_env)
path = ev.root(args.location_env)


@@ -27,7 +27,12 @@
"""
# Arguments for display_specs when we find ambiguity
display_args = {"long": True, "show_flags": False, "variants": False, "indent": 4}
display_args = {
"long": True,
"show_flags": False,
"variants": False,
"indent": 4,
}
def setup_parser(subparser):


@@ -335,7 +335,7 @@ def not_excluded_fn(args):
exclude_specs.extend(spack.cmd.parse_specs(str(args.exclude_specs).split()))
def not_excluded(x):
return not any(x.satisfies(y) for y in exclude_specs)
return not any(x.satisfies(y, strict=True) for y in exclude_specs)
return not_excluded
@@ -445,7 +445,9 @@ def mirror_create(args):
mirror_specs = concrete_specs_from_user(args)
create_mirror_for_individual_specs(
mirror_specs, path=path, skip_unstable_versions=args.skip_unstable_versions
mirror_specs,
path=path,
skip_unstable_versions=args.skip_unstable_versions,
)
@@ -465,7 +467,9 @@ def create_mirror_for_all_specs(path, skip_unstable_versions, selection_fn):
def create_mirror_for_all_specs_inside_environment(path, skip_unstable_versions, selection_fn):
mirror_specs = concrete_specs_from_environment(selection_fn=selection_fn)
create_mirror_for_individual_specs(
mirror_specs, path=path, skip_unstable_versions=skip_unstable_versions
mirror_specs,
path=path,
skip_unstable_versions=skip_unstable_versions,
)


@@ -180,7 +180,10 @@ def loads(module_type, specs, args, out=None):
for spec in specs
)
module_commands = {"tcl": "module load ", "lmod": "module load "}
module_commands = {
"tcl": "module load ",
"lmod": "module load ",
}
d = {"command": "" if not args.shell else module_commands[module_type], "prefix": args.prefix}
@@ -365,14 +368,18 @@ def refresh(module_type, specs, args):
def modules_cmd(parser, args, module_type, callbacks=callbacks):
# Qualifiers to be used when querying the db for specs
constraint_qualifiers = {"refresh": {"installed": True, "known": True}}
constraint_qualifiers = {
"refresh": {"installed": True, "known": True},
}
query_args = constraint_qualifiers.get(args.subparser_name, {})
# Get the specs that match the query from the DB
specs = args.specs(**query_args)
try:
callbacks[args.subparser_name](module_type, specs, args)
except MultipleSpecsMatch:


@@ -97,28 +97,41 @@ def setup_parser(subparser):
def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat()
opt, _, _ = min(result.answers)
opt, *_ = min(result.answers)
# dump the solutions as concretized specs
if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels)
tty.msg("Optimization Criteria:")
maxlen = max(len(s[2]) for s in result.criteria)
color.cprint("@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " "))
maxlen = max(len(name) for name in result.criteria)
max_depth = max(len(v) for v in result.criteria.values() if isinstance(v, list))
fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen
for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1):
color.cprint(
fmt
% (
i,
name,
"-" if build_cost is None else installed_cost,
installed_cost if build_cost is None else build_cost,
header = "@*{"
header += "".join(f"{depth:<4}" for depth in range(max_depth))
header += "Criterion}"
color.cprint(header)
# make non-zero numbers red
def highlight(n, c):
return color.colorize(f"@{c}{{{n:<4}}}" if n > 0 else f"{n:<4}")
for i, (name, cost) in enumerate(result.criteria.items(), 1):
colored_name = name.replace("build:", "@c{build:}")
colored_name = colored_name.replace("reuse:", "@B{reuse:}")
colored_name = colored_name.replace("fixed:", "@G{fixed:}")
colored_name = color.colorize(colored_name)
if isinstance(cost, int):
print(highlight(cost, "G") + " " * (max_depth - 1) + colored_name)
else:
print(
"".join(highlight(c, "c" if "build:" in name else "B") for c in cost)
+ colored_name
)
)
print()
# dump the solutions as concretized specs
if "solutions" in show:
for spec in result.specs:
# With -y, just print YAML to output.
@@ -182,7 +195,11 @@ def solve(parser, args):
# set up solver parameters
# Note: reuse and other concretizer prefs are passed as configuration
result = solver.solve(
specs, out=output, timers=args.timers, stats=args.stats, setup_only=setup_only
specs,
out=output,
timers=args.timers,
stats=args.stats,
setup_only=setup_only,
)
if not setup_only:
_process_result(result, show, required_format, kwargs)
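A small worked example of the depth-column header and cost highlighting assembled in _process_result above; max_depth is fixed at 3 purely for illustration, and the sketch prints raw color markup rather than passing it through llnl.util.tty.color:

max_depth = 3  # illustrative; normally derived from the lists in result.criteria

# One left-aligned, four-character column per DAG depth: "0   1   2   "
header = "".join(f"{depth:<4}" for depth in range(max_depth))


def highlight(n, c):
    # Non-zero costs get color markup (e.g. "@c{2   }"); zeros stay plain.
    return f"@{c}{{{n:<4}}}" if n > 0 else f"{n:<4}"


print(header + "Criterion")
costs = [0, 2, 1]  # hypothetical per-depth cost vector for one criterion
print("".join(highlight(cost, "c") for cost in costs) + "build: example criterion")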


@@ -110,7 +110,7 @@ def spec(parser, args):
else:
tty.die("spack spec requires at least one spec or an active environment")
for input, output in specs:
for (input, output) in specs:
# With -y, just print YAML to output.
if args.format:
if args.format == "yaml":

Some files were not shown because too many files have changed in this diff.