Compare commits

..

39 Commits

Author  SHA1  Message  Date
Gregory Becker  f70d2c7ccb  fixup Spec parsing in error messages  2022-05-20 00:56:15 -07:00
Gregory Becker  62d18d9af7  refactor to single error predicate  2022-05-19 14:12:21 -07:00
Gregory Becker  8824040eda  conflicts: better message for conflicts with no when=  2022-05-19 12:00:09 -07:00
Gregory Becker  d744b83584  error messages: prefix error facts with error_  2022-05-19 11:01:34 -07:00
Gregory Becker  14601b59ef  missing self argument  2022-05-19 09:55:48 -07:00
Gregory Becker  8d7360106f  internal_error: minor refactor for clarity  2022-05-19 09:52:31 -07:00
Gregory Becker  881f184eab  version selection: improve comment  2022-05-19 09:34:40 -07:00
Gregory Becker  63bec85a24  version_unsatisfiable: improve comments  2022-05-19 09:32:06 -07:00
Gregory Becker  159975a561  concretizer: improve comments on one-sided comparisons  2022-05-19 09:17:56 -07:00
Gregory Becker  25ba7d071a  error message for unsatisfiable compiler version  2022-05-18 16:37:06 -07:00
Gregory Becker  54b79d5661  cleanup virtual versions  2022-05-18 15:07:06 -07:00
Gregory Becker  235f93c241  cleanup error code in concretize.lp  2022-05-18 12:59:06 -07:00
Gregory Becker  a73c5ffb0b  flake  2022-05-17 13:36:15 -07:00
Gregory Becker  ce53ce284b  error messages: use production quality messages  2022-05-17 11:11:50 -07:00
Gregory Becker  ae68318475  update test for new error messages  2022-05-16 17:01:26 -07:00
Gregory Becker  bd63c19b94  ensure error messages evaluated first  2022-05-16 14:54:53 -07:00
Gregory Becker  3f3bcacd16  flake  2022-05-16 12:20:30 -07:00
Gregory Becker  d349d4ab0b  internal errors: request bug report and give debug-level output automatically  2022-05-16 10:41:50 -07:00
Gregory Becker  e61b3c96f6  concretizer tests: clean up temporary values  2022-05-16 10:36:23 -07:00
Gregory Becker  bcbb0a3b85  concretizer: cleanup commented-out code  2022-05-16 10:28:27 -07:00
Gregory Becker  3c556ab318  cleanup, remove show-cores option  2022-05-16 10:20:35 -07:00
Gregory Becker  563ae5188e  improve multiple-variant-value error  2022-05-15 11:59:58 -07:00
Gregory Becker  f4b3561f71  wip try without unsat cores  2022-05-13 17:08:26 -07:00
Gregory Becker  0748a1b290  fix version weight  2022-05-13 16:36:56 -07:00
Gregory Becker  0bfece0c5e  fix platform errors  2022-05-13 16:23:58 -07:00
Gregory Becker  6e9b16279a  wip  2022-05-13 12:25:57 -07:00
Gregory Becker  cf71baff30  refactor calculation of assumption_names  2022-05-13 12:25:57 -07:00
Gregory Becker  b9831acb44  solver: messages for dependency conditions  2022-05-13 12:25:57 -07:00
Gregory Becker  5f2edb1860  fixup  2022-05-13 12:25:57 -07:00
Gregory Becker  8b6809cf66  solver: bypass conditional logic for unconditional variants; add message to variant conditionals  2022-05-13 12:25:57 -07:00
Gregory Becker  a93a4274aa  solver: add messages to variant_value/provider/external conditions  2022-05-13 12:25:57 -07:00
Gregory Becker  59bb42ee7f  solver: add messages to conditions for conflict triggers/constraints  2022-05-13 12:25:57 -07:00
Gregory Becker  11d382bff1  show-cores: fewer modes required  2022-05-13 12:25:57 -07:00
Gregory Becker  57889ec446  solver: orthogonalize variant_value maxima and minima  2022-05-13 12:25:57 -07:00
Gregory Becker  66bda49c44  solver: rework which facts are assumptions  2022-05-13 12:25:57 -07:00
Gregory Becker  94a2ab5359  solver: error -> internal_error for errors that should not be exposed generally  2022-05-13 12:25:56 -07:00
Gregory Becker  6d0044f703  solver: rename version_satisfies/2 to node_version_satisfies/2 for clarity  2022-05-13 12:25:56 -07:00
Gregory Becker  3fa447a84a  solver: clean up emission of conflicts  2022-05-13 12:25:56 -07:00
Gregory Becker  d501ce0c7e  remove special handling for error/rule atoms  2022-05-13 12:25:56 -07:00
6882 changed files with 11759 additions and 20823 deletions

View File

@@ -24,7 +24,6 @@ jobs:
fedora-clingo-sources:
runs-on: ubuntu-latest
container: "fedora:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -58,7 +57,6 @@ jobs:
ubuntu-clingo-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -95,7 +93,6 @@ jobs:
ubuntu-clingo-binaries-and-patchelf:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -129,7 +126,6 @@ jobs:
opensuse-clingo-sources:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -158,7 +154,6 @@ jobs:
macos-clingo-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -179,7 +174,6 @@ jobs:
strategy:
matrix:
python-version: ['3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -201,7 +195,6 @@ jobs:
strategy:
matrix:
python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
@@ -223,7 +216,6 @@ jobs:
ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -258,7 +250,6 @@ jobs:
ubuntu-gnupg-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
env:
@@ -294,7 +285,6 @@ jobs:
macos-gnupg-binaries:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |
@@ -312,7 +302,6 @@ jobs:
macos-gnupg-sources:
runs-on: macos-latest
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
run: |

View File

@@ -43,7 +43,6 @@ jobs:
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
@@ -76,7 +75,7 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535
with:
name: dockerfiles
path: dockerfiles
@@ -95,7 +94,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # @v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}

View File

@@ -22,7 +22,6 @@ on:
jobs:
install_gcc:
name: gcc with clang
if: github.repository == 'spack/spack'
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
@@ -37,7 +36,6 @@ jobs:
install_jupyter_clang:
name: jupyter
if: github.repository == 'spack/spack'
runs-on: macos-latest
timeout-minutes: 700
steps:
@@ -52,7 +50,6 @@ jobs:
install_scipy_clang:
name: scipy, mpl, pd
if: github.repository == 'spack/spack'
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2

View File

@@ -4,7 +4,6 @@ Set-Location spack
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
git config --global core.longpaths true
if ($(git branch --show-current) -ne "develop")
{

View File

@@ -139,11 +139,11 @@ jobs:
echo "installer_root=$((pwd).Path)" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
env:
ProgressPreference: SilentlyContinue
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
- uses: actions/upload-artifact@v3
with:
name: Windows Spack Installer Bundle
path: ${{ env.installer_root }}\pkg\Spack.exe
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
- uses: actions/upload-artifact@v3
with:
name: Windows Spack Installer
path: ${{ env.installer_root}}\pkg\Spack.msi

View File

@@ -1,205 +1,3 @@
# v0.18.0 (2022-05-28)
`v0.18.0` is a major feature release.
## Major features in this release
1. **Concretizer now reuses by default**
`spack install --reuse` was introduced in `v0.17.0`, and `--reuse`
is now the default concretization mode. Spack will try hard to
resolve dependencies using installed packages or binaries (#30396).
To avoid reuse and to use the latest package configurations (the
old default), you can use `spack install --fresh`, or add
configuration like this to your environment or `concretizer.yaml`:
```yaml
concretizer:
reuse: false
```
2. **Finer-grained hashes**
Spack hashes now include `link`, `run`, *and* `build` dependencies,
as well as a canonical hash of package recipes. Previously, hashes
only included `link` and `run` dependencies (though `build`
dependencies were stored by environments). We coarsened the hash to
reduce churn in user installations, but the new default concretizer
behavior mitigates this concern and gets us reuse *and* provenance.
You will be able to see the build dependencies of new installations
with `spack find`. Old installations will not change and their
hashes will not be affected. (#28156, #28504, #30717, #30861)
3. **Improved error messages**
Error handling with the new concretizer is now done with
optimization criteria rather than with unsatisfiable cores, and
Spack reports many more details about conflicting constraints.
(#30669)
4. **Unify environments when possible**
Environments have thus far supported `concretization: together` or
`concretization: separately`. These have been replaced by a new
preference in `concretizer.yaml`:
```yaml
concretizer:
unify: [true|false|when_possible]
```
`concretizer:unify:when_possible` will *try* to resolve a fully
unified environment, but if it cannot, it will create multiple
configurations of some packages where it has to. For large
environments that previously had to be concretized separately, this
can result in a huge speedup (40-50x). (#28941)
5. **Automatically find externals on Cray machines**
Spack can now automatically discover installed packages in the Cray
Programming Environment by running `spack external find` (or `spack
external read-cray-manifest` to *only* query the PE). Packages from
the PE (e.g., `cray-mpich`) are added to the database with full
dependency information, and compilers from the PE are added to
`compilers.yaml`. Available with the June 2022 release of the Cray
Programming Environment. (#24894, #30428)
6. **New binary format and hardened signing**
Spack now has an updated binary format, with improvements for
security. The new format has a detached signature file, and Spack
verifies the signature before untarring or decompressing the binary
package. The previous format embedded the signature in a `tar`
file, which required the client to run `tar` *before* verifying
(#30750). Spack can still install from build caches using the old
format, but we encourage users to switch to the new format going
forward.
Production GitLab pipelines have been hardened to securely sign
binaries. There is now a separate signing stage so that signing
keys are never exposed to build system code, and signing keys are
ephemeral and only live as long as the signing pipeline stage.
(#30753)
7. **Bootstrap mirror generation**
The `spack bootstrap mirror` command can automatically create a
mirror for bootstrapping the concretizer and other needed
dependencies in an air-gapped environment. (#28556)
8. **Nascent Windows support**
Spack now has initial support for Windows. Spack core has been
refactored to run in the Windows environment, and a small number of
packages can now build for Windows. More details are
[in the documentation](https://spack.rtfd.io/en/latest/getting_started.html#spack-on-windows)
(#27021, #28385, many more)
9. **Makefile generation**
`spack env depfile` can be used to generate a `Makefile` from an
environment, which can be used to build the packages in the environment
in parallel on a single node. e.g.:
```console
spack -e myenv env depfile > Makefile
make
```
Spack propagates `gmake` jobserver information to builds so that
their jobs can share cores. (#30039, #30254, #30302, #30526)
10. **New variant features**
In addition to being conditional themselves, variants can now have
[conditional *values*](https://spack.readthedocs.io/en/latest/packaging_guide.html#conditional-possible-values)
that are only possible for certain configurations of a package. (#29530)
Variants can be
[declared "sticky"](https://spack.readthedocs.io/en/latest/packaging_guide.html#sticky-variants),
which prevents them from being enabled or disabled by the
concretizer. Sticky variants must be set explicitly by users
on the command line or in `packages.yaml`. (#28630)
* Allow conditional possible values in variants
* Add a "sticky" property to variants
## Other new features of note
* Environment views can optionally link only `run` dependencies
with `link:run` (#29336)
* `spack external find --all` finds library-only packages in
addition to build dependencies (#28005)
* Customizable `config:license_dir` option (#30135)
* `spack external find --path PATH` takes a custom search path (#30479)
* `spack spec` has a new `--format` argument like `spack find` (#27908)
* `spack concretize --quiet` skips printing concretized specs (#30272)
* `spack info` now has cleaner output and displays test info (#22097)
* Package-level submodule option for git commit versions (#30085, #30037)
* Using `/hash` syntax to refer to concrete specs in an environment
now works even if `/hash` is not installed. (#30276)
## Major internal refactors
* full hash (see above)
* new develop versioning scheme `0.19.0-dev0`
* Allow for multiple dependencies/dependents from the same package (#28673)
* Splice differing virtual packages (#27919)
## Performance Improvements
* Concretization of large environments with `unify: when_possible` is
much faster than concretizing separately (#28941, see above)
* Single-pass view generation algorithm is 2.6x faster (#29443)
## Archspec improvements
* `oneapi` and `dpcpp` flag support (#30783)
* better support for `M1` and `a64fx` (#30683)
## Removals and Deprecations
* Spack no longer supports Python `2.6` (#27256)
* Removed deprecated `--run-tests` option of `spack install`;
use `spack test` (#30461)
* Removed deprecated `spack flake8`; use `spack style` (#27290)
* Deprecate `spack:concretization` config option; use
`concretizer:unify` (#30038)
* Deprecate top-level module configuration; use module sets (#28659)
* `spack activate` and `spack deactivate` are deprecated in favor of
environments; will be removed in `0.19.0` (#29430; see also `link:run`
in #29336 above)
## Notable Bugfixes
* Fix bug that broke locks with many parallel builds (#27846)
* Many bugfixes and consistency improvements for the new concretizer
and `--reuse` (#30357, #30092, #29835, #29933, #28605, #29694, #28848)
## Packages
* `CMakePackage` uses `CMAKE_INSTALL_RPATH_USE_LINK_PATH` (#29703)
* Refactored `lua` support: `lua-lang` virtual supports both
`lua` and `luajit` via the new `LuaPackage` build system (#28854)
* PythonPackage: now installs packages with `pip` (#27798)
* Python: improve site_packages_dir handling (#28346)
* Extends: support spec, not just package name (#27754)
* `find_libraries`: search for both .so and .dylib on macOS (#28924)
* Use stable URLs and `?full_index=1` for all github patches (#29239)
## Spack community stats
* 6,416 total packages, 458 new since `v0.17.0`
* 219 new Python packages
* 60 new R packages
* 377 people contributed to this release
* 337 committers to packages
* 85 committers to core
# v0.17.2 (2022-04-13)
### Spack bugfixes
@@ -213,7 +11,7 @@
* Fixed a few bugs affecting the spack ci command (#29518, #29419)
* Fix handling of Intel compiler environment (#29439)
* Fix a few edge cases when reindexing the DB (#28764)
* Remove "Known issues" from documentation (#29664)
* Remove "Known issues" from documentation (#29664)
* Other miscellaneous bugfixes (0b72e070583fc5bcd016f5adc8a84c99f2b7805f, #28403, #29261)
# v0.17.1 (2021-12-23)

View File

@@ -6,15 +6,34 @@ bootstrap:
# by Spack is installed in a "store" subfolder of this root directory
root: $user_cache_path/bootstrap
# Methods that can be used to bootstrap software. Each method may or
# may not be able to bootstrap all the software that Spack needs,
# may not be able to bootstrap all of the software that Spack needs,
# depending on its type.
sources:
- name: 'github-actions-v0.2'
metadata: $spack/share/spack/bootstrap/github-actions-v0.2
type: buildcache
description: |
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.2
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
- name: 'github-actions-v0.1'
metadata: $spack/share/spack/bootstrap/github-actions-v0.1
- name: 'spack-install'
metadata: $spack/share/spack/bootstrap/spack-install
type: buildcache
description: |
Buildcache generated from a public workflow using Github Actions.
The sha256 checksum of binaries is checked before installation.
info:
url: https://mirror.spack.io/bootstrap/github-actions/v0.1
homepage: https://github.com/spack/spack-bootstrap-mirrors
releases: https://github.com/spack/spack-bootstrap-mirrors/releases
# This method is just Spack bootstrapping the software it needs from sources.
# It has been added here so that users can selectively disable bootstrapping
# from sources by "untrusting" it.
- name: spack-install
type: install
description: |
Specs built from sources by Spack. May take a long time.
trusted:
# By default we trust bootstrapping from sources and from binaries
# produced on Github via the workflow

View File

@@ -28,9 +28,3 @@ concretizer:
# instance concretize with target "icelake" while running on "haswell").
# If "true" only allow targets that are compatible with the host.
host_compatible: true
# When "true" concretize root specs of environments together, so that each unique
# package in an environment corresponds to one concrete spec. This ensures
# environments can always be activated. When "false" perform concretization separately
# on each root spec, allowing different versions and variants of the same package in
# an environment.
unify: false

View File

@@ -33,9 +33,6 @@ config:
template_dirs:
- $spack/share/spack/templates
# Directory where licenses should be located
license_dir: $spack/etc/spack/licenses
# Temporary locations Spack can try to use for builds.
#
# Recommended options are given below.

View File

@@ -50,13 +50,6 @@ build cache files for the "ninja" spec:
Note that the targeted spec must already be installed. Once you have a build cache,
you can add it as a mirror, discussed next.
.. warning::
Spack improved the format used for binary caches in v0.18. The entire v0.18 series
will be able to verify and install binary caches both in the new and in the old format.
Support for using the old format is expected to end in v0.19, so we advise users to
recreate relevant buildcaches using Spack v0.18 or higher.
---------------------------------------
Finding or installing build cache files
---------------------------------------

View File

@@ -1,160 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _bootstrapping:
=============
Bootstrapping
=============
In the :ref:`Getting started <getting_started>` Section we already mentioned that
Spack can bootstrap some of its dependencies, including ``clingo``. In fact, there
is an entire command dedicated to the management of every aspect of bootstrapping:
.. command-output:: spack bootstrap --help
The first thing to know to understand bootstrapping in Spack is that each of
Spack's dependencies is bootstrapped lazily; i.e. the first time it is needed and
can't be found. You can readily check if any prerequisite for using Spack
is missing by running:
.. code-block:: console
% spack bootstrap status
Spack v0.17.1 - python@3.8
[FAIL] Core Functionalities
[B] MISSING "clingo": required to concretize specs
[FAIL] Binary packages
[B] MISSING "gpg2": required to sign/verify buildcaches
Spack will take care of bootstrapping any missing dependency marked as [B]. Dependencies marked as [-] are instead required to be found on the system.
In the case of the output shown above, Spack detected that both ``clingo`` and ``gnupg``
are missing, and it gives detailed information on why they are needed and whether
they can be bootstrapped. Running a command that concretizes a spec, like:
.. code-block:: console
% spack solve zlib
==> Bootstrapping clingo from pre-built binaries
==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.1/build_cache/darwin-catalina-x86_64/apple-clang-12.0.0/clingo-bootstrap-spack/darwin-catalina-x86_64-apple-clang-12.0.0-clingo-bootstrap-spack-p5on7i4hejl775ezndzfdkhvwra3hatn.spack
==> Installing "clingo-bootstrap@spack%apple-clang@12.0.0~docs~ipo+python build_type=Release arch=darwin-catalina-x86_64" from a buildcache
[ ... ]
triggers the bootstrapping of clingo from pre-built binaries as expected.
-----------------------
The Bootstrapping store
-----------------------
The software installed for bootstrapping purposes is deployed in a separate store.
Its location can be checked with the following command:
.. code-block:: console
% spack bootstrap root
It can also be changed with the same command by specifying the desired new path:
.. code-block:: console
% spack bootstrap root /opt/spack/bootstrap
You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
clingo-bootstrap@spack libassuan@2.5.5 libgpg-error@1.42 libksba@1.5.1 pinentry@1.1.1 zlib@1.2.11
gnupg@2.3.1 libgcrypt@1.9.3 libiconv@1.16 npth@1.6 python@3.8
If needed, you can remove all the software in the current bootstrapping store with:
.. code-block:: console
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
--------------------------------------------
Enabling and disabling bootstrapping methods
--------------------------------------------
Bootstrapping is always performed by trying the methods listed by:
.. command-output:: spack bootstrap list
in the order they appear, from top to bottom. By default, Spack is
configured to try bootstrapping from pre-built binaries first and to
fall back to bootstrapping from sources if that fails.
If need be, you can disable bootstrapping altogether by running:
.. code-block:: console
% spack bootstrap disable
in which case it's your responsibility to ensure Spack runs in an
environment where all its prerequisites are installed. You can
also configure Spack to skip certain bootstrapping methods by *untrusting*
them. For instance:
.. code-block:: console
% spack bootstrap untrust github-actions
==> "github-actions" is now untrusted and will not be used for bootstrapping
tells Spack to skip trying to bootstrap from binaries. To add the "github-actions" method back you can:
.. code-block:: console
% spack bootstrap trust github-actions
There is also an option to reset the bootstrapping configuration to Spack's defaults:
.. code-block:: console
% spack bootstrap reset
==> Bootstrapping configuration is being reset to Spack's defaults. Current configuration will be lost.
Do you want to continue? [Y/n]
%
----------------------------------------
Creating a mirror for air-gapped systems
----------------------------------------
Spack's default configuration for bootstrapping relies on the user having
access to the internet, either to fetch pre-compiled binaries or source tarballs.
Sometimes, though, Spack is deployed on air-gapped systems where such access is denied.
To help in these situations, Spack has a command that recreates, in a local folder of
your choice, a mirror containing the source tarballs and/or binary packages needed for
bootstrapping.
.. code-block:: console
% spack bootstrap mirror --binary-packages /opt/bootstrap
==> Adding "clingo-bootstrap@spack+python %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "gnupg@2.3: %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding "patchelf@0.13.1:0.13.99 %apple-clang target=x86_64" and dependencies to the mirror at /opt/bootstrap/local-mirror
==> Adding binary packages from "https://github.com/alalazo/spack-bootstrap-mirrors/releases/download/v0.1-rc.2/bootstrap-buildcache.tar.gz" to the mirror at /opt/bootstrap/local-mirror
To register the mirror on the platform where it's supposed to be used run the following command(s):
% spack bootstrap add --trust local-sources /opt/bootstrap/metadata/sources
% spack bootstrap add --trust local-binaries /opt/bootstrap/metadata/binaries
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.

View File

@@ -39,7 +39,6 @@ on these ideas for each distinct build system that Spack supports:
build_systems/autotoolspackage
build_systems/cmakepackage
build_systems/cachedcmakepackage
build_systems/mesonpackage
build_systems/qmakepackage
build_systems/sippackage

View File

@@ -1,123 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _cachedcmakepackage:
------------------
CachedCMakePackage
------------------
The CachedCMakePackage base class is used for CMake-based workflows
that create a CMake cache file prior to running ``cmake``. This is
useful for packages with arguments longer than the system limit, and
for reproducibility.
The documentation for this class assumes that the user is familiar with
the ``CMakePackage`` class from which it inherits. See the documentation
for :ref:`CMakePackage <cmakepackage>`.
^^^^^^
Phases
^^^^^^
The ``CachedCMakePackage`` base class comes with the following phases:
#. ``initconfig`` - generate the CMake cache file
#. ``cmake`` - generate the Makefile
#. ``build`` - build the package
#. ``install`` - install the package
By default, these phases run:
.. code-block:: console
$ mkdir spack-build
$ cd spack-build
$ cat << EOF > name-arch-compiler@version.cmake
# Write information on compilers and dependencies
# includes information on mpi and cuda if applicable
$ cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/installation/prefix -C name-arch-compiler@version.cmake
$ make
$ make test # optional
$ make install
The ``CachedCMakePackage`` class inherits from the ``CMakePackage``
class, and accepts all of the same options and adds all of the same
flags to the ``cmake`` command. As with the ``CMakePackage`` class,
you may need to add a few arguments yourself, and the
``CachedCMakePackage`` provides the same interface to add those
flags.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding entries to the CMake cache
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In addition to adding flags to the ``cmake`` command, you may need to
add entries to the CMake cache in the ``initconfig`` phase. This can
be done by overriding one of four methods:
#. ``CachedCMakePackage.initconfig_compiler_entries``
#. ``CachedCMakePackage.initconfig_mpi_entries``
#. ``CachedCMakePackage.initconfig_hardware_entries``
#. ``CachedCMakePackage.initconfig_package_entries``
Each of these methods returns a list of CMake cache strings. The
distinction between these methods is merely to provide a
well-structured and legible cmake cache file -- otherwise, entries
from each of these methods are handled identically.
Spack also provides convenience methods for generating CMake cache
entries. These methods are available at module scope in every Spack
package. Because CMake parses boolean options, strings, and paths
differently, there are three such methods:
#. ``cmake_cache_option``
#. ``cmake_cache_string``
#. ``cmake_cache_path``
These methods each accept three parameters -- the name of the CMake
variable associated with the entry, the value of the entry, and an
optional comment -- and return strings in the appropriate format to be
returned from any of the ``initconfig*`` methods. Additionally, these
methods may return comments beginning with the ``#`` character.
A typical usage of these methods may look something like this:
.. code-block:: python
def initconfig_mpi_entries(self):
    # Get existing MPI configurations
    entries = super(Foo, self).initconfig_mpi_entries()

    # The existing MPI configurations key on whether ``mpi`` is in the spec.
    # This spec has an MPI variant, and we need to enable MPI when it is on.
    # This hypothetical package controls MPI with the ``FOO_MPI`` option to
    # cmake.
    if '+mpi' in self.spec:
        entries.append(cmake_cache_option('FOO_MPI', True, "enable mpi"))
    else:
        entries.append(cmake_cache_option('FOO_MPI', False, "disable mpi"))
    return entries

def initconfig_package_entries(self):
    # Package specific options
    entries = []
    entries.append('#Entries for build options')
    bar_on = '+bar' in self.spec
    entries.append(cmake_cache_option('FOO_BAR', bar_on, 'toggle bar'))
    entries.append('#Entries for dependencies')
    if self.spec['blas'].name == 'baz':  # baz is our blas provider
        entries.append(cmake_cache_string('FOO_BLAS', 'baz', 'Use baz'))
        entries.append(cmake_cache_path('BAZ_PREFIX', self.spec['baz'].prefix))
    return entries
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on CMake cache files, see:
https://cmake.org/cmake/help/latest/manual/cmake.1.html

View File

@@ -59,8 +59,7 @@ other techniques to minimize the size of the final image:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer: together" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -246,8 +245,7 @@ software is respectively built and installed:
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
@@ -368,8 +366,7 @@ produces, for instance, the following ``Dockerfile``:
&& echo " externals:" \
&& echo " - spec: cuda%gcc" \
&& echo " prefix: /usr/local/cuda" \
&& echo " concretizer:" \
&& echo " unify: true" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml

View File

@@ -151,7 +151,7 @@ Package-related modules
^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.package`
Contains the :class:`~spack.package_base.Package` class, which
Contains the :class:`~spack.package.Package` class, which
is the superclass for all packages in Spack. Methods on ``Package``
implement all phases of the :ref:`package lifecycle
<package-lifecycle>` and manage the build process.

View File

@@ -273,9 +273,19 @@ or
Concretizing
^^^^^^^^^^^^
Once some user specs have been added to an environment, they can be concretized.
There are at the moment three different modes of operation to concretize an environment,
which are explained in detail in :ref:`environments_concretization_config`.
Once some user specs have been added to an environment, they can be
concretized. *By default specs are concretized separately*, one after
the other. This mode of operation permits deploying a full
software stack where multiple configurations of the same package
need to be installed alongside each other. Central installations done
at HPC centers by system administrators or user support groups
are a common case that fits this behavior.
Environments *can also be configured to concretize all
the root specs in a self-consistent way* to ensure that
each package in the environment comes with a single configuration. This
mode of operation is usually what software developers require when they
want to deploy their development environment.
Regardless of which mode of operation has been chosen, the following
command will ensure all the root specs are concretized according to the
constraints that are prescribed in the configuration:
@@ -483,76 +493,32 @@ Appending to this list in the yaml is identical to using the ``spack
add`` command from the command line. However, there is more power
available from the yaml file.
.. _environments_concretization_config:
^^^^^^^^^^^^^^^^^^^
Spec concretization
^^^^^^^^^^^^^^^^^^^
An environment can be concretized in three different modes and the behavior active under any environment
is determined by the ``concretizer:unify`` property. By default specs are concretized *separately*, one after the other:
Specs can be concretized separately or together, as already
explained in :ref:`environments_concretization`. The behavior active
under any environment is determined by the ``concretization`` property:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: false
- ncview
- netcdf
- nco
- py-sphinx
concretization: together
This mode of operation permits deploying a full software stack where multiple configurations of the same package
need to be installed alongside each other using the best possible selection of transitive dependencies. The downside
is that redundancy of installations is disregarded completely, and thus environments might be more bloated than
strictly needed. In the example above, for instance, if a version of ``zlib`` newer than ``1.2.8`` is known to Spack,
then it will be used for both ``hdf5`` installations.
If redundancy of the environment is a concern, Spack provides a way to install it *together where possible*,
i.e. trying to maximize reuse of dependencies across different specs:
.. code-block:: yaml
spack:
specs:
- hdf5~mpi
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: when_possible
In this case, too, Spack allows multiple configurations of the same package, but it privileges the reuse of
specs over other factors. Going back to our example, this means that both ``hdf5`` installations will use
``zlib@1.2.8`` as a dependency even if newer versions of that library are available.
Central installations done at HPC centers by system administrators or user support groups are a common case
that fits either of these two modes.
Environments can also be configured to concretize all the root specs *together*, in a self-consistent way, to
ensure that each package in the environment comes with a single configuration:
.. code-block:: yaml
spack:
specs:
- hdf5+mpi
- zlib@1.2.8
concretizer:
unify: true
This mode of operation is usually what software developers require when they want to deploy their development
environment and have a single view of it in the filesystem.
.. note::
The ``concretizer:unify`` config option was introduced in Spack 0.18 to
replace the ``concretization`` property. For reference,
``concretization: together`` is replaced by ``concretizer:unify:true``,
and ``concretization: separately`` is replaced by ``concretizer:unify:false``.
which can currently take either one of the two allowed values ``together`` or ``separately``
(the default).
.. admonition:: Re-concretization of user specs
When concretizing specs *together* or *together where possible* the entire set of specs will be
When concretizing specs together the entire set of specs will be
re-concretized after any addition of new user specs, to ensure that
the environment remains consistent / minimal. When instead the specs are concretized
the environment remains consistent. When instead the specs are concretized
separately only the new specs will be re-concretized after any addition.
^^^^^^^^^^^^^

View File

@@ -63,7 +63,6 @@ or refer to the full manual below.
configuration
config_yaml
bootstrapping
build_settings
environments
containers

View File

@@ -2393,9 +2393,9 @@ Influence how dependents are built or run
Spack provides a mechanism for dependencies to influence the
environment of their dependents by overriding the
:meth:`setup_dependent_run_environment <spack.package_base.PackageBase.setup_dependent_run_environment>`
:meth:`setup_dependent_run_environment <spack.package.PackageBase.setup_dependent_run_environment>`
or the
:meth:`setup_dependent_build_environment <spack.package_base.PackageBase.setup_dependent_build_environment>`
:meth:`setup_dependent_build_environment <spack.package.PackageBase.setup_dependent_build_environment>`
methods.
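In practice, a dependency's package class overrides one or both of these hooks to
export settings into its dependents' environments. A minimal sketch (hypothetical
package and environment-variable names) of such an override:

.. code-block:: python

   class Libfoo(Package):
       """Hypothetical library whose dependents need extra environment setup."""

       def setup_dependent_build_environment(self, env, dependent_spec):
           # Applied to every dependent while it is being built.
           env.set("LIBFOO_ROOT", self.prefix)
           env.prepend_path("PKG_CONFIG_PATH", self.prefix.lib.pkgconfig)

       def setup_dependent_run_environment(self, env, dependent_spec):
           # Applied when a dependent runs (e.g. through generated module files).
           env.prepend_path("LD_LIBRARY_PATH", self.prefix.lib)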
The Qt package, for instance, uses this call:
@@ -2417,7 +2417,7 @@ will have the ``PYTHONPATH``, ``PYTHONHOME`` and ``PATH`` environment
variables set appropriately before starting the installation. To make things
even simpler the ``python setup.py`` command is also inserted into the module
scope of dependents by overriding a third method called
:meth:`setup_dependent_package <spack.package_base.PackageBase.setup_dependent_package>`
:meth:`setup_dependent_package <spack.package.PackageBase.setup_dependent_package>`
:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/python/package.py
@@ -3022,7 +3022,7 @@ The classes that are currently provided by Spack are:
+----------------------------------------------------------+----------------------------------+
| **Base Class** | **Purpose** |
+==========================================================+==================================+
| :class:`~spack.package_base.Package` | General base class not |
| :class:`~spack.package.Package` | General base class not |
| | specialized for any build system |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.makefile.MakefilePackage` | Specialized class for packages |
@@ -3153,7 +3153,7 @@ for the install phase is:
For those not used to Python instance methods, this is the
package itself. In this case it's an instance of ``Foo``, which
extends ``Package``. For API docs on Package objects, see
:py:class:`Package <spack.package_base.Package>`.
:py:class:`Package <spack.package.Package>`.
``spec``
This is the concrete spec object created by Spack from an

View File

@@ -115,8 +115,7 @@ And here's the spack environment built by the pipeline represented as a
spack:
view: false
concretizer:
unify: false
concretization: separately
definitions:
- pkgs:

View File

@@ -61,7 +61,7 @@ You can see the packages we added earlier in the ``specs:`` section. If you
ever want to add more packages, you can either use ``spack add`` or manually
edit this file.
We also need to change the ``concretizer:unify`` option. By default, Spack
We also need to change the ``concretization:`` option. By default, Spack
concretizes each spec *separately*, allowing multiple versions of the same
package to coexist. Since we want a single consistent environment, we want to
concretize all of the specs *together*.
@@ -78,8 +78,7 @@ Here is what your ``spack.yaml`` looks like with this new setting:
# add package specs to the `specs` list
specs: [bash@5, python, py-numpy, py-scipy, py-matplotlib]
view: true
concretizer:
unify: true
concretization: together
^^^^^^^^^^^^^^^^
Symlink location

View File

@@ -25,5 +25,4 @@ spack:
- subversion
# Plotting
- graphviz
concretizer:
unify: true
concretization: together

6
lib/spack/env/cc vendored
View File

@@ -1,4 +1,4 @@
#!/bin/sh -f
#!/bin/sh
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
@@ -768,9 +768,7 @@ if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
echo "[$mode] ${full_command_list}" >> "$output_log"
fi
# Execute the full command, preserving spaces with IFS set

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.4 (commit 53fc4ac91e9b4c5e4079f15772503a80bece72ad)
* Version: 0.1.2 (commit 85757b6666422fca86aa882a769bf78b0f992f54)
argparse
--------

View File

@@ -61,7 +61,7 @@ def proc_cpuinfo():
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
with open("/proc/cpuinfo") as file:
for line in file:
key, separator, value = line.partition(":")
@@ -80,46 +80,26 @@ def proc_cpuinfo():
def _check_output(args, env):
output = subprocess.Popen( # pylint: disable=consider-using-with
args, stdout=subprocess.PIPE, env=env
).communicate()[0]
output = subprocess.Popen(args, stdout=subprocess.PIPE, env=env).communicate()[0]
return six.text_type(output.decode("utf-8"))
def _machine():
""" "Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin, trust what Python tells us
if operating_system != "Darwin":
return platform.machine()
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
#
# See: https://bugs.python.org/issue42704
output = _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return "aarch64"
return "x86_64"
@info_dict(operating_system="Darwin")
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == "x86_64":
if platform.machine() == "x86_64":
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
@@ -145,18 +125,6 @@ def sysctl(*args):
return info
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
return child_environment
def adjust_raw_flags(info):
"""Adjust the flags detected on the system to homogenize
slightly different representations.
@@ -216,7 +184,12 @@ def compatible_microarchitectures(info):
Args:
info (dict): dictionary containing information on the host cpu
"""
architecture_family = _machine()
architecture_family = platform.machine()
# On Apple M1 platform.machine() returns "arm64" instead of "aarch64"
# so we should normalize the name here
if architecture_family == "arm64":
architecture_family = "aarch64"
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
@@ -271,7 +244,12 @@ def compatibility_check(architecture_family):
architecture_family = (architecture_family,)
def decorator(func):
COMPATIBILITY_CHECKS.update({family: func for family in architecture_family})
# pylint: disable=fixme
# TODO: on removal of Python 2.6 support this can be re-written as
# TODO: an update + a dict comprehension
for arch_family in architecture_family:
COMPATIBILITY_CHECKS[arch_family] = func
return func
return decorator
@@ -310,7 +288,7 @@ def compatibility_check_for_x86_64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)
@@ -325,9 +303,8 @@ def compatibility_check_for_aarch64(info, target):
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (vendor, "generic")
# On macOS it seems impossible to get all the CPU features with syctl info
and (target.features.issubset(features) or platform.system() == "Darwin")
and (target.vendor == vendor or target.vendor == "generic")
and target.features.issubset(features)
)

View File

@@ -11,7 +11,7 @@
try:
from collections.abc import MutableMapping # novm
except ImportError:
from collections import MutableMapping # pylint: disable=deprecated-class
from collections import MutableMapping
class LazyDictionary(MutableMapping):
@@ -56,7 +56,7 @@ def _load_json_file(json_file):
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r") as file: # pylint: disable=unspecified-encoding
with open(filename, "r") as file:
return json.load(file)
return _factory

View File

@@ -88,20 +88,6 @@
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"oneapi": [
{
"versions": ":",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": ":",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -305,20 +291,6 @@
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"oneapi": [
{
"versions": ":",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": ":",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -361,18 +333,6 @@
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -424,20 +384,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -486,20 +432,6 @@
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "corei7",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -558,18 +490,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -630,18 +550,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -707,18 +615,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -776,18 +672,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -848,18 +732,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -926,20 +798,6 @@
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "knl",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1010,20 +868,6 @@
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "skylake-avx512",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1093,18 +937,6 @@
"versions": "18.0:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1172,18 +1004,6 @@
"versions": "19.0.1:",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1278,20 +1098,6 @@
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"name": "icelake-client",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1336,20 +1142,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse2"
}
]
}
},
@@ -1400,20 +1192,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1468,20 +1246,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse3"
}
]
}
},
@@ -1537,20 +1301,6 @@
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"flags": "-msse4.2"
}
]
}
},
@@ -1610,22 +1360,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1688,22 +1422,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1767,22 +1485,6 @@
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -1841,30 +1543,6 @@
"name": "znver3",
"flags": "-march={name} -mtune={name}"
}
],
"intel": [
{
"versions": "16.0:",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"oneapi": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
],
"dpcpp": [
{
"versions": ":",
"warnings": "Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors",
"name": "core-avx2",
"flags": "-march={name} -mtune={name}"
}
]
}
},
@@ -2110,6 +1788,7 @@
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
@@ -2142,26 +1821,18 @@
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "8:10.2",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve -msve-vector-bits=512"
},
{
"versions": "10.3:",
"flags": "-mcpu=a64fx -msve-vector-bits=512"
"versions": "8:",
"flags": "-march=armv8.2-a+crc+aes+sha2+fp16+sve -msve-vector-bits=512"
}
],
"clang": [
{
"versions": "3.9:4.9",
"flags": "-march=armv8.2-a+crc+sha2+fp16"
"flags": "-march=armv8.2-a+crc+crypto+fp16"
},
{
"versions": "5:10",
"flags": "-march=armv8.2-a+crc+sha2+fp16+sve"
},
{
"versions": "11:",
"flags": "-mcpu=a64fx"
"versions": "5:",
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
}
],
"arm": [
@@ -2283,40 +1954,7 @@
"m1": {
"from": ["aarch64"],
"vendor": "Apple",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"flagm2",
"frint"
],
"features": [],
"compilers": {
"gcc": [
{
@@ -2326,22 +1964,14 @@
],
"clang" : [
{
"versions": "9.0:12.0",
"versions": "9.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
],
"apple-clang": [
{
"versions": "11.0:12.5",
"versions": "11.0:",
"flags" : "-march=armv8.4-a"
},
{
"versions": "13.0:",
"flags" : "-mcpu=apple-m1"
}
]
}

View File

@@ -367,7 +367,7 @@ def group_ids(uid=None):
@system_path_filter(arg_slice=slice(1))
def chgrp(path, group, follow_symlinks=True):
def chgrp(path, group):
"""Implement the bash chgrp function on a single path"""
if is_windows:
raise OSError("Function 'chgrp' is not supported on Windows")
@@ -376,10 +376,7 @@ def chgrp(path, group, follow_symlinks=True):
gid = grp.getgrnam(group).gr_gid
else:
gid = group
if follow_symlinks:
os.chown(path, -1, gid)
else:
os.lchown(path, -1, gid)
os.chown(path, -1, gid)
@system_path_filter(arg_slice=slice(1))
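For context, the follow_symlinks branch removed above only chooses between os.chown (operate on the symlink target) and os.lchown (operate on the link itself). A minimal sketch of the dropped behavior:

    import os

    def chgrp_sketch(path, gid, follow_symlinks=True):
        # Mirrors the removed branch: uid of -1 leaves ownership untouched.
        if follow_symlinks:
            os.chown(path, -1, gid)   # follows the symlink to its target
        else:
            os.lchown(path, -1, gid)  # changes the group of the link itself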

View File

@@ -11,9 +11,7 @@
import os
import re
import sys
import traceback
from datetime import datetime, timedelta
from typing import List, Tuple
import six
from six import string_types
@@ -1011,64 +1009,3 @@ def __repr__(self):
def __str__(self):
return str(self.data)
class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self):
self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]]
def __bool__(self):
"""Whether any exceptions were handled."""
return bool(self.exceptions)
def forward(self, context):
# type: (str) -> GroupedExceptionForwarder
"""Return a contextmanager which extracts tracebacks and prefixes a message."""
return GroupedExceptionForwarder(context, self)
def _receive_forwarded(self, context, exc, tb):
# type: (str, Exception, List[str]) -> None
self.exceptions.append((context, exc, tb))
def grouped_message(self, with_tracebacks=True):
# type: (bool) -> str
"""Print out an error message coalescing all the forwarded errors."""
each_exception_message = [
'{0} raised {1}: {2}{3}'.format(
context,
exc.__class__.__name__,
exc,
'\n{0}'.format(''.join(tb)) if with_tracebacks else '',
)
for context, exc, tb in self.exceptions
]
return 'due to the following failures:\n{0}'.format(
'\n'.join(each_exception_message)
)
class GroupedExceptionForwarder(object):
"""A contextmanager to capture exceptions and forward them to a
GroupedExceptionHandler."""
def __init__(self, context, handler):
# type: (str, GroupedExceptionHandler) -> None
self._context = context
self._handler = handler
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, tb):
if exc_value is not None:
self._handler._receive_forwarded(
self._context,
exc_value,
traceback.format_tb(tb),
)
# Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
return True
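To illustrate the coalescing pattern these two classes provide (and that the bootstrap code further down relies on), a minimal usage sketch assuming the classes as defined above; the context names and error are made up:

    handler = GroupedExceptionHandler()
    for source in ('source-a', 'source-b'):
        # Exceptions raised inside the context are captured, not propagated.
        with handler.forward(source):
            raise RuntimeError('{0} failed'.format(source))
    if handler:  # truthy once at least one exception was recorded
        print('bootstrap failed ' + handler.grouped_message(with_tracebacks=False))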

View File

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: (major, minor, micro, dev release) tuple
spack_version_info = (0, 19, 0, 'dev0')
spack_version_info = (0, 18, 0, 'dev0')
#: PEP440 canonical <major>.<minor>.<micro>.<devN> string
spack_version = '.'.join(str(s) for s in spack_version_info)
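As a quick check of the PEP 440 construction, the tuple and the joined string correspond as follows (illustrative only):

    spack_version_info = (0, 19, 0, 'dev0')
    spack_version = '.'.join(str(s) for s in spack_version_info)
    assert spack_version == '0.19.0.dev0'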

View File

@@ -12,7 +12,7 @@
import spack.error
import spack.hooks
import spack.monitor
import spack.package_base
import spack.package
import spack.repo
import spack.util.executable

View File

@@ -210,7 +210,7 @@ def get_all_built_specs(self):
return spec_list
def find_built_spec(self, spec, mirrors_to_check=None):
def find_built_spec(self, spec):
"""Look in our cache for the built spec corresponding to ``spec``.
If the spec can be found among the configured binary mirrors, a
@@ -225,8 +225,6 @@ def find_built_spec(self, spec, mirrors_to_check=None):
Args:
spec (spack.spec.Spec): Concrete spec to find
mirrors_to_check: Optional mapping containing mirrors to check. If
None, just assumes all configured mirrors.
Returns:
A list of objects containing the found specs and mirror url where
@@ -242,23 +240,17 @@ def find_built_spec(self, spec, mirrors_to_check=None):
]
"""
self.regenerate_spec_cache()
return self.find_by_hash(spec.dag_hash(), mirrors_to_check=mirrors_to_check)
return self.find_by_hash(spec.dag_hash())
def find_by_hash(self, find_hash, mirrors_to_check=None):
def find_by_hash(self, find_hash):
"""Same as find_built_spec but uses the hash of a spec.
Args:
find_hash (str): hash of the spec to search
mirrors_to_check: Optional mapping containing mirrors to check. If
None, just assumes all configured mirrors.
"""
if find_hash not in self._mirrors_for_spec:
return None
results = self._mirrors_for_spec[find_hash]
if not mirrors_to_check:
return results
mirror_urls = mirrors_to_check.values()
return [r for r in results if r['mirror_url'] in mirror_urls]
return self._mirrors_for_spec[find_hash]
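The mirrors_to_check filtering removed in this hunk is a plain membership test on mirror URLs; a minimal sketch with hypothetical cache contents:

    # Hypothetical entries as they would appear in _mirrors_for_spec[find_hash].
    results = [
        {'mirror_url': 'https://mirror-a.example.com', 'spec': 'zlib@1.2.12'},
        {'mirror_url': 'https://mirror-b.example.com', 'spec': 'zlib@1.2.12'},
    ]
    mirrors_to_check = {'override': 'https://mirror-a.example.com'}

    # Same filter as the removed code: keep only results from the listed mirrors.
    mirror_urls = mirrors_to_check.values()
    filtered = [r for r in results if r['mirror_url'] in mirror_urls]
    assert [r['mirror_url'] for r in filtered] == ['https://mirror-a.example.com']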
def update_spec(self, spec, found_list):
"""
@@ -571,13 +563,6 @@ def __init__(self, msg):
super(NewLayoutException, self).__init__(msg)
class UnsignedPackageException(spack.error.SpackError):
"""
Raised if installation of unsigned package is attempted without
the use of ``--no-check-signature``.
"""
def compute_hash(data):
return hashlib.sha256(data.encode('utf-8')).hexdigest()
@@ -624,14 +609,10 @@ def get_buildfile_manifest(spec):
"""
data = {"text_to_relocate": [], "binary_to_relocate": [],
"link_to_relocate": [], "other": [],
"binary_to_relocate_fullpath": [], "offsets": {}}
"binary_to_relocate_fullpath": []}
blacklist = (".spack", "man")
# Get all the paths we will want to relocate in binaries
paths_to_relocate = [s.prefix for s in spec.traverse(root=True)]
paths_to_relocate.append(spack.store.layout.root)
# Do this during tarball creation to save time when the tarball is unpacked.
# Used by make_package_relative to determine binaries to change.
for root, dirs, files in os.walk(spec.prefix, topdown=True):
@@ -666,11 +647,6 @@ def get_buildfile_manifest(spec):
(m_subtype in ('x-mach-binary')
and sys.platform == 'darwin') or
(not filename.endswith('.o'))):
# Last path to relocate is the layout root, which is a substring
# of the others
indices = relocate.compute_indices(path_name, paths_to_relocate)
data['offsets'][rel_path_name] = indices
data['binary_to_relocate'].append(rel_path_name)
data['binary_to_relocate_fullpath'].append(path_name)
added = True
@@ -709,7 +685,6 @@ def write_buildinfo_file(spec, workdir, rel=False):
buildinfo['relocate_binaries'] = manifest['binary_to_relocate']
buildinfo['relocate_links'] = manifest['link_to_relocate']
buildinfo['prefix_to_hash'] = prefix_to_hash
buildinfo['offsets'] = manifest['offsets']
filename = buildinfo_file_name(workdir)
with open(filename, 'w') as outfile:
outfile.write(syaml.dump(buildinfo, default_flow_style=True))
@@ -776,16 +751,15 @@ def select_signing_key(key=None):
return key
def sign_specfile(key, force, specfile_path):
signed_specfile_path = '%s.sig' % specfile_path
if os.path.exists(signed_specfile_path):
def sign_tarball(key, force, specfile_path):
if os.path.exists('%s.asc' % specfile_path):
if force:
os.remove(signed_specfile_path)
os.remove('%s.asc' % specfile_path)
else:
raise NoOverwriteException(signed_specfile_path)
raise NoOverwriteException('%s.asc' % specfile_path)
key = select_signing_key(key)
spack.util.gpg.sign(key, specfile_path, signed_specfile_path, clearsign=True)
spack.util.gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
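The two signing shapes contrasted in this hunk differ only in whether the signature wraps the spec file or sits next to it; a sketch with a hypothetical path, using the same calls that appear above and a key obtained via select_signing_key:

    # Clearsigned layout: the signature wraps the JSON in pkg.spec.json.sig.
    spack.util.gpg.sign(key, '/tmp/pkg.spec.json', '/tmp/pkg.spec.json.sig', clearsign=True)
    # Detached layout: a separate pkg.spec.json.asc alongside the spec file.
    spack.util.gpg.sign(key, '/tmp/pkg.spec.json', '/tmp/pkg.spec.json.asc')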
def _fetch_spec_from_mirror(spec_url):
@@ -794,10 +768,7 @@ def _fetch_spec_from_mirror(spec_url):
_, _, spec_file = web_util.read_from_url(spec_url)
spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
# Need full spec.json name or this gets confused with index.json.
if spec_url.endswith('.json.sig'):
specfile_json = Spec.extract_json_from_clearsig(spec_file_contents)
s = Spec.from_dict(specfile_json)
elif spec_url.endswith('.json'):
if spec_url.endswith('.json'):
s = Spec.from_json(spec_file_contents)
elif spec_url.endswith('.yaml'):
s = Spec.from_yaml(spec_file_contents)
@@ -858,9 +829,7 @@ def generate_package_index(cache_prefix):
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if entry.endswith('.yaml') or
entry.endswith('spec.json') or
entry.endswith('spec.json.sig'))
if entry.endswith('.yaml') or entry.endswith('spec.json'))
except KeyError as inst:
msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
tty.warn(msg)
@@ -975,7 +944,7 @@ def _build_tarball(
tmpdir = tempfile.mkdtemp()
cache_prefix = build_cache_prefix(tmpdir)
tarfile_name = tarball_name(spec, '.spack')
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
tarfile_path = os.path.join(tarfile_dir, tarfile_name)
spackfile_path = os.path.join(
@@ -998,12 +967,10 @@ def _build_tarball(
spec_file = spack.store.layout.spec_file_path(spec)
specfile_name = tarball_name(spec, '.spec.json')
specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name))
signed_specfile_path = '{0}.sig'.format(specfile_path)
deprecated_specfile_path = specfile_path.replace('.spec.json', '.spec.yaml')
remote_specfile_path = url_util.join(
outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
remote_signed_specfile_path = '{0}.sig'.format(remote_specfile_path)
remote_specfile_path_deprecated = url_util.join(
outdir, os.path.relpath(deprecated_specfile_path,
os.path.realpath(tmpdir)))
@@ -1012,12 +979,9 @@ def _build_tarball(
if force:
if web_util.url_exists(remote_specfile_path):
web_util.remove_url(remote_specfile_path)
if web_util.url_exists(remote_signed_specfile_path):
web_util.remove_url(remote_signed_specfile_path)
if web_util.url_exists(remote_specfile_path_deprecated):
web_util.remove_url(remote_specfile_path_deprecated)
elif (web_util.url_exists(remote_specfile_path) or
web_util.url_exists(remote_signed_specfile_path) or
web_util.url_exists(remote_specfile_path_deprecated)):
raise NoOverwriteException(url_util.format(remote_specfile_path))
@@ -1079,7 +1043,6 @@ def _build_tarball(
raise ValueError(
'{0} not a valid spec file type (json or yaml)'.format(
spec_file))
spec_dict['buildcache_layout_version'] = 1
bchecksum = {}
bchecksum['hash_algorithm'] = 'sha256'
bchecksum['hash'] = checksum
@@ -1098,15 +1061,25 @@ def _build_tarball(
# sign the tarball and spec file with gpg
if not unsigned:
key = select_signing_key(key)
sign_specfile(key, force, specfile_path)
sign_tarball(key, force, specfile_path)
# put tarball, spec and signature files in .spack archive
with closing(tarfile.open(spackfile_path, 'w')) as tar:
tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
tar.add(name=specfile_path, arcname='%s' % specfile_name)
if not unsigned:
tar.add(name='%s.asc' % specfile_path,
arcname='%s.asc' % specfile_name)
# cleanup file moved to archive
os.remove(tarfile_path)
if not unsigned:
os.remove('%s.asc' % specfile_path)
# push tarball and signed spec json to remote mirror
web_util.push_to_url(
spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(
signed_specfile_path if not unsigned else specfile_path,
remote_signed_specfile_path if not unsigned else remote_specfile_path,
keep_original=False)
specfile_path, remote_specfile_path, keep_original=False)
tty.debug('Buildcache for "{0}" written to \n {1}'
.format(spec, remote_spackfile_path))
@@ -1189,174 +1162,48 @@ def push(specs, push_url, specs_kwargs=None, **kwargs):
warnings.warn(str(e))
def try_verify(specfile_path):
"""Utility function to attempt to verify a local file. Assumes the
file is a clearsigned signature file.
Args:
specfile_path (str): Path to file to be verified.
Returns:
``True`` if the signature could be verified, ``False`` otherwise.
"""
suppress = config.get('config:suppress_gpg_warnings', False)
try:
spack.util.gpg.verify(specfile_path, suppress_warnings=suppress)
except Exception:
return False
return True
def try_fetch(url_to_fetch):
"""Utility function to try and fetch a file from a url, stage it
locally, and return the path to the staged file.
Args:
url_to_fetch (str): Url pointing to remote resource to fetch
Returns:
Path to locally staged resource or ``None`` if it could not be fetched.
"""
stage = Stage(url_to_fetch, keep=True)
stage.create()
try:
stage.fetch()
except fs.FetchError:
stage.destroy()
return None
return stage
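Taken together, these helpers implement a fetch-then-verify step; a minimal sketch of how a caller such as download_tarball below uses them, with a hypothetical URL:

    specfile_url = 'https://mirror.example.com/build_cache/pkg.spec.json.sig'

    stage = try_fetch(specfile_url)                 # Stage on success, None otherwise
    if stage is not None:
        verified = try_verify(stage.save_filename)  # True if the clearsig checks out
        if not verified:
            stage.destroy()                         # discard the staged download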
def _delete_staged_downloads(download_result):
"""Clean up stages used to download tarball and specfile"""
download_result['tarball_stage'].destroy()
download_result['specfile_stage'].destroy()
def download_tarball(spec, unsigned=False, mirrors_for_spec=None):
def download_tarball(spec, preferred_mirrors=None):
"""
Download binary tarball for given package into stage area, returning
path to downloaded tarball if successful, None otherwise.
Args:
spec (spack.spec.Spec): Concrete spec
unsigned (bool): Whether or not to require signed binaries
mirrors_for_spec (list): Optional list of concrete specs and mirrors
obtained by calling binary_distribution.get_mirrors_for_spec().
These will be checked in order first before looking in other
configured mirrors.
preferred_mirrors (list): If provided, this is a list of preferred
mirror urls. Other configured mirrors will only be used if the
tarball can't be retrieved from one of these.
Returns:
``None`` if the tarball could not be downloaded (maybe also verified,
depending on whether new-style signed binary packages were found).
Otherwise, return an object indicating the path to the downloaded
tarball, the path to the downloaded specfile (in the case of new-style
buildcache), and whether or not the tarball is already verified.
.. code-block:: JSON
{
"tarball_path": "path-to-locally-saved-tarfile",
"specfile_path": "none-or-path-to-locally-saved-specfile",
"signature_verified": "true-if-binary-pkg-was-already-verified"
}
Path to the downloaded tarball, or ``None`` if the tarball could not
be downloaded from any configured mirrors.
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
"download of pre-compiled packages.")
tarball = tarball_path_name(spec, '.spack')
specfile_prefix = tarball_name(spec, '.spec')
mirrors_to_try = []
urls_to_try = []
# Note on try_first and try_next:
# mirrors_for_spec most likely came from spack caching remote
# mirror indices locally and adding their specs to a local data
# structure supporting quick lookup of concrete specs. Those
# mirrors are likely a subset of all configured mirrors, and
# we'll probably find what we need in one of them. But we'll
# look in all configured mirrors if needed, as maybe the spec
# we need was in an un-indexed mirror. No need to check any
# mirror for the spec twice though.
try_first = [i['mirror_url'] for i in mirrors_for_spec] if mirrors_for_spec else []
try_next = [
i.fetch_url for i in spack.mirror.MirrorCollection().values()
if i.fetch_url not in try_first
]
if preferred_mirrors:
for preferred_url in preferred_mirrors:
urls_to_try.append(url_util.join(
preferred_url, _build_cache_relative_path, tarball))
for url in try_first + try_next:
mirrors_to_try.append({
'specfile': url_util.join(url,
_build_cache_relative_path, specfile_prefix),
'spackfile': url_util.join(url,
_build_cache_relative_path, tarball)
})
for mirror in spack.mirror.MirrorCollection().values():
if not preferred_mirrors or mirror.fetch_url not in preferred_mirrors:
urls_to_try.append(url_util.join(
mirror.fetch_url, _build_cache_relative_path, tarball))
tried_to_verify_sigs = []
# Assumes we care more about finding a spec file by preferred ext
# than by mirror priority. This can be made less complicated as
# we remove support for deprecated spec formats and buildcache layouts.
for ext in ['json.sig', 'json', 'yaml']:
for mirror_to_try in mirrors_to_try:
specfile_url = '{0}.{1}'.format(mirror_to_try['specfile'], ext)
spackfile_url = mirror_to_try['spackfile']
local_specfile_stage = try_fetch(specfile_url)
if local_specfile_stage:
local_specfile_path = local_specfile_stage.save_filename
signature_verified = False
if ext.endswith('.sig') and not unsigned:
# If we found a signed specfile at the root, try to verify
# the signature immediately. We will not download the
# tarball if we could not verify the signature.
tried_to_verify_sigs.append(specfile_url)
signature_verified = try_verify(local_specfile_path)
if not signature_verified:
tty.warn("Failed to verify: {0}".format(specfile_url))
if unsigned or signature_verified or not ext.endswith('.sig'):
# We will download the tarball in one of three cases:
# 1. user asked for --no-check-signature
# 2. user didn't ask for --no-check-signature, but we
# found a spec.json.sig and verified the signature already
# 3. neither of the first two cases are true, but this file
# is *not* a signed json (not a spec.json.sig file). That
# means we already looked at all the mirrors and either didn't
# find any .sig files or couldn't verify any of them. But it
# is still possible to find an old style binary package where
# the signature is a detached .asc file in the outer archive
# of the tarball, and in that case, the only way to know is to
# download the tarball. This is a deprecated use case, so if
# something goes wrong during the extraction process (can't
# verify signature, checksum doesn't match) we will fail at
# that point instead of trying to download more tarballs from
# the remaining mirrors, looking for one we can use.
tarball_stage = try_fetch(spackfile_url)
if tarball_stage:
return {
'tarball_stage': tarball_stage,
'specfile_stage': local_specfile_stage,
'signature_verified': signature_verified,
}
local_specfile_stage.destroy()
# Falling through the nested loops means we exhaustively searched
# for all known kinds of spec files on all mirrors and did not find
# an acceptable one for which we could download a tarball.
if tried_to_verify_sigs:
raise NoVerifyException(("Spack found new style signed binary packages, "
"but was unable to verify any of them. Please "
"obtain and trust the correct public key. If "
"these are public spack binaries, please see the "
"spack docs for locations where keys can be found."))
for try_url in urls_to_try:
# stage the tarball into standard place
stage = Stage(try_url, name="build_cache", keep=True)
stage.create()
try:
stage.fetch()
return stage.save_filename
except fs.FetchError:
continue
tty.warn("download_tarball() was unable to download " +
"{0} from any configured mirrors".format(spec))
@@ -1483,25 +1330,11 @@ def is_backup_file(file):
# If we are not installing back to the same install tree do the relocation
if old_prefix != new_prefix:
# Relocate links to the new install prefix
links = [link for link in buildinfo.get('relocate_links', [])]
relocate.relocate_links(
links, old_layout_root, old_prefix, new_prefix
)
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names, prefix_to_prefix_text)
files_to_relocate = [os.path.join(workdir, filename)
for filename in buildinfo.get('relocate_binaries')
]
# If the buildcache was not created with relativized rpaths
# do the relocation of rpaths in binaries
# TODO: Is this necessary? How are null-terminated strings handled
# in the rpath header?
files_to_relocate = [
os.path.join(workdir, filename)
for filename in buildinfo.get('relocate_binaries')
]
# do the relocation of path in binaries
platform = spack.platforms.by_name(spec.platform)
if 'macho' in platform.binary_formats:
relocate.relocate_macho_binaries(files_to_relocate,
@@ -1517,11 +1350,25 @@ def is_backup_file(file):
prefix_to_prefix_bin, rel,
old_prefix,
new_prefix)
# Relocate links to the new install prefix
links = [link for link in buildinfo.get('relocate_links', [])]
relocate.relocate_links(
links, old_layout_root, old_prefix, new_prefix
)
# If offsets is None, we will recompute offsets when needed
offsets = buildinfo.get('offsets', None)
relocate.relocate_text_bin(
files_to_relocate, prefix_to_prefix_bin, offsets, workdir)
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names, prefix_to_prefix_text)
paths_to_relocate = [old_prefix, old_layout_root]
paths_to_relocate.extend(prefix_to_hash.keys())
files_to_relocate = list(filter(
lambda pathname: not relocate.file_is_relocatable(
pathname, paths_to_relocate=paths_to_relocate),
map(lambda filename: os.path.join(workdir, filename),
buildinfo['relocate_binaries'])))
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate, prefix_to_prefix_bin)
# If we are installing back to the same location
# relocate the sbang location if the spack directory changed
@@ -1530,55 +1377,7 @@ def is_backup_file(file):
relocate.relocate_text(text_names, prefix_to_prefix_text)
def _extract_inner_tarball(spec, filename, extract_to, unsigned, remote_checksum):
stagepath = os.path.dirname(filename)
spackfile_name = tarball_name(spec, '.spack')
spackfile_path = os.path.join(stagepath, spackfile_name)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_path = os.path.join(extract_to, tarfile_name)
deprecated_yaml_name = tarball_name(spec, '.spec.yaml')
deprecated_yaml_path = os.path.join(extract_to, deprecated_yaml_name)
json_name = tarball_name(spec, '.spec.json')
json_path = os.path.join(extract_to, json_name)
with closing(tarfile.open(spackfile_path, 'r')) as tar:
tar.extractall(extract_to)
# some buildcache tarfiles use bzip2 compression
if not os.path.exists(tarfile_path):
tarfile_name = tarball_name(spec, '.tar.bz2')
tarfile_path = os.path.join(extract_to, tarfile_name)
if os.path.exists(json_path):
specfile_path = json_path
elif os.path.exists(deprecated_yaml_path):
specfile_path = deprecated_yaml_path
else:
raise ValueError('Cannot find spec file for {0}.'.format(extract_to))
if not unsigned:
if os.path.exists('%s.asc' % specfile_path):
suppress = config.get('config:suppress_gpg_warnings', False)
try:
spack.util.gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
except Exception:
raise NoVerifyException("Spack was unable to verify package "
"signature, please obtain and trust the "
"correct public key.")
else:
raise UnsignedPackageException(
"To install unsigned packages, use the --no-check-signature option.")
# get the sha256 checksum of the tarball
local_checksum = checksum_tarball(tarfile_path)
# if the checksums don't match don't install
if local_checksum != remote_checksum['hash']:
raise NoChecksumException(
"Package tarball failed checksum verification.\n"
"It cannot be installed.")
return tarfile_path
def extract_tarball(spec, download_result, allow_root=False, unsigned=False,
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
force=False):
"""
extract binary tarball for given package into install area
@@ -1589,56 +1388,66 @@ def extract_tarball(spec, download_result, allow_root=False, unsigned=False,
else:
raise NoOverwriteException(str(spec.prefix))
specfile_path = download_result['specfile_stage'].save_filename
tmpdir = tempfile.mkdtemp()
stagepath = os.path.dirname(filename)
spackfile_name = tarball_name(spec, '.spack')
spackfile_path = os.path.join(stagepath, spackfile_name)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_path = os.path.join(tmpdir, tarfile_name)
specfile_is_json = True
deprecated_yaml_name = tarball_name(spec, '.spec.yaml')
deprecated_yaml_path = os.path.join(tmpdir, deprecated_yaml_name)
json_name = tarball_name(spec, '.spec.json')
json_path = os.path.join(tmpdir, json_name)
with closing(tarfile.open(spackfile_path, 'r')) as tar:
tar.extractall(tmpdir)
# some buildcache tarfiles use bzip2 compression
if not os.path.exists(tarfile_path):
tarfile_name = tarball_name(spec, '.tar.bz2')
tarfile_path = os.path.join(tmpdir, tarfile_name)
if os.path.exists(json_path):
specfile_path = json_path
elif os.path.exists(deprecated_yaml_path):
specfile_is_json = False
specfile_path = deprecated_yaml_path
else:
raise ValueError('Cannot find spec file for {0}.'.format(tmpdir))
if not unsigned:
if os.path.exists('%s.asc' % specfile_path):
try:
suppress = config.get('config:suppress_gpg_warnings', False)
spack.util.gpg.verify(
'%s.asc' % specfile_path, specfile_path, suppress)
except Exception as e:
shutil.rmtree(tmpdir)
raise e
else:
shutil.rmtree(tmpdir)
raise NoVerifyException(
"Package spec file failed signature verification.\n"
"Use spack buildcache keys to download "
"and install a key for verification from the mirror.")
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# get the sha256 checksum recorded at creation
spec_dict = {}
with open(specfile_path, 'r') as inputfile:
content = inputfile.read()
if specfile_path.endswith('.json.sig'):
spec_dict = Spec.extract_json_from_clearsig(content)
elif specfile_path.endswith('.json'):
if specfile_is_json:
spec_dict = sjson.load(content)
else:
spec_dict = syaml.load(content)
bchecksum = spec_dict['binary_cache_checksum']
filename = download_result['tarball_stage'].save_filename
signature_verified = download_result['signature_verified']
tmpdir = None
if ('buildcache_layout_version' not in spec_dict or
int(spec_dict['buildcache_layout_version']) < 1):
# Handle the older buildcache layout where the .spack file
# contains a spec json/yaml, maybe an .asc file (signature),
# and another tarball containing the actual install tree.
tmpdir = tempfile.mkdtemp()
try:
tarfile_path = _extract_inner_tarball(
spec, filename, tmpdir, unsigned, bchecksum)
except Exception as e:
_delete_staged_downloads(download_result)
shutil.rmtree(tmpdir)
raise e
else:
# Newer buildcache layout: the .spack file contains just the
# install tree; the signature, if it exists, is
# wrapped around the spec.json at the root. If sig verify
# was required, it was already done before downloading
# the tarball.
tarfile_path = filename
if not unsigned and not signature_verified:
raise UnsignedPackageException(
"To install unsigned packages, use the --no-check-signature option.")
# compute the sha256 checksum of the tarball
local_checksum = checksum_tarball(tarfile_path)
# if the checksums don't match don't install
if local_checksum != bchecksum['hash']:
_delete_staged_downloads(download_result)
raise NoChecksumException(
"Package tarball failed checksum verification.\n"
"It cannot be installed.")
# if the checksums don't match don't install
if bchecksum['hash'] != checksum:
shutil.rmtree(tmpdir)
raise NoChecksumException(
"Package tarball failed checksum verification.\n"
"It cannot be installed.")
new_relative_prefix = str(os.path.relpath(spec.prefix,
spack.store.layout.root))
@@ -1663,13 +1472,11 @@ def extract_tarball(spec, download_result, allow_root=False, unsigned=False,
try:
tar.extractall(path=extract_tmp)
except Exception as e:
_delete_staged_downloads(download_result)
shutil.rmtree(extracted_dir)
raise e
try:
shutil.move(extracted_dir, spec.prefix)
except Exception as e:
_delete_staged_downloads(download_result)
shutil.rmtree(extracted_dir)
raise e
os.remove(tarfile_path)
@@ -1688,11 +1495,9 @@ def extract_tarball(spec, download_result, allow_root=False, unsigned=False,
spec_id = spec.format('{name}/{hash:7}')
tty.warn('No manifest file in tarball for spec %s' % spec_id)
finally:
if tmpdir:
shutil.rmtree(tmpdir)
shutil.rmtree(tmpdir)
if os.path.exists(filename):
os.remove(filename)
_delete_staged_downloads(download_result)
def install_root_node(spec, allow_root, unsigned=False, force=False, sha256=None):
@@ -1720,23 +1525,21 @@ def install_root_node(spec, allow_root, unsigned=False, force=False, sha256=None
warnings.warn("Package for spec {0} already installed.".format(spec.format()))
return
download_result = download_tarball(spec, unsigned)
if not download_result:
tarball = download_tarball(spec)
if not tarball:
msg = 'download of binary cache file for spec "{0}" failed'
raise RuntimeError(msg.format(spec.format()))
if sha256:
checker = spack.util.crypto.Checker(sha256)
msg = 'cannot verify checksum for "{0}" [expected={1}]'
tarball_path = download_result['tarball_stage'].save_filename
msg = msg.format(tarball_path, sha256)
if not checker.check(tarball_path):
_delete_staged_downloads(download_result)
msg = msg.format(tarball, sha256)
if not checker.check(tarball):
raise spack.binary_distribution.NoChecksumException(msg)
tty.debug('Verified SHA256 checksum of the build cache')
tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
extract_tarball(spec, download_result, allow_root, unsigned, force)
extract_tarball(spec, tarball, allow_root, unsigned, force)
spack.hooks.post_install(spec)
spack.store.db.add(spec, spack.store.layout)
@@ -1762,8 +1565,6 @@ def try_direct_fetch(spec, mirrors=None):
"""
deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
specfile_name = tarball_name(spec, '.spec.json')
signed_specfile_name = tarball_name(spec, '.spec.json.sig')
specfile_is_signed = False
specfile_is_json = True
found_specs = []
@@ -1772,35 +1573,24 @@ def try_direct_fetch(spec, mirrors=None):
mirror.fetch_url, _build_cache_relative_path, deprecated_specfile_name)
buildcache_fetch_url_json = url_util.join(
mirror.fetch_url, _build_cache_relative_path, specfile_name)
buildcache_fetch_url_signed_json = url_util.join(
mirror.fetch_url, _build_cache_relative_path, signed_specfile_name)
try:
_, _, fs = web_util.read_from_url(buildcache_fetch_url_signed_json)
specfile_is_signed = True
_, _, fs = web_util.read_from_url(buildcache_fetch_url_json)
except (URLError, web_util.SpackWebError, HTTPError) as url_err:
try:
_, _, fs = web_util.read_from_url(buildcache_fetch_url_json)
except (URLError, web_util.SpackWebError, HTTPError) as url_err_x:
try:
_, _, fs = web_util.read_from_url(buildcache_fetch_url_yaml)
specfile_is_json = False
except (URLError, web_util.SpackWebError, HTTPError) as url_err_y:
tty.debug('Did not find {0} on {1}'.format(
specfile_name, buildcache_fetch_url_signed_json), url_err)
tty.debug('Did not find {0} on {1}'.format(
specfile_name, buildcache_fetch_url_json), url_err_x)
tty.debug('Did not find {0} on {1}'.format(
specfile_name, buildcache_fetch_url_yaml), url_err_y)
continue
_, _, fs = web_util.read_from_url(buildcache_fetch_url_yaml)
specfile_is_json = False
except (URLError, web_util.SpackWebError, HTTPError) as url_err_y:
tty.debug('Did not find {0} on {1}'.format(
specfile_name, buildcache_fetch_url_json), url_err)
tty.debug('Did not find {0} on {1}'.format(
specfile_name, buildcache_fetch_url_yaml), url_err_y)
continue
specfile_contents = codecs.getreader('utf-8')(fs).read()
# read the spec from the build cache file. All specs in build caches
# are concrete (as they are built) so we need to mark this spec
# concrete on read-in.
if specfile_is_signed:
specfile_json = Spec.extract_json_from_clearsig(specfile_contents)
fetched_spec = Spec.from_dict(specfile_json)
elif specfile_is_json:
if specfile_is_json:
fetched_spec = Spec.from_json(specfile_contents)
else:
fetched_spec = Spec.from_yaml(specfile_contents)
@@ -1837,7 +1627,7 @@ def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False):
tty.debug("No Spack mirrors are currently configured")
return {}
results = binary_index.find_built_spec(spec, mirrors_to_check=mirrors_to_check)
results = binary_index.find_built_spec(spec)
# Maybe we just didn't have the latest information from the mirror, so
# try to fetch directly, unless we are only considering the indices.
@@ -2127,8 +1917,7 @@ def download_single_spec(
'path': local_tarball_path,
'required': True,
}, {
'url': [tarball_name(concrete_spec, '.spec.json.sig'),
tarball_name(concrete_spec, '.spec.json'),
'url': [tarball_name(concrete_spec, '.spec.json'),
tarball_name(concrete_spec, '.spec.yaml')],
'path': destination,
'required': True,

View File

@@ -5,7 +5,6 @@
from __future__ import print_function
import contextlib
import copy
import fnmatch
import functools
import json
@@ -22,7 +21,6 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.lang import GroupedExceptionHandler
import spack.binary_distribution
import spack.config
@@ -38,11 +36,6 @@
import spack.util.environment
import spack.util.executable
import spack.util.path
import spack.util.spack_yaml
import spack.util.url
#: Name of the file containing metadata about the bootstrapping source
METADATA_YAML_FILENAME = 'metadata.yaml'
#: Map a bootstrapper type to the corresponding class
_bootstrap_methods = {}
@@ -210,43 +203,12 @@ def _executables_in_store(executables, query_spec, query_info=None):
return False
class _BootstrapperBase(object):
"""Base class to derive types that can bootstrap software for Spack"""
config_scope_name = ''
@_bootstrapper(type='buildcache')
class _BuildcacheBootstrapper(object):
"""Install the software needed during bootstrapping from a buildcache."""
def __init__(self, conf):
self.name = conf['name']
self.url = conf['info']['url']
@property
def mirror_url(self):
# Absolute paths
if os.path.isabs(self.url):
return spack.util.url.format(self.url)
# Check for :// and assume it's an url if we find it
if '://' in self.url:
return self.url
# Otherwise, it's a relative path
return spack.util.url.format(os.path.join(self.metadata_dir, self.url))
@property
def mirror_scope(self):
return spack.config.InternalConfigScope(
self.config_scope_name, {'mirrors:': {self.name: self.mirror_url}}
)
@_bootstrapper(type='buildcache')
class _BuildcacheBootstrapper(_BootstrapperBase):
"""Install the software needed during bootstrapping from a buildcache."""
config_scope_name = 'bootstrap_buildcache'
def __init__(self, conf):
super(_BuildcacheBootstrapper, self).__init__(conf)
self.metadata_dir = spack.util.path.canonicalize_path(conf['metadata'])
self.last_search = None
@staticmethod
@@ -269,8 +231,9 @@ def _spec_and_platform(abstract_spec_str):
def _read_metadata(self, package_name):
"""Return metadata about the given package."""
json_filename = '{0}.json'.format(package_name)
json_dir = self.metadata_dir
json_path = os.path.join(json_dir, json_filename)
json_path = os.path.join(
spack.paths.share_path, 'bootstrap', self.name, json_filename
)
with open(json_path) as f:
data = json.load(f)
return data
@@ -344,6 +307,12 @@ def _install_and_test(
return True
return False
@property
def mirror_scope(self):
return spack.config.InternalConfigScope(
'bootstrap_buildcache', {'mirrors:': {self.name: self.url}}
)
def try_import(self, module, abstract_spec_str):
test_fn, info = functools.partial(_try_import_from_store, module), {}
if test_fn(query_spec=abstract_spec_str, query_info=info):
@@ -373,13 +342,9 @@ def try_search_path(self, executables, abstract_spec_str):
@_bootstrapper(type='install')
class _SourceBootstrapper(_BootstrapperBase):
class _SourceBootstrapper(object):
"""Install the software needed during bootstrapping from sources."""
config_scope_name = 'bootstrap_source'
def __init__(self, conf):
super(_SourceBootstrapper, self).__init__(conf)
self.metadata_dir = spack.util.path.canonicalize_path(conf['metadata'])
self.conf = conf
self.last_search = None
@@ -412,8 +377,7 @@ def try_import(self, module, abstract_spec_str):
tty.debug(msg.format(module, abstract_spec_str))
# Install the spec that should make the module importable
with spack.config.override(self.mirror_scope):
concrete_spec.package.do_install(fail_fast=True)
concrete_spec.package.do_install(fail_fast=True)
if _try_import_from_store(module, query_spec=concrete_spec, query_info=info):
self.last_search = info
@@ -426,8 +390,6 @@ def try_search_path(self, executables, abstract_spec_str):
self.last_search = info
return True
tty.info("Bootstrapping {0} from sources".format(abstract_spec_str))
# If we compile code from sources, detecting a few build tools
# might reduce compilation time by a fair amount
_add_externals_if_missing()
@@ -440,8 +402,7 @@ def try_search_path(self, executables, abstract_spec_str):
msg = "[BOOTSTRAP] Try installing '{0}' from sources"
tty.debug(msg.format(abstract_spec_str))
with spack.config.override(self.mirror_scope):
concrete_spec.package.do_install()
concrete_spec.package.do_install()
if _executables_in_store(executables, concrete_spec, query_info=info):
self.last_search = info
return True
@@ -456,10 +417,11 @@ def _make_bootstrapper(conf):
return _bootstrap_methods[btype](conf)
def _validate_source_is_trusted(conf):
def _source_is_trusted(conf):
trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
if name not in trusted:
raise ValueError('source is not trusted')
return False
return trusted[name]
def spec_for_current_python():
@@ -524,26 +486,36 @@ def ensure_module_importable_or_raise(module, abstract_spec=None):
return
abstract_spec = abstract_spec or module
source_configs = spack.config.get('bootstrap:sources', [])
h = GroupedExceptionHandler()
errors = {}
for current_config in bootstrapping_sources():
with h.forward(current_config['name']):
_validate_source_is_trusted(current_config)
for current_config in source_configs:
if not _source_is_trusted(current_config):
msg = ('[BOOTSTRAP MODULE {0}] Skipping source "{1}" since it is '
'not trusted').format(module, current_config['name'])
tty.debug(msg)
continue
b = _make_bootstrapper(current_config)
b = _make_bootstrapper(current_config)
try:
if b.try_import(module, abstract_spec):
return
except Exception as e:
msg = '[BOOTSTRAP MODULE {0}] Unexpected error "{1}"'
tty.debug(msg.format(module, str(e)))
errors[current_config['name']] = e
assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(module) # noqa: E501
msg = 'cannot bootstrap the "{0}" Python module '.format(module)
# We couldn't import in any way, so raise an import error
msg = 'cannot bootstrap the "{0}" Python module'.format(module)
if abstract_spec:
msg += 'from spec "{0}" '.format(abstract_spec)
if tty.is_debug():
msg += h.grouped_message(with_tracebacks=True)
else:
msg += h.grouped_message(with_tracebacks=False)
msg += '\nRun `spack --debug ...` for more detailed errors'
msg += ' from spec "{0}"'.format(abstract_spec)
msg += ' due to the following failures:\n'
for method in errors:
err = errors[method]
msg += " '{0}' raised {1}: {2}\n".format(
method, err.__class__.__name__, str(err))
msg += ' Please run `spack -d spec zlib` for more verbose error messages'
raise ImportError(msg)
@@ -566,14 +538,16 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
return cmd
executables_str = ', '.join(executables)
source_configs = spack.config.get('bootstrap:sources', [])
for current_config in source_configs:
if not _source_is_trusted(current_config):
msg = ('[BOOTSTRAP EXECUTABLES {0}] Skipping source "{1}" since it is '
'not trusted').format(executables_str, current_config['name'])
tty.debug(msg)
continue
h = GroupedExceptionHandler()
for current_config in bootstrapping_sources():
with h.forward(current_config['name']):
_validate_source_is_trusted(current_config)
b = _make_bootstrapper(current_config)
b = _make_bootstrapper(current_config)
try:
if b.try_search_path(executables, abstract_spec):
# Additional environment variables needed
concrete_spec, cmd = b.last_search['spec'], b.last_search['command']
@@ -588,16 +562,14 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
)
cmd.add_default_envmod(env_mods)
return cmd
except Exception as e:
msg = '[BOOTSTRAP EXECUTABLES {0}] Unexpected error "{1}"'
tty.debug(msg.format(executables_str, str(e)))
assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(executables_str) # noqa: E501
msg = 'cannot bootstrap any of the {0} executables '.format(executables_str)
# We couldn't import in any way, so raise an import error
msg = 'cannot bootstrap any of the {0} executables'.format(executables_str)
if abstract_spec:
msg += 'from spec "{0}" '.format(abstract_spec)
if tty.is_debug():
msg += h.grouped_message(with_tracebacks=True)
else:
msg += h.grouped_message(with_tracebacks=False)
msg += '\nRun `spack --debug ...` for more detailed errors'
msg += ' from spec "{0}"'.format(abstract_spec)
raise RuntimeError(msg)
@@ -854,19 +826,6 @@ def ensure_flake8_in_path_or_raise():
return ensure_executables_in_path_or_raise([executable], abstract_spec=root_spec)
def all_root_specs(development=False):
"""Return a list of all the root specs that may be used to bootstrap Spack.
Args:
development (bool): if True include dev dependencies
"""
specs = [clingo_root_spec(), gnupg_root_spec(), patchelf_root_spec()]
if development:
specs += [isort_root_spec(), mypy_root_spec(),
black_root_spec(), flake8_root_spec()]
return specs
def _missing(name, purpose, system_only=True):
"""Message to be printed if an executable is not found"""
msg = '[{2}] MISSING "{0}": {1}'
@@ -1004,23 +963,3 @@ def status_message(section):
msg += '\n'
msg = msg.format(pass_token if not missing_software else fail_token)
return msg, missing_software
def bootstrapping_sources(scope=None):
"""Return the list of configured sources of software for bootstrapping Spack
Args:
scope (str or None): if a valid configuration scope is given, return the
list only from that scope
"""
source_configs = spack.config.get('bootstrap:sources', default=None, scope=scope)
source_configs = source_configs or []
list_of_sources = []
for entry in source_configs:
current = copy.copy(entry)
metadata_dir = spack.util.path.canonicalize_path(entry['metadata'])
metadata_yaml = os.path.join(metadata_dir, METADATA_YAML_FILENAME)
with open(metadata_yaml) as f:
current.update(spack.util.spack_yaml.load(f))
list_of_sources.append(current)
return list_of_sources
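Each entry handled by bootstrapping_sources pairs a 'bootstrap:sources' stanza with a metadata.yaml that is merged over it; a sketch of that merge with hypothetical values:

    import copy

    # Hypothetical configured entry and metadata.yaml contents.
    entry = {'name': 'github-actions',
             'metadata': '$spack/share/spack/bootstrap/github-actions'}
    metadata_yaml = {'type': 'buildcache',
                     'info': {'url': 'https://mirror.example.com'}}

    # Same merge as the removed loop: yaml keys are layered onto a copy of the entry.
    current = copy.copy(entry)
    current.update(metadata_yaml)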

View File

@@ -55,7 +55,7 @@
import spack.config
import spack.install_test
import spack.main
import spack.package_base
import spack.package
import spack.paths
import spack.platforms
import spack.repo
@@ -722,7 +722,7 @@ def get_std_cmake_args(pkg):
package were a CMakePackage instance.
Args:
pkg (spack.package_base.PackageBase): package under consideration
pkg (spack.package.PackageBase): package under consideration
Returns:
list: arguments for cmake
@@ -738,7 +738,7 @@ def get_std_meson_args(pkg):
package were a MesonPackage instance.
Args:
pkg (spack.package_base.PackageBase): package under consideration
pkg (spack.package.PackageBase): package under consideration
Returns:
list: arguments for meson
@@ -748,12 +748,12 @@ def get_std_meson_args(pkg):
def parent_class_modules(cls):
"""
Get list of superclass modules that descend from spack.package_base.PackageBase
Get list of superclass modules that descend from spack.package.PackageBase
Includes cls.__module__
"""
if (not issubclass(cls, spack.package_base.PackageBase) or
issubclass(spack.package_base.PackageBase, cls)):
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
@@ -771,7 +771,7 @@ def load_external_modules(pkg):
associated with them.
Args:
pkg (spack.package_base.PackageBase): package to load deps for
pkg (spack.package.PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
external_modules = dep.external_modules or []
@@ -1109,7 +1109,7 @@ def start_build_process(pkg, function, kwargs):
Args:
pkg (spack.package_base.PackageBase): package whose environment we should set up the
pkg (spack.package.PackageBase): package whose environment we should set up the
child process for.
function (typing.Callable): argless function to run in the child
process.
@@ -1234,7 +1234,7 @@ def make_stack(tb, stack=None):
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package_base.PackageBase):
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.

View File

@@ -9,7 +9,7 @@
from spack.build_systems.autotools import AutotoolsPackage
from spack.directives import extends
from spack.package_base import ExtensionError
from spack.package import ExtensionError
from spack.util.executable import which

View File

@@ -16,7 +16,7 @@
from spack.build_environment import InstallError
from spack.directives import conflicts, depends_on
from spack.operating_systems.mac_os import macos_version
from spack.package_base import PackageBase, run_after, run_before
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable
from spack.version import Version

View File

@@ -8,7 +8,7 @@
from llnl.util.filesystem import install, mkdirp
from spack.build_systems.cmake import CMakePackage
from spack.package_base import run_after
from spack.package import run_after
def cmake_cache_path(name, value, comment=""):
@@ -210,10 +210,6 @@ def std_initconfig_entries(self):
"#------------------{0}\n".format("-" * 60),
]
def initconfig_package_entries(self):
"""This method is to be overwritten by the package"""
return []
def initconfig(self, spec, prefix):
cache_entries = (self.std_initconfig_entries() +
self.initconfig_compiler_entries() +

View File

@@ -18,7 +18,7 @@
import spack.build_environment
from spack.directives import conflicts, depends_on, variant
from spack.package_base import InstallError, PackageBase, run_after
from spack.package import InstallError, PackageBase, run_after
from spack.util.path import convert_to_posix_path
# Regex to extract the primary generator from the CMake generator

View File

@@ -6,7 +6,7 @@
import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.multimethod import when
from spack.package_base import PackageBase
from spack.package import PackageBase
class CudaPackage(PackageBase):
@@ -37,7 +37,6 @@ class CudaPackage(PackageBase):
variant('cuda_arch',
description='CUDA architecture',
values=spack.variant.any_combination_of(*cuda_arch_values),
sticky=True,
when='+cuda')
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#nvcc-examples

View File

@@ -3,16 +3,14 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from typing import Optional
import spack.package_base
import spack.package
import spack.util.url
class GNUMirrorPackage(spack.package_base.PackageBase):
class GNUMirrorPackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for GNU packages."""
#: Path of the package in a GNU mirror
gnu_mirror_path = None # type: Optional[str]
gnu_mirror_path = None
#: List of GNU mirrors used by Spack
base_mirrors = [

View File

@@ -26,7 +26,7 @@
import spack.error
from spack.build_environment import dso_suffix
from spack.package_base import InstallError, PackageBase, run_after
from spack.package import InstallError, PackageBase, run_after
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable
from spack.util.prefix import Prefix
@@ -1115,7 +1115,7 @@ def _setup_dependent_env_callback(
raise InstallError('compilers_of_client arg required for MPI')
def setup_dependent_package(self, module, dep_spec):
# https://spack.readthedocs.io/en/latest/spack.html#spack.package_base.PackageBase.setup_dependent_package
# https://spack.readthedocs.io/en/latest/spack.html#spack.package.PackageBase.setup_dependent_package
# Reminder: "module" refers to Python module.
# Called before the install() method of dependents.

View File

@@ -10,7 +10,7 @@
from spack.directives import depends_on, extends
from spack.multimethod import when
from spack.package_base import PackageBase
from spack.package import PackageBase
from spack.util.executable import Executable

View File

@@ -11,7 +11,7 @@
from llnl.util.filesystem import working_dir
from spack.directives import conflicts
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class MakefilePackage(PackageBase):

View File

@@ -7,7 +7,7 @@
from llnl.util.filesystem import install_tree, working_dir
from spack.directives import depends_on
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
from spack.util.executable import which

View File

@@ -11,7 +11,7 @@
from llnl.util.filesystem import working_dir
from spack.directives import depends_on, variant
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class MesonPackage(PackageBase):

View File

@@ -6,7 +6,7 @@
import inspect
from spack.directives import extends
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class OctavePackage(PackageBase):

View File

@@ -14,7 +14,7 @@
from llnl.util.filesystem import find_headers, find_libraries, join_path
from spack.package_base import Package
from spack.package import Package
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable

View File

@@ -10,7 +10,7 @@
from llnl.util.filesystem import filter_file
from spack.directives import extends
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
from spack.util.executable import Executable

View File

@@ -6,7 +6,6 @@
import os
import re
import shutil
from typing import Optional
import llnl.util.tty as tty
from llnl.util.filesystem import (
@@ -20,13 +19,13 @@
from llnl.util.lang import match_predicate
from spack.directives import depends_on, extends
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class PythonPackage(PackageBase):
"""Specialized class for packages that are built using pip."""
#: Package name, version, and extension on PyPI
pypi = None # type: Optional[str]
pypi = None
maintainers = ['adamjstewart']
@@ -47,7 +46,7 @@ class PythonPackage(PackageBase):
# package manually
depends_on('py-wheel', type='build')
py_namespace = None # type: Optional[str]
py_namespace = None
@staticmethod
def _std_args(cls):

View File

@@ -9,7 +9,7 @@
from llnl.util.filesystem import working_dir
from spack.directives import depends_on
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class QMakePackage(PackageBase):

View File

@@ -5,10 +5,9 @@
import inspect
from typing import Optional
from spack.directives import extends
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class RPackage(PackageBase):
@@ -29,10 +28,10 @@ class RPackage(PackageBase):
# package attributes that can be expanded to set the homepage, url,
# list_url, and git values
# For CRAN packages
cran = None # type: Optional[str]
cran = None
# For Bioconductor packages
bioc = None # type: Optional[str]
bioc = None
maintainers = ['glennpj']

View File

@@ -3,14 +3,13 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from typing import Optional
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir
from spack.build_environment import SPACK_NO_PARALLEL_MAKE, determine_number_of_jobs
from spack.directives import extends
from spack.package_base import PackageBase
from spack.package import PackageBase
from spack.util.environment import env_flag
from spack.util.executable import Executable, ProcessError
@@ -37,8 +36,8 @@ class RacketPackage(PackageBase):
extends('racket')
pkgs = False
subdirectory = None # type: Optional[str]
name = None # type: Optional[str]
subdirectory = None
name = None
parallel = True
@property

View File

@@ -77,7 +77,7 @@
import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.package_base import PackageBase
from spack.package import PackageBase
class ROCmPackage(PackageBase):

View File

@@ -7,7 +7,7 @@
import inspect
from spack.directives import extends
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class RubyPackage(PackageBase):

View File

@@ -7,7 +7,7 @@
import inspect
from spack.directives import depends_on
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class SConsPackage(PackageBase):

View File

@@ -11,7 +11,7 @@
from llnl.util.filesystem import find, join_path, working_dir
from spack.directives import depends_on, extends
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class SIPPackage(PackageBase):

View File

@@ -3,17 +3,15 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from typing import Optional
import spack.package_base
import spack.package
import spack.util.url
class SourceforgePackage(spack.package_base.PackageBase):
class SourceforgePackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for Sourceforge
packages."""
#: Path of the package in a Sourceforge mirror
sourceforge_mirror_path = None # type: Optional[str]
sourceforge_mirror_path = None
#: List of Sourceforge mirrors used by Spack
base_mirrors = [

View File

@@ -2,17 +2,16 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from typing import Optional
import spack.package_base
import spack.package
import spack.util.url
class SourcewarePackage(spack.package_base.PackageBase):
class SourcewarePackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for Sourceware.org
packages."""
#: Path of the package in a Sourceware mirror
sourceware_mirror_path = None # type: Optional[str]
sourceware_mirror_path = None
#: List of Sourceware mirrors used by Spack
base_mirrors = [

View File

@@ -9,7 +9,7 @@
from llnl.util.filesystem import working_dir
from spack.directives import depends_on
from spack.package_base import PackageBase, run_after
from spack.package import PackageBase, run_after
class WafPackage(PackageBase):

View File

@@ -3,17 +3,15 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from typing import Optional
import spack.package_base
import spack.package
import spack.util.url
class XorgPackage(spack.package_base.PackageBase):
class XorgPackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for x.org
packages."""
#: Path of the package in a x.org mirror
xorg_mirror_path = None # type: Optional[str]
xorg_mirror_path = None
#: List of x.org mirrors used by Spack
# Note: x.org mirrors are a bit tricky, since many are out-of-sync or off.

View File

@@ -33,6 +33,7 @@
import spack.util.executable as exe
import spack.util.gpg as gpg_util
import spack.util.spack_yaml as syaml
import spack.util.url as url_util
import spack.util.web as web_util
from spack.error import SpackError
from spack.spec import Spec
@@ -41,8 +42,10 @@
'always',
]
SPACK_PR_MIRRORS_ROOT_URL = 's3://spack-binaries-prs'
SPACK_SHARED_PR_MIRROR_URL = url_util.join(SPACK_PR_MIRRORS_ROOT_URL,
'shared_pr_mirror')
TEMP_STORAGE_MIRROR_NAME = 'ci_temporary_mirror'
SPACK_RESERVED_TAGS = ["public", "protected", "notary"]
spack_gpg = spack.main.SpackCommand('gpg')
spack_compiler = spack.main.SpackCommand('compiler')
@@ -196,11 +199,6 @@ def _get_cdash_build_name(spec, build_group):
spec.name, spec.version, spec.compiler, spec.architecture, build_group)
def _remove_reserved_tags(tags):
"""Convenience function to strip reserved tags from jobs"""
return [tag for tag in tags if tag not in SPACK_RESERVED_TAGS]
def _get_spec_string(spec):
format_elements = [
'{name}{@version}',
@@ -233,10 +231,8 @@ def _add_dependency(spec_label, dep_label, deps):
deps[spec_label].add(dep_label)
def _get_spec_dependencies(specs, deps, spec_labels, check_index_only=False,
mirrors_to_check=None):
spec_deps_obj = _compute_spec_deps(specs, check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check)
def _get_spec_dependencies(specs, deps, spec_labels, check_index_only=False):
spec_deps_obj = _compute_spec_deps(specs, check_index_only=check_index_only)
if spec_deps_obj:
dependencies = spec_deps_obj['dependencies']
@@ -253,7 +249,7 @@ def _get_spec_dependencies(specs, deps, spec_labels, check_index_only=False,
_add_dependency(entry['spec'], entry['depends'], deps)
def stage_spec_jobs(specs, check_index_only=False, mirrors_to_check=None):
def stage_spec_jobs(specs, check_index_only=False):
"""Take a set of release specs and generate a list of "stages", where the
jobs in any stage are dependent only on jobs in previous stages. This
allows us to maximize build parallelism within the gitlab-ci framework.
@@ -265,8 +261,6 @@ def stage_spec_jobs(specs, check_index_only=False, mirrors_to_check=None):
are up to date on those mirrors. This flag limits that search to
the binary cache indices on those mirrors to speed the process up,
even though there is no guarantee the index is up to date.
mirrors_to_check: Optional mapping giving mirrors to check instead of
any configured mirrors.
Returns: A tuple of information objects describing the specs, dependencies
and stages:
@@ -303,8 +297,8 @@ def _remove_satisfied_deps(deps, satisfied_list):
deps = {}
spec_labels = {}
_get_spec_dependencies(specs, deps, spec_labels, check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check)
_get_spec_dependencies(
specs, deps, spec_labels, check_index_only=check_index_only)
# Save the original deps, as we need to return them at the end of the
# function. In the while loop below, the "dependencies" variable is
@@ -346,7 +340,7 @@ def _print_staging_summary(spec_labels, dependencies, stages):
_get_spec_string(s)))
def _compute_spec_deps(spec_list, check_index_only=False, mirrors_to_check=None):
def _compute_spec_deps(spec_list, check_index_only=False):
"""
Computes all the dependencies for the spec(s) and generates a JSON
object which provides both a list of unique spec names as well as a
@@ -419,7 +413,7 @@ def append_dep(s, d):
continue
up_to_date_mirrors = bindist.get_mirrors_for_spec(
spec=s, mirrors_to_check=mirrors_to_check, index_only=check_index_only)
spec=s, index_only=check_index_only)
skey = _spec_deps_key(s)
spec_labels[skey] = {
@@ -608,8 +602,8 @@ def get_spec_filter_list(env, affected_pkgs, dependencies=True, dependents=True)
def generate_gitlab_ci_yaml(env, print_summary, output_file,
prune_dag=False, check_index_only=False,
run_optimizer=False, use_dependencies=False,
artifacts_root=None, remote_mirror_override=None):
""" Generate a gitlab yaml file to run a dynamic child pipeline from
artifacts_root=None):
""" Generate a gitlab yaml file to run a dynamic chile pipeline from
the spec matrix in the active environment.
Arguments:
@@ -635,10 +629,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
artifacts_root (str): Path where artifacts like logs, environment
files (spack.yaml, spack.lock), etc should be written. GitLab
requires this to be within the project directory.
remote_mirror_override (str): Typically only needed when one spack.yaml
is used to populate several mirrors with binaries, based on some
criteria. Spack protected pipelines populate different mirrors based
on branch name, facilitated by this option.
"""
with spack.concretize.disable_compiler_existence_check():
with env.write_transaction():
@@ -688,19 +678,17 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
for s in affected_specs:
tty.debug(' {0}'.format(s.name))
# Downstream jobs will "need" (depend on, for both scheduling and
# artifacts, which include spack.lock file) this pipeline generation
# job by both name and pipeline id. If those environment variables
# do not exist, then maybe this is just running in a shell, in which
# case, there is no expectation gitlab will ever run the generated
# pipeline and those environment variables do not matter.
generate_job_name = os.environ.get('CI_JOB_NAME', 'job-does-not-exist')
parent_pipeline_id = os.environ.get('CI_PIPELINE_ID', 'pipeline-does-not-exist')
generate_job_name = os.environ.get('CI_JOB_NAME', None)
parent_pipeline_id = os.environ.get('CI_PIPELINE_ID', None)
# Values: "spack_pull_request", "spack_protected_branch", or not set
spack_pipeline_type = os.environ.get('SPACK_PIPELINE_TYPE', None)
is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
spack_buildcache_copy = os.environ.get('SPACK_COPY_BUILDCACHE', None)
spack_pr_branch = os.environ.get('SPACK_PR_BRANCH', None)
pr_mirror_url = None
if spack_pr_branch:
pr_mirror_url = url_util.join(SPACK_PR_MIRRORS_ROOT_URL,
spack_pr_branch)
if 'mirrors' not in yaml_root or len(yaml_root['mirrors'].values()) < 1:
tty.die('spack ci generate requires an env containing a mirror')
@@ -755,25 +743,14 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
'strip-compilers': False,
})
# If a remote mirror override (alternate buildcache destination) was
# specified, add it here in case it has already built hashes we might
# generate.
mirrors_to_check = None
if remote_mirror_override:
if spack_pipeline_type == 'spack_protected_branch':
# Overriding the main mirror in this case might result
# in skipping jobs on a release pipeline because specs are
# up to date in develop. Eventually we want to notice and take
# advantage of this by scheduling a job to copy the spec from
# develop to the release, but until we have that, this makes
# sure we schedule a rebuild job if the spec isn't already in
# override mirror.
mirrors_to_check = {
'override': remote_mirror_override
}
else:
spack.mirror.add(
'ci_pr_mirror', remote_mirror_override, cfg.default_modify_scope())
# Add per-PR mirror (and shared PR mirror) if enabled, as some specs might
# be up to date in one of those and thus not need to be rebuilt.
if pr_mirror_url:
spack.mirror.add(
'ci_pr_mirror', pr_mirror_url, cfg.default_modify_scope())
spack.mirror.add('ci_shared_pr_mirror',
SPACK_SHARED_PR_MIRROR_URL,
cfg.default_modify_scope())
pipeline_artifacts_dir = artifacts_root
if not pipeline_artifacts_dir:
@@ -848,13 +825,11 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
phase_spec.concretize()
staged_phases[phase_name] = stage_spec_jobs(
concrete_phase_specs,
check_index_only=check_index_only,
mirrors_to_check=mirrors_to_check)
check_index_only=check_index_only)
finally:
# Clean up remote mirror override if enabled
if remote_mirror_override:
if spack_pipeline_type != 'spack_protected_branch':
spack.mirror.remove('ci_pr_mirror', cfg.default_modify_scope())
# Clean up PR mirror if enabled
if pr_mirror_url:
spack.mirror.remove('ci_pr_mirror', cfg.default_modify_scope())
all_job_names = []
output_object = {}
@@ -914,14 +889,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
tags = [tag for tag in runner_attribs['tags']]
if spack_pipeline_type is not None:
# For spack pipelines "public" and "protected" are reserved tags
tags = _remove_reserved_tags(tags)
if spack_pipeline_type == 'spack_protected_branch':
tags.extend(['aws', 'protected'])
elif spack_pipeline_type == 'spack_pull_request':
tags.extend(['public'])
variables = {}
if 'variables' in runner_attribs:
variables.update(runner_attribs['variables'])
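As a worked example of the tag handling above (tag values assumed for illustration): a runner tag list from the environment is first stripped of the reserved tags, then re-extended according to the pipeline type.

SPACK_RESERVED_TAGS = ["public", "protected", "notary"]

tags = ["spack", "public", "x86_64"]
tags = [t for t in tags if t not in SPACK_RESERVED_TAGS]   # -> ["spack", "x86_64"]

spack_pipeline_type = "spack_protected_branch"             # assumed for the example
if spack_pipeline_type == "spack_protected_branch":
    tags.extend(["aws", "protected"])                      # -> ["spack", "x86_64", "aws", "protected"]
elif spack_pipeline_type == "spack_pull_request":
    tags.extend(["public"])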
@@ -1207,10 +1174,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
service_job_config,
cleanup_job)
if 'tags' in cleanup_job:
service_tags = _remove_reserved_tags(cleanup_job['tags'])
cleanup_job['tags'] = service_tags
cleanup_job['stage'] = 'cleanup-temp-storage'
cleanup_job['script'] = [
'spack -d mirror destroy --mirror-url {0}/$CI_PIPELINE_ID'.format(
@@ -1218,74 +1181,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
]
cleanup_job['when'] = 'always'
cleanup_job['retry'] = service_job_retries
cleanup_job['interruptible'] = True
output_object['cleanup'] = cleanup_job
if ('signing-job-attributes' in gitlab_ci and
spack_pipeline_type == 'spack_protected_branch'):
# External signing: generate a job to check and sign binary pkgs
stage_names.append('stage-sign-pkgs')
signing_job_config = gitlab_ci['signing-job-attributes']
signing_job = {}
signing_job_attrs_to_copy = [
'image',
'tags',
'variables',
'before_script',
'script',
'after_script',
]
_copy_attributes(signing_job_attrs_to_copy,
signing_job_config,
signing_job)
signing_job_tags = []
if 'tags' in signing_job:
signing_job_tags = _remove_reserved_tags(signing_job['tags'])
for tag in ['aws', 'protected', 'notary']:
if tag not in signing_job_tags:
signing_job_tags.append(tag)
signing_job['tags'] = signing_job_tags
signing_job['stage'] = 'stage-sign-pkgs'
signing_job['when'] = 'always'
signing_job['retry'] = {
'max': 2,
'when': ['always']
}
signing_job['interruptible'] = True
output_object['sign-pkgs'] = signing_job
if spack_buildcache_copy:
# Generate a job to copy the contents from wherever the builds are getting
# pushed to the url specified in the "SPACK_BUILDCACHE_COPY" environment
# variable.
src_url = remote_mirror_override or remote_mirror_url
dest_url = spack_buildcache_copy
stage_names.append('stage-copy-buildcache')
copy_job = {
'stage': 'stage-copy-buildcache',
'tags': ['spack', 'public', 'medium', 'aws', 'x86_64'],
'image': 'ghcr.io/spack/python-aws-bash:0.0.1',
'when': 'on_success',
'interruptible': True,
'retry': service_job_retries,
'script': [
'. ./share/spack/setup-env.sh',
'spack --version',
'aws s3 sync --exclude *index.json* --exclude *pgp* {0} {1}'.format(
src_url, dest_url)
]
}
output_object['copy-mirror'] = copy_job
if rebuild_index_enabled:
# Add a final job to regenerate the index
stage_names.append('stage-rebuild-index')
@@ -1296,13 +1194,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
service_job_config,
final_job)
if 'tags' in final_job:
service_tags = _remove_reserved_tags(final_job['tags'])
final_job['tags'] = service_tags
index_target_mirror = mirror_urls[0]
if remote_mirror_override:
index_target_mirror = remote_mirror_override
if is_pr_pipeline:
index_target_mirror = pr_mirror_url
final_job['stage'] = 'stage-rebuild-index'
final_job['script'] = [
@@ -1311,7 +1205,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
]
final_job['when'] = 'always'
final_job['retry'] = service_job_retries
final_job['interruptible'] = True
output_object['rebuild-index'] = final_job
@@ -1344,9 +1237,8 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
'SPACK_PIPELINE_TYPE': str(spack_pipeline_type)
}
if remote_mirror_override:
(output_object['variables']
['SPACK_REMOTE_MIRROR_OVERRIDE']) = remote_mirror_override
if pr_mirror_url:
output_object['variables']['SPACK_PR_MIRROR_URL'] = pr_mirror_url
spack_stack_name = os.environ.get('SPACK_CI_STACK_NAME', None)
if spack_stack_name:

View File

@@ -155,17 +155,31 @@ def parse_specs(args, **kwargs):
normalize = kwargs.get('normalize', False)
tests = kwargs.get('tests', False)
sargs = args
if not isinstance(args, six.string_types):
sargs = ' '.join(spack.util.string.quote(args))
specs = spack.spec.parse(sargs)
for spec in specs:
if concretize:
spec.concretize(tests=tests) # implies normalize
elif normalize:
spec.normalize(tests=tests)
try:
sargs = args
if not isinstance(args, six.string_types):
sargs = ' '.join(spack.util.string.quote(args))
specs = spack.spec.parse(sargs)
for spec in specs:
if concretize:
spec.concretize(tests=tests) # implies normalize
elif normalize:
spec.normalize(tests=tests)
return specs
return specs
except spack.spec.SpecParseError as e:
msg = e.message + "\n" + str(e.string) + "\n"
msg += (e.pos + 2) * " " + "^"
raise spack.error.SpackError(msg)
except spack.error.SpecError as e:
msg = e.message
if e.long_message:
msg += e.long_message
raise spack.error.SpackError(msg)
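The caret message built in the SpecParseError branch above points back into the spec string that failed to parse; a rough illustration with made-up values standing in for a real exception:

# hypothetical values standing in for e.message, e.string and e.pos
message = "parse error in spec"
string = "zlib@@1.2"
pos = 4                       # reported index of the offending character
msg = message + "\n" + string + "\n"
msg += (pos + 2) * " " + "^"  # two extra columns of padding, matching the formatting above
print(msg)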
def matching_spec_from_env(spec):

View File

@@ -6,9 +6,7 @@
import os.path
import shutil
import tempfile
import llnl.util.filesystem
import llnl.util.tty
import llnl.util.tty.color
@@ -17,9 +15,6 @@
import spack.cmd.common.arguments
import spack.config
import spack.main
import spack.mirror
import spack.spec
import spack.stage
import spack.util.path
description = "manage bootstrap configuration"
@@ -27,38 +22,6 @@
level = "long"
# Tarball to be downloaded if binary packages are requested in a local mirror
BINARY_TARBALL = 'https://github.com/spack/spack-bootstrap-mirrors/releases/download/v0.2/bootstrap-buildcache.tar.gz'
#: Subdirectory where to create the mirror
LOCAL_MIRROR_DIR = 'bootstrap_cache'
# Metadata for a generated binary mirror
BINARY_METADATA = {
'type': 'buildcache',
'description': ('Buildcache copied from a public tarball available on Github. '
'The sha256 checksum of binaries is checked before installation.'),
'info': {
'url': os.path.join('..', '..', LOCAL_MIRROR_DIR),
'homepage': 'https://github.com/spack/spack-bootstrap-mirrors',
'releases': 'https://github.com/spack/spack-bootstrap-mirrors/releases',
'tarball': BINARY_TARBALL
}
}
CLINGO_JSON = '$spack/share/spack/bootstrap/github-actions-v0.2/clingo.json'
GNUPG_JSON = '$spack/share/spack/bootstrap/github-actions-v0.2/gnupg.json'
# Metadata for a generated source mirror
SOURCE_METADATA = {
'type': 'install',
'description': 'Mirror with software needed to bootstrap Spack',
'info': {
'url': os.path.join('..', '..', LOCAL_MIRROR_DIR)
}
}
def _add_scope_option(parser):
scopes = spack.config.scopes()
scopes_metavar = spack.config.scopes_metavar
@@ -104,61 +67,24 @@ def setup_parser(subparser):
)
list = sp.add_parser(
'list', help='list all the sources of software to bootstrap Spack'
'list', help='list the methods available for bootstrapping'
)
_add_scope_option(list)
trust = sp.add_parser(
'trust', help='trust a bootstrapping source'
'trust', help='trust a bootstrapping method'
)
_add_scope_option(trust)
trust.add_argument(
'name', help='name of the source to be trusted'
'name', help='name of the method to be trusted'
)
untrust = sp.add_parser(
'untrust', help='untrust a bootstrapping source'
'untrust', help='untrust a bootstrapping method'
)
_add_scope_option(untrust)
untrust.add_argument(
'name', help='name of the source to be untrusted'
)
add = sp.add_parser(
'add', help='add a new source for bootstrapping'
)
_add_scope_option(add)
add.add_argument(
'--trust', action='store_true',
help='trust the source immediately upon addition')
add.add_argument(
'name', help='name of the new source of software'
)
add.add_argument(
'metadata_dir', help='directory where to find metadata files'
)
remove = sp.add_parser(
'remove', help='remove a bootstrapping source'
)
remove.add_argument(
'name', help='name of the source to be removed'
)
mirror = sp.add_parser(
'mirror', help='create a local mirror to bootstrap Spack'
)
mirror.add_argument(
'--binary-packages', action='store_true',
help='download public binaries in the mirror'
)
mirror.add_argument(
'--dev', action='store_true',
help='download dev dependencies too'
)
mirror.add_argument(
metavar='DIRECTORY', dest='root_dir',
help='root directory in which to create the mirror and metadata'
'name', help='name of the method to be untrusted'
)
@@ -211,7 +137,10 @@ def _root(args):
def _list(args):
sources = spack.bootstrap.bootstrapping_sources(scope=args.scope)
sources = spack.config.get(
'bootstrap:sources', default=None, scope=args.scope
)
if not sources:
llnl.util.tty.msg(
"No method available for bootstrapping Spack's dependencies"
@@ -320,119 +249,6 @@ def _status(args):
print()
def _add(args):
initial_sources = spack.bootstrap.bootstrapping_sources()
names = [s['name'] for s in initial_sources]
# If the name is already used error out
if args.name in names:
msg = 'a source named "{0}" already exists. Please choose a different name'
raise RuntimeError(msg.format(args.name))
# Check that the metadata file exists
metadata_dir = spack.util.path.canonicalize_path(args.metadata_dir)
if not os.path.exists(metadata_dir) or not os.path.isdir(metadata_dir):
raise RuntimeError(
'the directory "{0}" does not exist'.format(args.metadata_dir)
)
file = os.path.join(metadata_dir, 'metadata.yaml')
if not os.path.exists(file):
raise RuntimeError('the file "{0}" does not exist'.format(file))
# Insert the new source as the highest priority one
write_scope = args.scope or spack.config.default_modify_scope(section='bootstrap')
sources = spack.config.get('bootstrap:sources', scope=write_scope) or []
sources = [
{'name': args.name, 'metadata': args.metadata_dir}
] + sources
spack.config.set('bootstrap:sources', sources, scope=write_scope)
msg = 'New bootstrapping source "{0}" added in the "{1}" configuration scope'
llnl.util.tty.msg(msg.format(args.name, write_scope))
if args.trust:
_trust(args)
def _remove(args):
initial_sources = spack.bootstrap.bootstrapping_sources()
names = [s['name'] for s in initial_sources]
if args.name not in names:
msg = ('cannot find any bootstrapping source named "{0}". '
'Run `spack bootstrap list` to see available sources.')
raise RuntimeError(msg.format(args.name))
for current_scope in spack.config.scopes():
sources = spack.config.get('bootstrap:sources', scope=current_scope) or []
if args.name in [s['name'] for s in sources]:
sources = [s for s in sources if s['name'] != args.name]
spack.config.set('bootstrap:sources', sources, scope=current_scope)
msg = ('Removed the bootstrapping source named "{0}" from the '
'"{1}" configuration scope.')
llnl.util.tty.msg(msg.format(args.name, current_scope))
trusted = spack.config.get('bootstrap:trusted', scope=current_scope) or []
if args.name in trusted:
trusted.pop(args.name)
spack.config.set('bootstrap:trusted', trusted, scope=current_scope)
msg = 'Deleting information on "{0}" from list of trusted sources'
llnl.util.tty.msg(msg.format(args.name))
def _mirror(args):
mirror_dir = os.path.join(args.root_dir, LOCAL_MIRROR_DIR)
# TODO: Here we are adding gnuconfig manually, but this can be fixed
# TODO: as soon as we have an option to add to a mirror all the possible
# TODO: dependencies of a spec
root_specs = spack.bootstrap.all_root_specs(development=args.dev) + ['gnuconfig']
for spec_str in root_specs:
msg = 'Adding "{0}" and dependencies to the mirror at {1}'
llnl.util.tty.msg(msg.format(spec_str, mirror_dir))
# Suppress tty from the call below for terser messages
llnl.util.tty.set_msg_enabled(False)
spec = spack.spec.Spec(spec_str).concretized()
for node in spec.traverse():
spack.mirror.create(mirror_dir, [node])
llnl.util.tty.set_msg_enabled(True)
if args.binary_packages:
msg = 'Adding binary packages from "{0}" to the mirror at {1}'
llnl.util.tty.msg(msg.format(BINARY_TARBALL, mirror_dir))
llnl.util.tty.set_msg_enabled(False)
stage = spack.stage.Stage(BINARY_TARBALL, path=tempfile.mkdtemp())
stage.create()
stage.fetch()
stage.expand_archive()
build_cache_dir = os.path.join(stage.source_path, 'build_cache')
shutil.move(build_cache_dir, mirror_dir)
llnl.util.tty.set_msg_enabled(True)
def write_metadata(subdir, metadata):
metadata_rel_dir = os.path.join('metadata', subdir)
metadata_yaml = os.path.join(
args.root_dir, metadata_rel_dir, 'metadata.yaml'
)
llnl.util.filesystem.mkdirp(os.path.dirname(metadata_yaml))
with open(metadata_yaml, mode='w') as f:
spack.util.spack_yaml.dump(metadata, stream=f)
return os.path.dirname(metadata_yaml), metadata_rel_dir
instructions = ('\nTo register the mirror on the platform where it\'s supposed '
'to be used, move "{0}" to its final location and run the '
'following command(s):\n\n').format(args.root_dir)
cmd = ' % spack bootstrap add --trust {0} <final-path>/{1}\n'
_, rel_directory = write_metadata(subdir='sources', metadata=SOURCE_METADATA)
instructions += cmd.format('local-sources', rel_directory)
if args.binary_packages:
abs_directory, rel_directory = write_metadata(
subdir='binaries', metadata=BINARY_METADATA
)
shutil.copy(spack.util.path.canonicalize_path(CLINGO_JSON), abs_directory)
shutil.copy(spack.util.path.canonicalize_path(GNUPG_JSON), abs_directory)
instructions += cmd.format('local-binaries', rel_directory)
print(instructions)
def bootstrap(parser, args):
callbacks = {
'status': _status,
@@ -442,9 +258,6 @@ def bootstrap(parser, args):
'root': _root,
'list': _list,
'trust': _trust,
'untrust': _untrust,
'add': _add,
'remove': _remove,
'mirror': _mirror
'untrust': _untrust
}
callbacks[args.subcommand](args)

View File

@@ -478,12 +478,11 @@ def save_specfile_fn(args):
if args.root_specfile:
with open(args.root_specfile) as fd:
root_spec_as_json = fd.read()
spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
else:
root_spec = Spec(args.root_spec)
root_spec.concretize()
root_spec_as_json = root_spec.to_json(hash=ht.dag_hash)
spec_format = 'json'
spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
save_dependency_specfiles(
root_spec_as_json, args.specfile_dir, args.specs.split(), spec_format)

View File

@@ -14,7 +14,7 @@
import spack.repo
import spack.stage
import spack.util.crypto
from spack.package_base import preferred_version
from spack.package import preferred_version
from spack.util.naming import valid_fully_qualified_module_name
from spack.version import Version, ver

View File

@@ -64,11 +64,6 @@ def setup_parser(subparser):
'--dependencies', action='store_true', default=False,
help="(Experimental) disable DAG scheduling; use "
' "plain" dependencies.')
generate.add_argument(
'--buildcache-destination', default=None,
help="Override the mirror configured in the environment (spack.yaml) " +
"in order to push binaries from the generated pipeline to a " +
"different location.")
prune_group = generate.add_mutually_exclusive_group()
prune_group.add_argument(
'--prune-dag', action='store_true', dest='prune_dag',
@@ -132,7 +127,6 @@ def ci_generate(args):
prune_dag = args.prune_dag
index_only = args.index_only
artifacts_root = args.artifacts_root
buildcache_destination = args.buildcache_destination
if not output_file:
output_file = os.path.abspath(".gitlab-ci.yml")
@@ -146,8 +140,7 @@ def ci_generate(args):
spack_ci.generate_gitlab_ci_yaml(
env, True, output_file, prune_dag=prune_dag,
check_index_only=index_only, run_optimizer=run_optimizer,
use_dependencies=use_dependencies, artifacts_root=artifacts_root,
remote_mirror_override=buildcache_destination)
use_dependencies=use_dependencies, artifacts_root=artifacts_root)
if copy_yaml_to:
copy_to_dir = os.path.dirname(copy_yaml_to)
@@ -187,9 +180,6 @@ def ci_rebuild(args):
if not gitlab_ci:
tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')
tty.msg('SPACK_BUILDCACHE_DESTINATION={0}'.format(
os.environ.get('SPACK_BUILDCACHE_DESTINATION', None)))
# Grab the environment variables we need. These either come from the
# pipeline generation step ("spack ci generate"), where they were written
# out as variables, or else provided by GitLab itself.
@@ -206,7 +196,7 @@ def ci_rebuild(args):
compiler_action = get_env_var('SPACK_COMPILER_ACTION')
cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
remote_mirror_override = get_env_var('SPACK_REMOTE_MIRROR_OVERRIDE')
pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')
# Construct absolute paths relative to current $CI_PROJECT_DIR
@@ -254,10 +244,6 @@ def ci_rebuild(args):
tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
spack_is_pr_pipeline, spack_is_develop_pipeline))
# If no override url exists, then just push binary package to the
# normal remote mirror url.
buildcache_mirror_url = remote_mirror_override or remote_mirror_url
# Figure out what is our temporary storage mirror: Is it artifacts
# buildcache? Or temporary-storage-url-prefix? In some cases we need to
# force something or pipelines might not have a way to propagate build
@@ -387,24 +373,7 @@ def ci_rebuild(args):
cfg.default_modify_scope())
# Check configured mirrors for a built spec with a matching hash
mirrors_to_check = None
if remote_mirror_override and spack_pipeline_type == 'spack_protected_branch':
# Passing "mirrors_to_check" below means we *only* look in the override
# mirror to see if we should skip building, which is what we want.
mirrors_to_check = {
'override': remote_mirror_override
}
# Adding this mirror to the list of configured mirrors means dependencies
# could be installed from either the override mirror or any other configured
# mirror (e.g. remote_mirror_url which is defined in the environment or
# pipeline_mirror_url), which is also what we want.
spack.mirror.add('mirror_override',
remote_mirror_override,
cfg.default_modify_scope())
matches = bindist.get_mirrors_for_spec(
job_spec, mirrors_to_check=mirrors_to_check, index_only=False)
matches = bindist.get_mirrors_for_spec(job_spec, index_only=False)
if matches:
# Got a hash match on at least one configured mirror. All
@@ -548,6 +517,13 @@ def ci_rebuild(args):
# any logs from the staging directory to artifacts now
spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)
# Create buildcache on remote mirror, either on pr-specific mirror or
# on the main mirror defined in the gitlab-enabled spack environment
if spack_is_pr_pipeline:
buildcache_mirror_url = pr_mirror_url
else:
buildcache_mirror_url = remote_mirror_url
# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure

View File

@@ -57,7 +57,7 @@
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
from spack.package import *
from spack import *
class {class_name}({base_class_name}):

View File

@@ -11,7 +11,7 @@
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.package_base
import spack.package
import spack.repo
import spack.store
@@ -57,7 +57,7 @@ def dependencies(parser, args):
else:
spec = specs[0]
dependencies = spack.package_base.possible_dependencies(
dependencies = spack.package.possible_dependencies(
spec,
transitive=args.transitive,
expand_virtuals=args.expand_virtuals,

View File

@@ -200,7 +200,7 @@ def external_list(args):
list(spack.repo.path.all_packages())
# Print all the detectable packages
tty.msg("Detectable packages per repository")
for namespace, pkgs in sorted(spack.package_base.detectable_packages.items()):
for namespace, pkgs in sorted(spack.package.detectable_packages.items()):
print("Repository:", namespace)
colify.colify(pkgs, indent=4, output=sys.stdout)

View File

@@ -18,7 +18,7 @@
import spack.fetch_strategy as fs
import spack.repo
import spack.spec
from spack.package_base import has_test_method, preferred_version
from spack.package import has_test_method, preferred_version
description = 'get detailed information on a particular package'
section = 'basic'
@@ -269,14 +269,14 @@ def print_tests(pkg):
names = []
pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
if has_test_method(pkg_cls):
pkg_base = spack.package_base.PackageBase
pkg_base = spack.package.PackageBase
test_pkgs = [str(cls.test) for cls in inspect.getmro(pkg_cls) if
issubclass(cls, pkg_base) and cls.test != pkg_base.test]
test_pkgs = list(set(test_pkgs))
names.extend([(test.split()[1]).lower() for test in test_pkgs])
# TODO Refactor START
# Use code from package_base.py's test_process IF this functionality is
# Use code from package.py's test_process IF this functionality is
# accepted.
v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))

View File

@@ -302,7 +302,7 @@ def install(parser, args, **kwargs):
)
reporter = spack.report.collect_info(
spack.package_base.PackageInstaller, '_install_task', args.log_format, args)
spack.package.PackageInstaller, '_install_task', args.log_format, args)
if args.log_file:
reporter.filename = args.log_file

View File

@@ -12,7 +12,7 @@
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.error
import spack.package_base
import spack.package
import spack.repo
import spack.store
from spack.database import InstallStatuses

View File

@@ -15,10 +15,8 @@
import spack
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment
import spack.hash_types as ht
import spack.package_base
import spack.package
import spack.solver.asp as asp
description = "concretize a specs using an ASP solver"
@@ -76,51 +74,6 @@ def setup_parser(subparser):
spack.cmd.common.arguments.add_concretizer_args(subparser)
def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat()
opt, _, _ = min(result.answers)
if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels)
tty.msg("Optimization Criteria:")
maxlen = max(len(s[2]) for s in result.criteria)
color.cprint(
"@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " ")
)
fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen
for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1):
color.cprint(
fmt % (
i,
name,
"-" if build_cost is None else installed_cost,
installed_cost if build_cost is None else build_cost,
)
)
print()
# dump the solutions as concretized specs
if 'solutions' in show:
for spec in result.specs:
# With -y, just print YAML to output.
if required_format == 'yaml':
# use write because to_yaml already has a newline.
sys.stdout.write(spec.to_yaml(hash=ht.dag_hash))
elif required_format == 'json':
sys.stdout.write(spec.to_json(hash=ht.dag_hash))
else:
sys.stdout.write(
spec.tree(color=sys.stdout.isatty(), **kwargs))
print()
if result.unsolved_specs and "solutions" in show:
tty.msg("Unsolved specs")
for spec in result.unsolved_specs:
print(spec)
print()
def solve(parser, args):
# these are the same options as `spack spec`
name_fmt = '{namespace}.{name}' if args.namespaces else '{name}'
@@ -149,42 +102,58 @@ def solve(parser, args):
if models < 0:
tty.die("model count must be non-negative: %d")
# Format required for the output (JSON, YAML or None)
required_format = args.format
# If we have an active environment, pick the specs from there
env = spack.environment.active_environment()
if env and args.specs:
msg = "cannot give explicit specs when an environment is active"
raise RuntimeError(msg)
specs = list(env.user_specs) if env else spack.cmd.parse_specs(args.specs)
specs = spack.cmd.parse_specs(args.specs)
# set up solver parameters
# Note: reuse and other concretizer prefs are passed as configuration
solver = asp.Solver()
output = sys.stdout if "asp" in show else None
setup_only = set(show) == {'asp'}
unify = spack.config.get('concretizer:unify')
if unify != 'when_possible':
# set up solver parameters
# Note: reuse and other concretizer prefs are passed as configuration
result = solver.solve(
specs,
out=output,
models=models,
timers=args.timers,
stats=args.stats,
setup_only=setup_only
)
if not setup_only:
_process_result(result, show, required_format, kwargs)
else:
for idx, result in enumerate(solver.solve_in_rounds(
specs, out=output, models=models, timers=args.timers, stats=args.stats
)):
if "solutions" in show:
tty.msg("ROUND {0}".format(idx))
tty.msg("")
result = solver.solve(
specs,
out=output,
models=models,
timers=args.timers,
stats=args.stats,
setup_only=(set(show) == {'asp'})
)
if 'solutions' not in show:
return
# die if no solution was found
result.raise_if_unsat()
# show the solutions as concretized specs
if 'solutions' in show:
opt, _, _ = min(result.answers)
if ("opt" in show) and (not args.format):
tty.msg("Best of %d considered solutions." % result.nmodels)
tty.msg("Optimization Criteria:")
maxlen = max(len(s[2]) for s in result.criteria)
color.cprint(
"@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " ")
)
fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen
for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1):
color.cprint(
fmt % (
i,
name,
"-" if build_cost is None else installed_cost,
installed_cost if build_cost is None else build_cost,
)
)
print()
for spec in result.specs:
# With -y, just print YAML to output.
if args.format == 'yaml':
# use write because to_yaml already has a newline.
sys.stdout.write(spec.to_yaml(hash=ht.dag_hash))
elif args.format == 'json':
sys.stdout.write(spec.to_json(hash=ht.dag_hash))
else:
print("% END ROUND {0}\n".format(idx))
if not setup_only:
_process_result(result, show, required_format, kwargs)
sys.stdout.write(
spec.tree(color=sys.stdout.isatty(), **kwargs))
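One side of this hunk dispatches on the concretizer:unify setting: a single solver.solve() call for the ordinary case, and solver.solve_in_rounds() when unify is 'when_possible'. A stripped-down sketch of that control flow, with argument lists abbreviated:

solver = asp.Solver()
unify = spack.config.get('concretizer:unify')

if unify != 'when_possible':
    # one shot: a single result object holding every answer set
    result = solver.solve(specs, out=output, setup_only=setup_only)
    if not setup_only:
        _process_result(result, show, required_format, kwargs)
else:
    # iterative: each round concretizes the specs that are still unsolved
    for idx, result in enumerate(solver.solve_in_rounds(specs, out=output)):
        if "solutions" in show:
            tty.msg("ROUND {0}".format(idx))
        if not setup_only:
            _process_result(result, show, required_format, kwargs)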

View File

@@ -27,6 +27,12 @@ def setup_parser(subparser):
def stage(parser, args):
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
if custom_path:
spack.stage.create_stage_root(custom_path)
if not args.specs:
env = ev.active_environment()
if env:
@@ -48,10 +54,6 @@ def stage(parser, args):
specs = spack.cmd.parse_specs(args.specs, concretize=False)
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
# prevent multiple specs from extracting in the same folder
if len(specs) > 1 and custom_path:
tty.die("`--path` requires a single spec, but multiple were provided")

View File

@@ -65,7 +65,7 @@ def is_package(f):
packages, since we allow `from spack import *` and poking globals
into packages.
"""
return f.startswith("var/spack/repos/") and f.endswith('package.py')
return f.startswith("var/spack/repos/")
#: decorator for adding tools to the list
@@ -236,7 +236,7 @@ def translate(match):
continue
if not args.root_relative and re_obj:
line = re_obj.sub(translate, line)
print(line)
print(" " + line)
def print_style_header(file_list, args):
@@ -290,26 +290,20 @@ def run_flake8(flake8_cmd, file_list, args):
@tool("mypy")
def run_mypy(mypy_cmd, file_list, args):
# always run with config from running spack prefix
common_mypy_args = [
mypy_args = [
"--config-file", os.path.join(spack.paths.prefix, "pyproject.toml"),
"--show-error-codes",
]
mypy_arg_sets = [common_mypy_args + [
"--package", "spack",
"--package", "llnl",
]]
if 'SPACK_MYPY_CHECK_PACKAGES' in os.environ:
mypy_arg_sets.append(common_mypy_args + [
'--package', 'packages',
'--disable-error-code', 'no-redef',
])
"--show-error-codes",
]
# not yet, need other updates to enable this
# if any([is_package(f) for f in file_list]):
# mypy_args.extend(["--package", "packages"])
returncode = 0
for mypy_args in mypy_arg_sets:
output = mypy_cmd(*mypy_args, fail_on_error=False, output=str)
returncode |= mypy_cmd.returncode
output = mypy_cmd(*mypy_args, fail_on_error=False, output=str)
returncode = mypy_cmd.returncode
rewrite_and_print_output(output, args)
rewrite_and_print_output(output, args)
print_tool_result("mypy", returncode)
return returncode
@@ -324,29 +318,16 @@ def run_isort(isort_cmd, file_list, args):
pat = re.compile("ERROR: (.*) Imports are incorrectly sorted")
replacement = "ERROR: {0} Imports are incorrectly sorted"
returncode = [0]
returncode = 0
for chunk in grouper(file_list, 100):
packed_args = isort_args + tuple(chunk)
output = isort_cmd(*packed_args, fail_on_error=False, output=str, error=str)
returncode |= isort_cmd.returncode
def process_files(file_list, is_args):
for chunk in grouper(file_list, 100):
packed_args = is_args + tuple(chunk)
output = isort_cmd(*packed_args, fail_on_error=False, output=str, error=str)
returncode[0] |= isort_cmd.returncode
rewrite_and_print_output(output, args, pat, replacement)
rewrite_and_print_output(output, args, pat, replacement)
packages_isort_args = ('--rm', 'spack', '--rm', 'spack.pkgkit', '--rm',
'spack.package_defs', '-a', 'from spack.package import *')
packages_isort_args = packages_isort_args + isort_args
# packages
process_files(filter(is_package, file_list),
packages_isort_args)
# non-packages
process_files(filter(lambda f: not is_package(f), file_list),
isort_args)
print_tool_result("isort", returncode[0])
return returncode[0]
print_tool_result("isort", returncode)
return returncode
@tool("black")

View File

@@ -20,7 +20,7 @@
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.install_test
import spack.package_base
import spack.package
import spack.repo
import spack.report
@@ -189,7 +189,7 @@ def test_run(args):
# Set up reporter
setattr(args, 'package', [s.format() for s in test_suite.specs])
reporter = spack.report.collect_info(
spack.package_base.PackageBase, 'do_test', args.log_format, args)
spack.package.PackageBase, 'do_test', args.log_format, args)
if not reporter.filename:
if args.log_file:
if os.path.isabs(args.log_file):
@@ -217,7 +217,7 @@ def test_list(args):
else set()
def has_test_and_tags(pkg_class):
return spack.package_base.has_test_method(pkg_class) and \
return spack.package.has_test_method(pkg_class) and \
(not args.tag or pkg_class.name in tagged)
if args.list_all:

View File

@@ -24,7 +24,7 @@
# tutorial configuration parameters
tutorial_branch = "releases/v0.18"
tutorial_branch = "releases/v0.17"
tutorial_mirror = "file:///mirror"
tutorial_key = os.path.join(spack.paths.share_path, "keys", "tutorial.pub")

View File

@@ -15,7 +15,7 @@
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.error
import spack.package_base
import spack.package
import spack.repo
import spack.store
from spack.database import InstallStatuses
@@ -221,7 +221,7 @@ def do_uninstall(env, specs, force):
except spack.repo.UnknownEntityError:
# The package.py file has gone away -- but still
# want to uninstall.
spack.package_base.Package.uninstall_by_spec(item, force=True)
spack.package.Package.uninstall_by_spec(item, force=True)
# A package is ready to be uninstalled when nothing else references it,
# unless we are requested to force uninstall it.
@@ -235,7 +235,7 @@ def is_ready(dag_hash):
# If this spec is only used as a build dependency, we can uninstall
return all(
dspec.deptypes == ("build",) or not dspec.parent.installed
dspec.deptypes == ("build",)
for dspec in record.spec.edges_from_dependents()
)

View File

@@ -422,7 +422,7 @@ def url_list_parsing(args, urls, url, pkg):
urls (set): List of URLs that have already been added
url (str or None): A URL to potentially add to ``urls`` depending on
``args``
pkg (spack.package_base.PackageBase): The Spack package
pkg (spack.package.PackageBase): The Spack package
Returns:
set: The updated set of ``urls``
@@ -470,7 +470,7 @@ def name_parsed_correctly(pkg, name):
"""Determine if the name of a package was correctly parsed.
Args:
pkg (spack.package_base.PackageBase): The Spack package
pkg (spack.package.PackageBase): The Spack package
name (str): The name that was extracted from the URL
Returns:
@@ -487,7 +487,7 @@ def version_parsed_correctly(pkg, version):
"""Determine if the version of a package was correctly parsed.
Args:
pkg (spack.package_base.PackageBase): The Spack package
pkg (spack.package.PackageBase): The Spack package
version (str): The version that was extracted from the URL
Returns:

View File

@@ -766,8 +766,7 @@ def name_matches(name, name_list):
toolchains.add(compiler_cls.__name__)
if len(toolchains) > 1:
if toolchains == set(['Clang', 'AppleClang', 'Aocc']) or \
toolchains == set(['Dpcpp', 'Oneapi']):
if toolchains == set(['Clang', 'AppleClang', 'Aocc']):
return False
tty.debug("[TOOLCHAINS] {0}".format(toolchains))
return True
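A short worked example of the check above (compiler names assumed): the toolchains are only treated as mixed when more than one compiler family appears and the combination is not one of the explicitly allowed sets; the Dpcpp/Oneapi pairing appears on only one side of this hunk.

def is_mixed(toolchains):
    allowed = [set(['Clang', 'AppleClang', 'Aocc']), set(['Dpcpp', 'Oneapi'])]
    if len(toolchains) > 1:
        if toolchains in allowed:          # the hunk spells this as explicit == comparisons
            return False
        return True
    return False

is_mixed(set(['Clang', 'AppleClang', 'Aocc']))   # False: whitelisted LLVM-family combination
is_mixed(set(['Clang', 'Gcc']))                  # True: genuinely different toolchains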

View File

@@ -88,7 +88,7 @@
#: Path to the default configuration
configuration_defaults_path = (
'defaults', os.path.join(spack.paths.etc_path, 'defaults')
'defaults', os.path.join(spack.paths.etc_path, 'spack', 'defaults')
)
#: Hard-coded default values for some key configuration options.
@@ -104,7 +104,6 @@
'build_jobs': min(16, cpus_available()),
'build_stage': '$tempdir/spack-stage',
'concretizer': 'clingo',
'license_dir': spack.paths.default_license_dir,
}
}
@@ -816,7 +815,7 @@ def _config():
# Site configuration is per spack instance, for sites or projects
# No site-level configs should be checked into spack by default.
configuration_paths.append(
('site', os.path.join(spack.paths.etc_path)),
('site', os.path.join(spack.paths.etc_path, 'spack')),
)
# User configuration can override both spack defaults and site config

View File

@@ -356,10 +356,10 @@ def __init__(self, root, db_dir=None, upstream_dbs=None,
self.prefix_fail_path = os.path.join(self._db_dir, 'prefix_failures')
# Create needed directories and files
if not is_upstream and not os.path.exists(self._db_dir):
if not os.path.exists(self._db_dir):
fs.mkdirp(self._db_dir)
if not is_upstream and not os.path.exists(self._failure_dir):
if not os.path.exists(self._failure_dir) and not is_upstream:
fs.mkdirp(self._failure_dir)
self.is_upstream = is_upstream
@@ -1064,7 +1064,9 @@ def _read(self):
self._state_is_inconsistent = False
return
elif self.is_upstream:
tty.warn('upstream not found: {0}'.format(self._index_path))
raise UpstreamDatabaseLockingError(
"No database index file is present, and upstream"
" databases cannot generate an index file")
def _add(
self,

View File

@@ -240,7 +240,7 @@ def compute_windows_program_path_for_package(pkg):
program files location, return list of best guesses
Args:
pkg (spack.package_base.Package): package for which
pkg (spack.package.Package): package for which
Program Files location is to be computed
"""
if not is_windows:

View File

@@ -79,9 +79,8 @@
env_subdir_name = '.spack-env'
def default_manifest_yaml():
"""default spack.yaml file to put in new environments"""
return """\
#: default spack.yaml file to put in new environments
default_manifest_yaml = """\
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
@@ -90,11 +89,7 @@ def default_manifest_yaml():
# add package specs to the `specs` list
specs: []
view: true
concretizer:
unify: {}
""".format('true' if spack.config.get('concretizer:unify') else 'false')
"""
#: regex for validating environment names
valid_environment_name_re = r'^\w[\w-]*$'
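On the side of this hunk where the default manifest is a function, the trailing concretizer:unify line is filled in from the current configuration, so a newly created environment records the unification default it was created under. A small hedged illustration:

# assuming the global default is unified concretization
spack.config.set('concretizer:unify', True)

manifest = default_manifest_yaml()
# the rendered text now ends with:
#   concretizer:
#     unify: true
'unify: true' in manifest    # -> True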
@@ -628,7 +623,7 @@ def __init__(self, path, init_file=None, with_view=None, keep_relative=False):
# This attribute will be set properly from configuration
# during concretization
self.unify = None
self.concretization = None
self.clear()
if init_file:
@@ -637,11 +632,11 @@ def __init__(self, path, init_file=None, with_view=None, keep_relative=False):
# the init file.
with fs.open_if_filename(init_file) as f:
if hasattr(f, 'name') and f.name.endswith('.lock'):
self._read_manifest(default_manifest_yaml())
self._read_manifest(default_manifest_yaml)
self._read_lockfile(f)
self._set_user_specs_from_lockfile()
else:
self._read_manifest(f, raw_yaml=default_manifest_yaml())
self._read_manifest(f, raw_yaml=default_manifest_yaml)
# Rewrite relative develop paths when initializing a new
# environment in a different location from the spack.yaml file.
@@ -705,7 +700,7 @@ def _read(self):
default_manifest = not os.path.exists(self.manifest_path)
if default_manifest:
# No manifest, use default yaml
self._read_manifest(default_manifest_yaml())
self._read_manifest(default_manifest_yaml)
else:
with open(self.manifest_path) as f:
self._read_manifest(f)
@@ -771,15 +766,8 @@ def _read_manifest(self, f, raw_yaml=None):
self.views = {}
# Retrieve the current concretization strategy
configuration = config_dict(self.yaml)
# Let `concretization` overrule `concretize:unify` config for now,
# but use a translation table to have internally a representation
# as if we were using the new configuration
translation = {'separately': False, 'together': True}
try:
self.unify = translation[configuration['concretization']]
except KeyError:
self.unify = spack.config.get('concretizer:unify', False)
# default concretization to separately
self.concretization = configuration.get('concretization', 'separately')
# Retrieve dev-build packages:
self.dev_specs = configuration.get('develop', {})
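The translation table above maps the legacy per-environment concretization key onto the newer concretizer:unify representation; when the key is absent, the global configuration value is used instead. A condensed sketch of the two lookups involved (configuration contents assumed):

translation = {'separately': False, 'together': True}

configuration = {'concretization': 'together'}              # legacy spack.yaml key present
try:
    unify = translation[configuration['concretization']]    # -> True
except KeyError:
    unify = spack.config.get('concretizer:unify', False)

configuration = {}                                           # no legacy key present
try:
    unify = translation[configuration['concretization']]
except KeyError:
    unify = spack.config.get('concretizer:unify', False)    # fall back to the global setting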
@@ -1160,44 +1148,14 @@ def concretize(self, force=False, tests=False):
self.specs_by_hash = {}
# Pick the right concretization strategy
if self.unify == 'when_possible':
return self._concretize_together_where_possible(tests=tests)
if self.unify is True:
if self.concretization == 'together':
return self._concretize_together(tests=tests)
if self.unify is False:
if self.concretization == 'separately':
return self._concretize_separately(tests=tests)
msg = 'concretization strategy not implemented [{0}]'
raise SpackEnvironmentError(msg.format(self.unify))
def _concretize_together_where_possible(self, tests=False):
# Avoid cyclic dependency
import spack.solver.asp
# Exit early if the set of concretized specs is the set of user specs
user_specs_did_not_change = not bool(
set(self.user_specs) - set(self.concretized_user_specs)
)
if user_specs_did_not_change:
return []
# Proceed with concretization
self.concretized_user_specs = []
self.concretized_order = []
self.specs_by_hash = {}
result_by_user_spec = {}
solver = spack.solver.asp.Solver()
for result in solver.solve_in_rounds(self.user_specs, tests=tests):
result_by_user_spec.update(result.specs_by_input)
result = []
for abstract, concrete in sorted(result_by_user_spec.items()):
self._add_concrete_spec(abstract, concrete)
result.append((abstract, concrete))
return result
raise SpackEnvironmentError(msg.format(self.concretization))
def _concretize_together(self, tests=False):
"""Concretization strategy that concretizes all the specs
@@ -1350,7 +1308,7 @@ def concretize_and_add(self, user_spec, concrete_spec=None, tests=False):
concrete_spec: if provided, then it is assumed that it is the
result of concretizing the provided ``user_spec``
"""
if self.unify is True:
if self.concretization == 'together':
msg = 'cannot install a single spec in an environment that is ' \
'configured to be concretized together. Run instead:\n\n' \
' $ spack add <spec>\n' \
@@ -1653,14 +1611,7 @@ def all_specs(self):
"""Return all specs, even those a user spec would shadow."""
all_specs = set()
for h in self.concretized_order:
try:
spec = self.specs_by_hash[h]
except KeyError:
tty.warn(
'Environment %s appears to be corrupt: missing spec '
'"%s"' % (self.name, h))
continue
all_specs.update(spec.traverse())
all_specs.update(self.specs_by_hash[h].traverse())
return sorted(all_specs)
@@ -1918,15 +1869,17 @@ def write(self, regenerate=True):
regenerate (bool): regenerate views and run post-write hooks as
well as writing if True.
"""
# Warn that environments are not in the latest format.
if not is_latest_format(self.manifest_path):
ver = '.'.join(str(s) for s in spack.spack_version_info[:2])
msg = ('The environment "{}" is written to disk in a deprecated format. '
'Please update it using:\n\n'
'\tspack env update {}\n\n'
'Note that versions of Spack older than {} may not be able to '
# Intercept environment not using the latest schema format and prevent
# them from being modified
manifest_exists = os.path.exists(self.manifest_path)
if manifest_exists and not is_latest_format(self.manifest_path):
msg = ('The environment "{0}" needs to be written to disk, but '
'is currently using a deprecated format. Please update it '
'using:\n\n'
'\tspack env update {0}\n\n'
'Note that previous versions of Spack will not be able to '
'use the updated configuration.')
tty.warn(msg.format(self.name, self.name, ver))
raise RuntimeError(msg.format(self.name))
# ensure path in var/spack/environments
fs.mkdirp(self.path)
@@ -2278,16 +2231,14 @@ def _top_level_key(data):
def is_latest_format(manifest):
"""Return False if the manifest file exists and is not in the latest schema format.
"""Return True if the manifest file is at the latest schema format,
False otherwise.
Args:
manifest (str): manifest file to be analyzed
"""
try:
with open(manifest) as f:
data = syaml.load(f)
except (OSError, IOError):
return True
with open(manifest) as f:
data = syaml.load(f)
top_level_key = _top_level_key(data)
changed = spack.schema.env.update(data[top_level_key])
return not changed

View File

@@ -1556,7 +1556,7 @@ def _extrapolate(pkg, version):
try:
return URLFetchStrategy(pkg.url_for_version(version),
fetch_options=pkg.fetch_options)
except spack.package_base.NoURLError:
except spack.package.NoURLError:
msg = ("Can't extrapolate a URL for version %s "
"because package %s defines no URLs")
raise ExtrapolationError(msg % (version, pkg.name))

View File

@@ -95,7 +95,10 @@ def view_copy(src, dst, view, spec=None):
view.get_projection_for_spec(dep)
if spack.relocate.is_binary(dst):
spack.relocate.relocate_text_bin([dst], prefix_to_projection)
spack.relocate.relocate_text_bin(
binaries=[dst],
prefixes=prefix_to_projection
)
else:
prefix_to_projection[spack.store.layout.root] = view._root
prefix_to_projection[orig_sbang] = new_sbang

View File

@@ -493,11 +493,9 @@ def write(self, spec, color=None, out=None):
# Replace node with its dependencies
self._frontier.pop(i)
edges = sorted(
node.edges_to_dependencies(deptype=self.deptype), reverse=True
)
if edges:
deps = [e.spec.dag_hash() for e in edges]
deps = node.dependencies(deptype=self.deptype)
if deps:
deps = sorted((d.dag_hash() for d in deps), reverse=True)
self._connect_deps(i, deps, "new-deps") # anywhere.
elif self._frontier:

View File

@@ -50,7 +50,7 @@
import spack.error
import spack.hooks
import spack.monitor
import spack.package_base
import spack.package
import spack.package_prefs as prefs
import spack.repo
import spack.store
@@ -103,7 +103,7 @@ def _check_last_phase(pkg):
package already.
Args:
pkg (spack.package_base.PackageBase): the package being installed
pkg (spack.package.PackageBase): the package being installed
Raises:
``BadInstallPhase`` if stop_before or last phase is invalid
@@ -125,7 +125,7 @@ def _handle_external_and_upstream(pkg, explicit):
database if it is external package.
Args:
pkg (spack.package_base.Package): the package whose installation is under
pkg (spack.package.Package): the package whose installation is under
consideration
explicit (bool): the package was explicitly requested by the user
Return:
@@ -265,7 +265,7 @@ def _install_from_cache(pkg, cache_only, explicit, unsigned=False):
Extract the package from binary cache
Args:
pkg (spack.package_base.PackageBase): package to install from the binary cache
pkg (spack.package.PackageBase): the package to install from the binary cache
cache_only (bool): only extract from binary cache
explicit (bool): ``True`` if installing the package was explicitly
requested by the user, otherwise, ``False``
@@ -350,27 +350,27 @@ def _process_external_package(pkg, explicit):
def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
mirrors_for_spec=None):
preferred_mirrors=None):
"""
Process the binary cache tarball.
Args:
pkg (spack.package_base.PackageBase): the package being installed
pkg (spack.package.PackageBase): the package being installed
binary_spec (spack.spec.Spec): the spec whose cache has been confirmed
explicit (bool): the package was explicitly requested by the user
unsigned (bool): ``True`` if binary package signatures to be checked,
otherwise, ``False``
mirrors_for_spec (list): Optional list of concrete specs and mirrors
obtained by calling binary_distribution.get_mirrors_for_spec().
preferred_mirrors (list): Optional list of urls to prefer when
attempting to download the tarball
Return:
bool: ``True`` if the package was extracted from binary cache,
else ``False``
"""
download_result = binary_distribution.download_tarball(
binary_spec, unsigned, mirrors_for_spec=mirrors_for_spec)
tarball = binary_distribution.download_tarball(
binary_spec, preferred_mirrors=preferred_mirrors)
# see #10063 : install from source if tarball doesn't exist
if download_result is None:
if tarball is None:
tty.msg('{0} exists in binary cache but with different hash'
.format(pkg.name))
return False
@@ -380,9 +380,9 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
# don't print long padded paths while extracting/relocating binaries
with spack.util.path.filter_padding():
binary_distribution.extract_tarball(binary_spec, download_result,
allow_root=False, unsigned=unsigned,
force=False)
binary_distribution.extract_tarball(
binary_spec, tarball, allow_root=False, unsigned=unsigned, force=False
)
pkg.installed_from_binary_cache = True
spack.store.db.add(pkg.spec, spack.store.layout, explicit=explicit)
@@ -394,7 +394,7 @@ def _try_install_from_binary_cache(pkg, explicit, unsigned=False):
Try to extract the package from binary cache.
Args:
pkg (spack.package_base.PackageBase): package to be extracted from binary cache
pkg (spack.package.PackageBase): the package to be extracted from binary cache
explicit (bool): the package was explicitly requested by the user
unsigned (bool): ``True`` if binary package signatures to be checked,
otherwise, ``False``
@@ -406,8 +406,12 @@ def _try_install_from_binary_cache(pkg, explicit, unsigned=False):
if not matches:
return False
return _process_binary_cache_tarball(pkg, pkg.spec, explicit, unsigned,
mirrors_for_spec=matches)
# In the absence of guidance from user or some other reason to prefer one
# mirror over another, any match will suffice, so just pick the first one.
preferred_mirrors = [match['mirror_url'] for match in matches]
binary_spec = matches[0]['spec']
return _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
preferred_mirrors=preferred_mirrors)
def clear_failures():
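In the shape shown on one side of this hunk, the mirror query result is flattened into a plain list of URLs before the tarball download is attempted; a compact sketch of that hand-off (match contents assumed):

matches = bindist.get_mirrors_for_spec(pkg.spec, index_only=True)
# e.g. [{'mirror_url': 's3://bucket-a', 'spec': <Spec>}, {'mirror_url': 's3://bucket-b', 'spec': <Spec>}]
if matches:
    preferred_mirrors = [m['mirror_url'] for m in matches]
    binary_spec = matches[0]['spec']          # any match will do, take the first
    _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
                                  preferred_mirrors=preferred_mirrors)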
@@ -530,7 +534,7 @@ def log(pkg):
Copy provenance into the install directory on success
Args:
pkg (spack.package_base.Package): the package that was built and installed
pkg (spack.package.Package): the package that was built and installed
"""
packages_dir = spack.store.layout.build_packages_path(pkg.spec)
@@ -616,7 +620,7 @@ def package_id(pkg):
and packages for combinatorial environments.
Args:
pkg (spack.package_base.PackageBase): the package from which the identifier is
pkg (spack.package.PackageBase): the package from which the identifier is
derived
"""
if not pkg.spec.concrete:
@@ -769,7 +773,7 @@ def _add_bootstrap_compilers(
Args:
compiler: the compiler to bootstrap
architecture: the architecture for which to bootstrap the compiler
pkgs (spack.package_base.PackageBase): the package with possible compiler
pkgs (spack.package.PackageBase): the package with possible compiler
dependencies
request (BuildRequest): the associated install request
all_deps (defaultdict(set)): dictionary of all dependencies and
@@ -786,7 +790,7 @@ def _add_init_task(self, pkg, request, is_compiler, all_deps):
Creates and queues the initial build task for the package.
Args:
pkg (spack.package_base.Package): the package to be built and installed
pkg (spack.package.Package): the package to be built and installed
request (BuildRequest or None): the associated install request
where ``None`` can be used to indicate the package was
explicitly requested by the user
@@ -968,7 +972,7 @@ def _cleanup_task(self, pkg):
Cleanup the build task for the spec
Args:
pkg (spack.package_base.PackageBase): the package being installed
pkg (spack.package.PackageBase): the package being installed
"""
self._remove_task(package_id(pkg))
@@ -982,7 +986,7 @@ def _ensure_install_ready(self, pkg):
already locked.
Args:
pkg (spack.package_base.PackageBase): the package being locally installed
pkg (spack.package.PackageBase): the package being locally installed
"""
pkg_id = package_id(pkg)
pre = "{0} cannot be installed locally:".format(pkg_id)
@@ -1014,8 +1018,7 @@ def _ensure_locked(self, lock_type, pkg):
Args:
lock_type (str): 'read' for a read lock, 'write' for a write lock
pkg (spack.package_base.PackageBase): the package whose spec is being
installed
pkg (spack.package.PackageBase): the package whose spec is being installed
Return:
(lock_type, lock) tuple where lock will be None if it could not
@@ -1229,7 +1232,7 @@ def _install_task(self, task):
# Create a child process to do the actual installation.
# Preserve verbosity settings across installs.
spack.package_base.PackageBase._verbose = (
spack.package.PackageBase._verbose = (
spack.build_environment.start_build_process(
pkg, build_process, install_args)
)
@@ -1374,7 +1377,7 @@ def _setup_install_dir(self, pkg):
Write a small metadata file with the current spack environment.
Args:
pkg (spack.package_base.Package): the package to be built and installed
pkg (spack.package.Package): the package to be built and installed
"""
if not os.path.exists(pkg.spec.prefix):
tty.debug('Creating the installation directory {0}'.format(pkg.spec.prefix))
@@ -1448,7 +1451,7 @@ def _flag_installed(self, pkg, dependent_ids=None):
known dependents.
Args:
pkg (spack.package_base.Package): Package that has been installed locally,
pkg (spack.package.Package): Package that has been installed locally,
externally or upstream
dependent_ids (list or None): list of the package's
dependent ids, or None if the dependent ids are limited to
@@ -1537,7 +1540,7 @@ def install(self):
Install the requested package(s) and or associated dependencies.
Args:
pkg (spack.package_base.Package): the package to be built and installed"""
pkg (spack.package.Package): the package to be built and installed"""
self._init_queue()
fail_fast_err = 'Terminating after first install failure'
@@ -1789,7 +1792,7 @@ def __init__(self, pkg, install_args):
process in the build.
Arguments:
pkg (spack.package_base.PackageBase) the package being installed.
pkg (spack.package.PackageBase) the package being installed.
install_args (dict) arguments to do_install() from parent process.
"""
@@ -1849,8 +1852,8 @@ def run(self):
# get verbosity from do_install() parameter or saved value
self.echo = self.verbose
if spack.package_base.PackageBase._verbose is not None:
self.echo = spack.package_base.PackageBase._verbose
if spack.package.PackageBase._verbose is not None:
self.echo = spack.package.PackageBase._verbose
self.pkg.stage.keep = self.keep_stage
@@ -2002,7 +2005,7 @@ def build_process(pkg, install_args):
This function's return value is returned to the parent process.
Arguments:
pkg (spack.package_base.PackageBase): the package being installed.
pkg (spack.package.PackageBase): the package being installed.
install_args (dict): arguments to do_install() from parent process.
"""
@@ -2050,7 +2053,7 @@ def __init__(self, pkg, request, compiler, start, attempts, status,
Instantiate a build task for a package.
Args:
pkg (spack.package_base.Package): the package to be built and installed
pkg (spack.package.Package): the package to be built and installed
request (BuildRequest or None): the associated install request
where ``None`` can be used to indicate the package was
explicitly requested by the user
@@ -2063,7 +2066,7 @@ def __init__(self, pkg, request, compiler, start, attempts, status,
"""
# Ensure dealing with a package that has a concrete spec
if not isinstance(pkg, spack.package_base.PackageBase):
if not isinstance(pkg, spack.package.PackageBase):
raise ValueError("{0} must be a package".format(str(pkg)))
self.pkg = pkg
@@ -2241,11 +2244,11 @@ def __init__(self, pkg, install_args):
Instantiate a build request for a package.
Args:
pkg (spack.package_base.Package): the package to be built and installed
pkg (spack.package.Package): the package to be built and installed
install_args (dict): the install arguments associated with ``pkg``
"""
# Ensure dealing with a package that has a concrete spec
if not isinstance(pkg, spack.package_base.PackageBase):
if not isinstance(pkg, spack.package.PackageBase):
raise ValueError("{0} must be a package".format(str(pkg)))
self.pkg = pkg
@@ -2315,7 +2318,7 @@ def get_deptypes(self, pkg):
"""Determine the required dependency types for the associated package.
Args:
pkg (spack.package_base.PackageBase): explicit or implicit package being
pkg (spack.package.PackageBase): explicit or implicit package being
installed
Returns:
@@ -2338,7 +2341,7 @@ def run_tests(self, pkg):
"""Determine if the tests should be run for the provided packages
Args:
pkg (spack.package_base.PackageBase): explicit or implicit package being
pkg (spack.package.PackageBase): explicit or implicit package being
installed
Returns:

View File

@@ -196,14 +196,6 @@ def provides(self):
if self.spec.name == 'llvm-amdgpu':
provides['compiler'] = spack.spec.CompilerSpec(str(self.spec))
provides['compiler'].name = 'rocmcc'
# Special case for oneapi
if self.spec.name == 'intel-oneapi-compilers':
provides['compiler'] = spack.spec.CompilerSpec(str(self.spec))
provides['compiler'].name = 'oneapi'
# Special case for oneapi classic
if self.spec.name == 'intel-oneapi-compilers-classic':
provides['compiler'] = spack.spec.CompilerSpec(str(self.spec))
provides['compiler'].name = 'intel'
# All the other tokens in the hierarchy must be virtual dependencies
for x in self.hierarchy_tokens:

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -123,11 +123,11 @@ def accept(self, id):
def next_token_error(self, message):
"""Raise an error about the next token in the stream."""
raise ParseError(message, self.text[0], self.token.end)
raise ParseError(message, self.text, self.token.end)
def last_token_error(self, message):
"""Raise an error about the previous token in the stream."""
raise ParseError(message, self.text[0], self.token.start)
raise ParseError(message, self.text, self.token.start)
def unexpected_token(self):
self.next_token_error("Unexpected token: '%s'" % self.next.value)

View File

@@ -356,7 +356,7 @@ def patch_for_package(self, sha256, pkg):
Arguments:
sha256 (str): sha256 hash to look up
pkg (spack.package_base.Package): Package object to get patch for.
pkg (spack.package.Package): Package object to get patch for.
We build patch objects lazily because building them requires that
we have information about the package's location in its repo.

View File

@@ -43,12 +43,8 @@
hooks_path = os.path.join(module_path, "hooks")
opt_path = os.path.join(prefix, "opt")
share_path = os.path.join(prefix, "share", "spack")
etc_path = os.path.join(prefix, "etc", "spack")
etc_path = os.path.join(prefix, "etc")
#
# Things in $spack/etc/spack
#
default_license_dir = os.path.join(etc_path, "licenses")
#
# Things in $spack/var/spack

lib/spack/spack/pkgkit.py (new file, 83 lines)
View File

@@ -0,0 +1,83 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# flake8: noqa: F401
"""pkgkit is a set of useful build tools and directives for packages.
Everything in this module is automatically imported into Spack package files.
"""
import llnl.util.filesystem
from llnl.util.filesystem import *
import spack.directives
import spack.util.executable
from spack.build_systems.aspell_dict import AspellDictPackage
from spack.build_systems.autotools import AutotoolsPackage
from spack.build_systems.cached_cmake import (
CachedCMakePackage,
cmake_cache_option,
cmake_cache_path,
cmake_cache_string,
)
from spack.build_systems.cmake import CMakePackage
from spack.build_systems.cuda import CudaPackage
from spack.build_systems.gnu import GNUMirrorPackage
from spack.build_systems.intel import IntelPackage
from spack.build_systems.lua import LuaPackage
from spack.build_systems.makefile import MakefilePackage
from spack.build_systems.maven import MavenPackage
from spack.build_systems.meson import MesonPackage
from spack.build_systems.octave import OctavePackage
from spack.build_systems.oneapi import (
IntelOneApiLibraryPackage,
IntelOneApiPackage,
IntelOneApiStaticLibraryList,
)
from spack.build_systems.perl import PerlPackage
from spack.build_systems.python import PythonPackage
from spack.build_systems.qmake import QMakePackage
from spack.build_systems.r import RPackage
from spack.build_systems.racket import RacketPackage
from spack.build_systems.rocm import ROCmPackage
from spack.build_systems.ruby import RubyPackage
from spack.build_systems.scons import SConsPackage
from spack.build_systems.sip import SIPPackage
from spack.build_systems.sourceforge import SourceforgePackage
from spack.build_systems.sourceware import SourcewarePackage
from spack.build_systems.waf import WafPackage
from spack.build_systems.xorg import XorgPackage
from spack.dependency import all_deptypes
from spack.directives import *
from spack.install_test import get_escaped_text_output
from spack.installer import (
ExternalPackageError,
InstallError,
InstallLockError,
UpstreamPackageError,
)
from spack.mixins import filter_compiler_wrappers
from spack.multimethod import when
from spack.package import (
BundlePackage,
DependencyConflictError,
Package,
build_system_flags,
env_flags,
flatten_dependencies,
inject_flags,
install_dependency_symlinks,
on_package_attributes,
run_after,
run_before,
)
from spack.spec import InvalidSpecDetected, Spec
from spack.util.executable import *
from spack.variant import (
any_combination_of,
auto_or_any_combination_of,
conditional,
disjoint_sets,
)
from spack.version import Version, ver
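Everything re-exported above is what package files receive automatically. As an illustration only, a minimal package sketch that relies solely on names provided by ``spack.pkgkit``; the package name, URL, and checksum below are hypothetical:

# Hypothetical package.py; AutotoolsPackage, version, variant and depends_on
# are assumed to come from the automatic `from spack.pkgkit import *` prepend.
from spack.pkgkit import *


class Libexample(AutotoolsPackage):
    """Illustrative package that only uses pkgkit-provided symbols."""

    homepage = "https://example.com/libexample"
    url = "https://example.com/libexample-1.0.tar.gz"

    version('1.0', sha256='0' * 64)  # placeholder checksum
    variant('shared', default=True, description='Build shared libraries')
    depends_on('zlib')

    def configure_args(self):
        # Translate the variant into the corresponding configure flag.
        if '+shared' in self.spec:
            return ['--enable-shared']
        return ['--disable-shared']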

View File

@@ -469,6 +469,47 @@ def _replace_prefix_text(filename, compiled_prefixes):
f.truncate()
def _replace_prefix_bin(filename, byte_prefixes):
"""Replace all the occurrences of the old install prefix with a
new install prefix in binary files.
The new install prefix is padded with ``os.sep`` until the
lengths of the prefixes are the same.
Args:
filename (str): target binary file
byte_prefixes (OrderedDict): OrderedDictionary where the keys are
precompiled regex of the old prefixes and the values are the new
prefixes (utf-8 encoded)
"""
with open(filename, 'rb+') as f:
data = f.read()
f.seek(0)
for orig_bytes, new_bytes in byte_prefixes.items():
original_data_len = len(data)
# Skip this hassle if not found
if orig_bytes not in data:
continue
# We only care about this problem if we are about to replace
length_compatible = len(new_bytes) <= len(orig_bytes)
if not length_compatible:
tty.debug('Binary failing to relocate is %s' % filename)
raise BinaryTextReplaceError(orig_bytes, new_bytes)
pad_length = len(orig_bytes) - len(new_bytes)
padding = os.sep * pad_length
padding = padding.encode('utf-8')
data = data.replace(orig_bytes, new_bytes + padding)
# Really needs to be the same length
if not len(data) == original_data_len:
print('Length of pad:', pad_length, 'should be', len(padding))
print(new_bytes, 'was to replace', orig_bytes)
raise BinaryStringReplacementError(
filename, original_data_len, len(data))
f.write(data)
f.truncate()
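For clarity, a standalone sketch of the padding rule used by ``_replace_prefix_bin`` above, assuming (as the function does) that the new prefix is never longer than the old one; the helper name is made up for illustration:

import os

def pad_to_length(orig_bytes, new_bytes):
    # Illustration only: pad the new prefix with os.sep bytes so that the
    # replacement keeps exactly the byte length of the original prefix.
    if len(new_bytes) > len(orig_bytes):
        raise ValueError('new prefix must not be longer than the old one')
    padding = (os.sep * (len(orig_bytes) - len(new_bytes))).encode('utf-8')
    return new_bytes + padding

# Each replacement preserves length, so the binary's size does not change.
assert len(pad_to_length(b'/old/spack/opt/zlib', b'/srv/opt/zlib')) == 19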
def relocate_macho_binaries(path_names, old_layout_root, new_layout_root,
prefix_to_prefix, rel, old_prefix, new_prefix):
"""
@@ -776,6 +817,49 @@ def relocate_text(files, prefixes, concurrency=32):
tp.join()
def relocate_text_bin(binaries, prefixes, concurrency=32):
"""Replace null terminated path strings hard coded into binaries.
The new install prefix must be shorter than the original one.
Args:
binaries (list): binaries to be relocated
prefixes (OrderedDict): String prefixes which need to be changed.
concurrency (int): Desired degree of parallelism.
Raises:
BinaryTextReplaceError: when the new path is longer than the old path
"""
byte_prefixes = collections.OrderedDict({})
for orig_prefix, new_prefix in prefixes.items():
if orig_prefix != new_prefix:
if isinstance(orig_prefix, bytes):
orig_bytes = orig_prefix
else:
orig_bytes = orig_prefix.encode('utf-8')
if isinstance(new_prefix, bytes):
new_bytes = new_prefix
else:
new_bytes = new_prefix.encode('utf-8')
byte_prefixes[orig_bytes] = new_bytes
# Do relocations on text in binaries that refers to the install tree
# multiprocessing.ThreadPool.map requires a single argument
args = []
for binary in binaries:
args.append((binary, byte_prefixes))
tp = multiprocessing.pool.ThreadPool(processes=concurrency)
try:
tp.map(llnl.util.lang.star(_replace_prefix_bin), args)
finally:
tp.terminate()
tp.join()
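A usage sketch of the interface restored above; the prefixes and file paths are hypothetical, and the only real constraint is that each new prefix must be no longer than the prefix it replaces:

import collections

import spack.relocate as relocate

# Hypothetical old/new install prefixes; the new prefix is shorter.
prefixes = collections.OrderedDict([
    ('/old/spack/opt/zlib-1.2.12-abcdef', '/srv/opt/zlib-1.2.12-abcdef'),
])

# Rewrite embedded prefix strings in the listed binaries in place.
relocate.relocate_text_bin(
    binaries=['/srv/opt/zlib-1.2.12-abcdef/bin/minigzip'],
    prefixes=prefixes,
    concurrency=4,
)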
def is_relocatable(spec):
"""Returns True if an installed spec is relocatable.
@@ -1042,120 +1126,3 @@ def fixup_macos_rpaths(spec):
))
else:
tty.debug('No rpath fixup needed for ' + specname)
def compute_indices(filename, paths_to_relocate):
"""
Compute the indices in filename at which each of paths_to_relocate occurs.
Arguments:
filename (str): file to compute indices for
paths_to_relocate (List[str]): paths to find indices of
Returns:
Dict
"""
with open(filename, 'rb') as f:
contents = f.read()
substring_prefix = os.path.commonprefix(paths_to_relocate).encode('utf-8')
indices = {}
index = 0
max_length = max(len(path) for path in paths_to_relocate)
while True:
try:
# We search for the smallest substring of all paths we relocate
# In practice, this is the spack install root, and we relocate
# prefixes in the root and the root itself
index = contents.index(substring_prefix, index)
except ValueError:
# The string isn't found in the rest of the binary
break
else:
# only copy the smallest portion of the binary for comparisons
substring_to_check = contents[index:index + max_length]
for path in paths_to_relocate:
# We guarantee any substring in the list comes after any superstring
p = path.encode('utf-8')
if substring_to_check.startswith(p):
indices[index] = str(path)
index += len(path)
break
else:
index += 1
return indices
def _relocate_binary_text(filename, offsets, prefix_to_prefix):
"""
Relocate the text of a single binary file, given the offsets at which the
replacements need to be made
Arguments:
filename (str): file to modify
offsets (Dict[int, str]): locations of the strings to replace
prefix_to_prefix (Dict[str, str]): strings to replace and their replacements
"""
with open(filename, 'rb+') as f:
for index, prefix in offsets.items():
replacement = prefix_to_prefix[prefix].encode('utf-8')
if len(replacement) > len(prefix):
raise BinaryTextReplaceError(prefix, replacement)
# read forward until we find the end of the string including
# the prefix and compute the replacement as we go
f.seek(index + len(prefix))
c = f.read(1)
while c not in (None, b'\x00'):
replacement += c
c = f.read(1)
# seek back to the index position and write the replacement in
# and add null-terminator
f.seek(index)
f.write(replacement)
f.write(b'\x00')
def relocate_text_bin(
files_to_relocate, prefix_to_prefix, offsets=None,
relative_root=None, concurrency=32
):
"""
For each file given, replace all keys in the given translation dict with
the associated values. Optionally executes using precomputed memoized offsets
for the substitutions.
Arguments:
files_to_relocate (List[str]): The files to modify
prefix_to_prefix (Dict[str, str]): keys are strings to replace, values are
replacements
offsets (Dict[str, Dict[int, str]): (optional) Mapping from relative filenames to
a mapping from indices to strings to replace found at each index
relative_root (str): (optional) prefix for relative paths in offsets
"""
# defaults to the common prefix of all input files
rel_root = relative_root or os.path.commonprefix(files_to_relocate)
if offsets is None:
offsets = {}
for filename in files_to_relocate:
indices = compute_indices(
filename,
list(prefix_to_prefix.keys()),
)
relpath = os.path.relpath(filename, rel_root)
offsets[relpath] = indices
args = [
(filename, offsets[os.path.relpath(filename, rel_root)], prefix_to_prefix)
for filename in files_to_relocate
]
tp = multiprocessing.pool.ThreadPool(processes=concurrency)
try:
tp.map(llnl.util.lang.star(_relocate_binary_text), args)
finally:
tp.terminate()
tp.join()

View File

@@ -202,11 +202,17 @@ class RepoLoader(_PrependFileLoader):
"""Loads a Python module associated with a package in specific repository"""
#: Code in ``_package_prepend`` is prepended to imported packages.
#:
#: Spack packages are expected to call `from spack.package import *`
#: themselves, but we are allowing a deprecation period before breaking
#: external repos that don't do this yet.
#: Spack packages were originally expected to call `from spack import *`
#: themselves, but this became difficult to manage, and imports in the Spack
#: core polluted the top-level namespace with package symbols. To
#: solve this, the top-level ``spack`` package contains very few symbols
#: of its own, and importing ``*`` is essentially a no-op. The common
#: routines and directives that packages need are now in ``spack.pkgkit``,
#: and the import system forces packages to automatically include
#: this. This way, old packages that call ``from spack import *`` will
#: continue to work without modification, but it's no longer required.
_package_prepend = ('from __future__ import absolute_import;'
'from spack.package import *')
'from spack.pkgkit import *')
def __init__(self, fullname, repo, package_name):
self.repo = repo
@@ -444,10 +450,10 @@ def is_package_file(filename):
# Package files are named `package.py` and are not in lib/spack/spack
# We have to remove the file extension because it can be .py and can be
# .pyc depending on context, and can differ between the files
import spack.package_base # break cycle
import spack.package # break cycle
filename_noext = os.path.splitext(filename)[0]
packagebase_filename_noext = os.path.splitext(
inspect.getfile(spack.package_base.PackageBase))[0]
inspect.getfile(spack.package.PackageBase))[0]
return (filename_noext != packagebase_filename_noext and
os.path.basename(filename_noext) == 'package')
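To make the ``_package_prepend`` mechanism above concrete, a simplified sketch of a prepend-style loader (not Spack's actual ``_PrependFileLoader``); the function name and file handling here are illustrative only:

import types

_PACKAGE_PREPEND = ('from __future__ import absolute_import;'
                    'from spack.pkgkit import *')

def load_package_module(name, path):
    # Illustration only: compile the package source with the compatibility
    # import prepended, so the module sees pkgkit symbols even if it never
    # imports them explicitly.
    with open(path) as f:
        source = _PACKAGE_PREPEND + '\n' + f.read()
    module = types.ModuleType(name)
    module.__file__ = path
    exec(compile(source, path, 'exec'), module.__dict__)
    return module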

View File

@@ -15,7 +15,7 @@
import spack.build_environment
import spack.fetch_strategy
import spack.package_base
import spack.package
from spack.install_test import TestSuite
from spack.reporter import Reporter
from spack.reporters.cdash import CDash
@@ -131,7 +131,7 @@ def gather_info(do_fn):
"""
@functools.wraps(do_fn)
def wrapper(instance, *args, **kwargs):
if isinstance(instance, spack.package_base.PackageBase):
if isinstance(instance, spack.package.PackageBase):
pkg = instance
elif hasattr(args[0], 'pkg'):
pkg = args[0].pkg

View File

@@ -22,7 +22,7 @@
import spack.build_environment
import spack.fetch_strategy
import spack.package_base
import spack.package
from spack.error import SpackError
from spack.reporter import Reporter
from spack.util.crypto import checksum

View File

@@ -7,7 +7,7 @@
import spack.build_environment
import spack.fetch_strategy
import spack.package_base
import spack.package
from spack.reporter import Reporter
__all__ = ['JUnit']

View File

@@ -93,10 +93,8 @@ def rewire_node(spec, explicit):
False,
spec.build_spec.prefix,
spec.prefix)
# Relocate text strings of prefixes embedded in binaries
relocate.relocate_text_bin(bins_to_relocate, prefix_to_prefix)
relocate.relocate_text_bin(binaries=bins_to_relocate,
prefixes=prefix_to_prefix)
# Copy package into place, except for spec.json (because spec.json
# describes the old spec and not the new spliced spec).
shutil.copytree(os.path.join(tempdir, spec.dag_hash()), spec.prefix,

Some files were not shown because too many files have changed in this diff