Compare commits
167 Commits
hs/fix/tar ... hs/rocm-op
Commits (SHA1):

eff56cd845 9747978c7f f043455ccc fb9d6427e6 76e83e10c1 af89bdf632
46f5b192ef 18cd922aab 5518ad9611 57a1807443 3909308d5c 54210270c8
1a71bb046e dbd6857d32 025bc24996 01e16b58a3 f71e202f24 f7edd10c17
153c0805dd 5d8517ef69 f23cae6a86 e6e67f8e0a e6bef4ca9b e3e0bef0de
42486d93ec 6d608a9664 04313afc63 f839d2ba56 2b1a8b1913 8907003648
8afdba4bf7 57cabbfb10 c71efb9040 c5dd2d43d2 34338ef757 c0bdc37226
8bad9fb804 2df7cc0087 40d40ccc52 afe7d6c39e 113733d9fb a8e2da5bb8
97750189b6 bcd40835a0 2c3f2c5733 302d74394b cf94dc7823 4411ee3382
f790ce0f72 64d53037db 4aef50739b a6e966f6f2 1f428c4188 731e48b1bd
74ff9ad821 16a4eff689 d0b0d8db50 54f591cce5 8677bb4d43 b66b80a96a
10e21f399c 56892f6140 7eddc4b1f8 3c7392bbcc bb0517f4d9 c8994ee50f
4b2f5638f2 31312a379f b0d5f272b0 1c93fef160 8bb5f4faf4 f76ab5f72f
49c831edc3 c943c8c1d2 e0e6f29584 72bc3bb803 dba8fe2b96 4487598d60
495537cf56 22c3b4099f 13978d11a0 a22114b20b c10624390f fb3d9de80b
fbb688af07 d34b709425 cb0b188cf6 9a2b0aca66 89a8ab3233 5d87166c07
15c989b3fe b7f556e4b4 36f32ceda3 01d77ed915 0049f8332d 39c10c3116
71d1901831 41e0863b86 a75d83f65c f2f13964fb 9b032018d6 7d470c05be
664fe9e9e6 2745a519e2 4348ee1c75 8e39fb1e54 09458312a3 5fd0693df4
f58684429d 409611a479 dd98cfb839 5c91667dab 9efd6f3f11 a8f5289801
ac635aa777 45dcddf9c3 f1660722e7 04b44d841c 7f30502297 61b1586c51
8579efcadf 1c3e2b5425 011ef0aaaf 9642f3f49a a6c9b55fad 608ed967e1
742eaa32b7 763b35a2e0 12280f864c 253ba05732 195b869e1c 393961ffd6
392a58e9be 0e8e97a811 43a0cbe7a2 bb35a98079 fa7e0e8230 2c128751f5
fb0493a366 6d1b6e7087 759518182c 7ebabfcf0e 6203ae31d2 6b13017ded
2c51b5853f d0cbd056a8 e1b579a8b4 b02dcf697d 6e046b04c7 d196795437
0d444fb4e7 467e631260 f21de698f7 59532986be 36fd547b40 b5f9dea6d0
5904834295 2da8a1d1e3 d50eba40d9 8d3a733b77 dfa86dce08 3d82e5c573
a77f903f4d 92260b179d 196c912b8a 0f54995e53 9d1332f1a1
.github/workflows/build-containers.yml (18 changes) vendored
@@ -40,17 +40,17 @@ jobs:
       # 1: Platforms to build for
       # 2: Base image (e.g. ubuntu:22.04)
       dockerfile: [[amazon-linux, 'linux/amd64,linux/arm64', 'amazonlinux:2'],
-                   [centos-stream9, 'linux/amd64,linux/arm64,linux/ppc64le', 'centos:stream9'],
-                   [leap15, 'linux/amd64,linux/arm64,linux/ppc64le', 'opensuse/leap:15'],
-                   [ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
-                   [ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04'],
-                   [ubuntu-noble, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:24.04'],
-                   [almalinux8, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:8'],
-                   [almalinux9, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:9'],
+                   [centos-stream9, 'linux/amd64,linux/arm64', 'centos:stream9'],
+                   [leap15, 'linux/amd64,linux/arm64', 'opensuse/leap:15'],
+                   [ubuntu-focal, 'linux/amd64,linux/arm64', 'ubuntu:20.04'],
+                   [ubuntu-jammy, 'linux/amd64,linux/arm64', 'ubuntu:22.04'],
+                   [ubuntu-noble, 'linux/amd64,linux/arm64', 'ubuntu:24.04'],
+                   [almalinux8, 'linux/amd64,linux/arm64', 'almalinux:8'],
+                   [almalinux9, 'linux/amd64,linux/arm64', 'almalinux:9'],
                    [rockylinux8, 'linux/amd64,linux/arm64', 'rockylinux:8'],
                    [rockylinux9, 'linux/amd64,linux/arm64', 'rockylinux:9'],
-                   [fedora39, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:39'],
-                   [fedora40, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:40']]
+                   [fedora39, 'linux/amd64,linux/arm64', 'fedora:39'],
+                   [fedora40, 'linux/amd64,linux/arm64', 'fedora:40']]
       name: Build ${{ matrix.dockerfile[0] }}
       if: github.repository == 'spack/spack'
       steps:
.github/workflows/ci.yaml (4 changes) vendored
@@ -81,6 +81,10 @@ jobs:
     with:
       with_coverage: ${{ needs.changes.outputs.core }}

+  import-check:
+    needs: [ changes ]
+    uses: ./.github/workflows/import-check.yaml
+
   all-prechecks:
     needs: [ prechecks ]
     if: ${{ always() }}
.github/workflows/coverage.yml (1 change) vendored
@@ -33,3 +33,4 @@ jobs:
     with:
       verbose: true
       fail_ci_if_error: false
+      token: ${{ secrets.CODECOV_TOKEN }}
.github/workflows/import-check.yaml (49 changes, new file) vendored
@@ -0,0 +1,49 @@
+name: import-check
+
+on:
+  workflow_call:
+
+jobs:
+  # Check we don't make the situation with circular imports worse
+  import-check:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: julia-actions/setup-julia@v2
+      with:
+        version: '1.10'
+    - uses: julia-actions/cache@v2
+
+    # PR: use the base of the PR as the old commit
+    - name: Checkout PR base commit
+      if: github.event_name == 'pull_request'
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      with:
+        ref: ${{ github.event.pull_request.base.sha }}
+        path: old
+    # not a PR: use the previous commit as the old commit
+    - name: Checkout previous commit
+      if: github.event_name != 'pull_request'
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      with:
+        fetch-depth: 2
+        path: old
+    - name: Checkout previous commit
+      if: github.event_name != 'pull_request'
+      run: git -C old reset --hard HEAD^
+
+    - name: Checkout new commit
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      with:
+        path: new
+    - name: Install circular import checker
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      with:
+        repository: haampie/circular-import-fighter
+        ref: 4cdb0bf15f04ab6b49041d5ef1bfd9644cce7f33
+        path: circular-import-fighter
+    - name: Install dependencies
+      working-directory: circular-import-fighter
+      run: make -j dependencies
+    - name: Circular import check
+      working-directory: circular-import-fighter
+      run: make -j compare "SPACK_ROOT=../old ../new"
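The new workflow delegates the old/new comparison to the checker's `compare` target (replacing the two separate runs plus shell diff removed from valid-style.yml below). Assuming the Makefile targets behave as the workflow implies, the same check can be reproduced locally along these lines (checkout paths are illustrative):

```console
$ git clone https://github.com/haampie/circular-import-fighter
$ cd circular-import-fighter
$ make -j dependencies
$ # compare the import graphs of two Spack checkouts
$ make -j compare "SPACK_ROOT=../spack-old ../spack-new"
```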
.github/workflows/valid-style.yml (60 changes) vendored
@@ -86,66 +86,6 @@ jobs:
         spack -d bootstrap now --dev
         spack -d style -t black
         spack unit-test -V
-  # Check we don't make the situation with circular imports worse
-  import-check:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: julia-actions/setup-julia@v2
-      with:
-        version: '1.10'
-    - uses: julia-actions/cache@v2
-
-    # PR: use the base of the PR as the old commit
-    - name: Checkout PR base commit
-      if: github.event_name == 'pull_request'
-      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      with:
-        ref: ${{ github.event.pull_request.base.sha }}
-        path: old
-    # not a PR: use the previous commit as the old commit
-    - name: Checkout previous commit
-      if: github.event_name != 'pull_request'
-      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      with:
-        fetch-depth: 2
-        path: old
-    - name: Checkout previous commit
-      if: github.event_name != 'pull_request'
-      run: git -C old reset --hard HEAD^
-
-    - name: Checkout new commit
-      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      with:
-        path: new
-    - name: Install circular import checker
-      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      with:
-        repository: haampie/circular-import-fighter
-        ref: b5d6ce9be35f602cca7d5a6aa0259fca10639cca
-        path: circular-import-fighter
-    - name: Install dependencies
-      working-directory: circular-import-fighter
-      run: make -j dependencies
-    - name: Problematic imports before
-      working-directory: circular-import-fighter
-      run: make SPACK_ROOT=../old SUFFIX=.old
-    - name: Problematic imports after
-      working-directory: circular-import-fighter
-      run: make SPACK_ROOT=../new SUFFIX=.new
-    - name: Compare import cycles
-      working-directory: circular-import-fighter
-      run: |
-        edges_before="$(head -n1 solution.old)"
-        edges_after="$(head -n1 solution.new)"
-        if [ "$edges_after" -gt "$edges_before" ]; then
-          printf '\033[1;31mImport check failed: %s imports need to be deleted, ' "$edges_after"
-          printf 'previously this was %s\033[0m\n' "$edges_before"
-          printf 'Compare \033[1;97m"Problematic imports before"\033[0m and '
-          printf '\033[1;97m"Problematic imports after"\033[0m.\n'
-          exit 1
-        else
-          printf '\033[1;32mImport check passed: %s <= %s\033[0m\n' "$edges_after" "$edges_before"
-        fi
-
   # Further style checks from pylint
   pylint:
@@ -63,3 +63,7 @@ concretizer:
   # Setting this to false yields unreproducible results, so we advise to use that value only
   # for debugging purposes (e.g. check which constraints can help Spack concretize faster).
   error_on_timeout: true
+
+  # Static analysis may reduce the concretization time by generating smaller ASP problems, in
+  # cases where there are requirements that prevent part of the search space to be explored.
+  static_analysis: false
@@ -1,5 +1,5 @@
 config:
   locks: false
   build_stage::
-  - '$spack/.staging'
+  - '$user_cache_path/stage'
   stage_name: '{name}-{version}-{hash:7}'
@@ -272,9 +272,9 @@ often lists dependencies and the flags needed to locate them. The
 "environment variables" section lists environment variables that the
 build system uses to pass flags to the compiler and linker.

-^^^^^^^^^^^^^^^^^^^^^^^^^^
-Addings flags to configure
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Adding flags to configure
+^^^^^^^^^^^^^^^^^^^^^^^^^

 For most of the flags you encounter, you will want a variant to
 optionally enable/disable them. You can then optionally pass these
@@ -285,7 +285,7 @@ function like so:

    def configure_args(self):
        args = []

        ...
        if self.spec.satisfies("+mpi"):
            args.append("--enable-mpi")
        else:
@@ -299,7 +299,10 @@ Alternatively, you can use the :ref:`enable_or_disable <autotools_enable_or_dis
 .. code-block:: python

    def configure_args(self):
-       return [self.enable_or_disable("mpi")]
+       args = []
+       ...
+       args.extend(self.enable_or_disable("mpi"))
+       return args


 Note that we are explicitly disabling MPI support if it is not
@@ -344,7 +347,14 @@ typically used to enable or disable some feature within the package.
        default=False,
        description="Memchecker support for debugging [degrades performance]"
    )
-   config_args.extend(self.enable_or_disable("memchecker"))
+   ...
+
+   def configure_args(self):
+       args = []
+       ...
+       args.extend(self.enable_or_disable("memchecker"))
+
+       return args

 In this example, specifying the variant ``+memchecker`` will generate
 the following configuration options:
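For reference, the ``enable_or_disable("memchecker")`` call above maps the variant value directly onto the corresponding configure flag, roughly as follows (``example-package`` is a placeholder name):

```console
$ spack install example-package +memchecker   # configure ... --enable-memchecker
$ spack install example-package ~memchecker   # configure ... --disable-memchecker
```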
@@ -361,7 +361,6 @@ and the tags associated with the class of runners to build on.
 * ``.linux_neoverse_n1``
 * ``.linux_neoverse_v1``
 * ``.linux_neoverse_v2``
-* ``.linux_power``
 * ``.linux_skylake``
 * ``.linux_x86_64``
 * ``.linux_x86_64_v4``
@@ -112,6 +112,19 @@ the original but may concretize differently in the presence of different
 explicit or default configuration settings (e.g., a different version of
 Spack or for a different user account).

+Environments created from a manifest will copy any included configs
+from relative paths inside the environment. Relative paths from
+outside the environment will cause errors, and absolute paths will be
+kept absolute. For example, if ``spack.yaml`` includes:
+
+.. code-block:: yaml
+
+   spack:
+     include: [./config.yaml]
+
+then the created environment will have its own copy of the file
+``config.yaml`` copied from the location in the original environment.
+
 Create an environment from a ``spack.lock`` file using:

 .. code-block:: console

@@ -160,7 +173,7 @@ accepts. If an environment already exists then spack will simply activate it
 and ignore the create-specific flags.

 .. code-block:: console

    $ spack env activate --create -p myenv
    # ...
    # [creates if myenv does not exist yet]
@@ -424,8 +437,8 @@ Developing Packages in a Spack Environment

 The ``spack develop`` command allows one to develop Spack packages in
 an environment. It requires a spec containing a concrete version, and
 will configure Spack to install the package from local source.
 If a version is not provided from the command line interface then spack
 will automatically pick the highest version the package has defined.
 This means any infinity versions (``develop``, ``main``, ``stable``) will be
 preferred in this selection process.
@@ -435,9 +448,9 @@ set, and Spack will ensure the package and its dependents are rebuilt
 any time the environment is installed if the package's local source
 code has been modified. Spack's native implementation to check for modifications
 is to check if ``mtime`` is newer than the installation.
 A custom check can be created by overriding the ``detect_dev_src_change`` method
 in your package class. This is particularly useful for projects using custom spack repo's
 to drive development and want to optimize performance.

 Spack ensures that all instances of a
 developed package in the environment are concretized to match the
@@ -453,7 +466,7 @@ Further development on ``foo`` can be tested by re-installing the environment,
 and eventually committed and pushed to the upstream git repo.

 If the package being developed supports out-of-source builds then users can use the
 ``--build_directory`` flag to control the location and name of the build directory.
 This is a shortcut to set the ``package_attributes:build_directory`` in the
 ``packages`` configuration (see :ref:`assigning-package-attributes`).
 The supplied location will become the build-directory for that package in all future builds.
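The ``detect_dev_src_change`` override mentioned in that docs hunk could look roughly like the sketch below. The package name and marker-file logic are hypothetical, and we assume the method returns ``True`` when a rebuild is needed:

```python
import os

from spack.package import *


class MyProject(CMakePackage):
    """Hypothetical package developed via `spack develop`."""

    git = "https://example.com/my-project.git"

    version("main", branch="main")

    def detect_dev_src_change(self) -> bool:
        # Rebuild only when an explicit marker file exists, instead of
        # scanning mtimes across the whole source tree (Spack's default).
        marker = os.path.join(self.stage.source_path, ".spack-rebuild")
        return os.path.exists(marker)
```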
@@ -668,7 +668,7 @@ def copy(src, dest, _permissions=False):
         _permissions (bool): for internal use only

     Raises:
-        IOError: if *src* does not match any files or directories
+        OSError: if *src* does not match any files or directories
         ValueError: if *src* matches multiple files but *dest* is
             not a directory
     """
@@ -679,7 +679,7 @@ def copy(src, dest, _permissions=False):

     files = glob.glob(src)
     if not files:
-        raise IOError("No such file or directory: '{0}'".format(src))
+        raise OSError("No such file or directory: '{0}'".format(src))
     if len(files) > 1 and not os.path.isdir(dest):
         raise ValueError(
             "'{0}' matches multiple files but '{1}' is not a directory".format(src, dest)
@@ -710,7 +710,7 @@ def install(src, dest):
         dest (str): the destination file or directory

     Raises:
-        IOError: if *src* does not match any files or directories
+        OSError: if *src* does not match any files or directories
         ValueError: if *src* matches multiple files but *dest* is
             not a directory
     """
@@ -748,7 +748,7 @@ def copy_tree(
         _permissions (bool): for internal use only

     Raises:
-        IOError: if *src* does not match any files or directories
+        OSError: if *src* does not match any files or directories
         ValueError: if *src* is a parent directory of *dest*
     """
     if _permissions:
@@ -762,7 +762,7 @@ def copy_tree(

     files = glob.glob(src)
     if not files:
-        raise IOError("No such file or directory: '{0}'".format(src))
+        raise OSError("No such file or directory: '{0}'".format(src))

     # For Windows hard-links and junctions, the source path must exist to make a symlink. Add
     # all symlinks to this list while traversing the tree, then when finished, make all
@@ -843,7 +843,7 @@ def install_tree(src, dest, symlinks=True, ignore=None):
         ignore (typing.Callable): function indicating which files to ignore

     Raises:
-        IOError: if *src* does not match any files or directories
+        OSError: if *src* does not match any files or directories
         ValueError: if *src* is a parent directory of *dest*
     """
     copy_tree(src, dest, symlinks=symlinks, ignore=ignore, _permissions=True)
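These ``IOError`` → ``OSError`` replacements (here and in the hunks that follow) are behavior-preserving: since Python 3.3 (PEP 3151), ``IOError`` is just an alias of ``OSError``. A quick check:

```python
# IOError and OSError are the same class on Python 3.3+ (PEP 3151).
assert IOError is OSError

try:
    open("/nonexistent/path")
except OSError as e:  # catches everything `except IOError` used to catch
    print(type(e).__name__)  # FileNotFoundError, an OSError subclass
```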
@@ -308,7 +308,7 @@ class LinkTree:

     def __init__(self, source_root):
         if not os.path.exists(source_root):
-            raise IOError("No such file or directory: '%s'", source_root)
+            raise OSError("No such file or directory: '%s'", source_root)

         self._root = source_root
@@ -391,7 +391,7 @@ def _poll_lock(self, op: int) -> bool:

             return True

-        except IOError as e:
+        except OSError as e:
            # EAGAIN and EACCES == locked by another process (so try again)
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                raise
@@ -344,26 +344,6 @@ def close(self):
         self.file.close()


-@contextmanager
-def replace_environment(env):
-    """Replace the current environment (`os.environ`) with `env`.
-
-    If `env` is empty (or None), this unsets all current environment
-    variables.
-    """
-    env = env or {}
-    old_env = os.environ.copy()
-    try:
-        os.environ.clear()
-        for name, val in env.items():
-            os.environ[name] = val
-        yield
-    finally:
-        os.environ.clear()
-        for name, val in old_env.items():
-            os.environ[name] = val
-
-
 def log_output(*args, **kwargs):
     """Context manager that logs its output to a file.

@@ -447,7 +427,6 @@ def __init__(
         self.echo = echo
         self.debug = debug
         self.buffer = buffer
-        self.env = env  # the environment to use for _writer_daemon
         self.filter_fn = filter_fn

         self._active = False  # used to prevent re-entry
@@ -519,21 +498,20 @@ def __enter__(self):
             # just don't forward input if this fails
             pass

-        with replace_environment(self.env):
-            self.process = multiprocessing.Process(
-                target=_writer_daemon,
-                args=(
-                    input_fd,
-                    read_fd,
-                    self.write_fd,
-                    self.echo,
-                    self.log_file,
-                    child_pipe,
-                    self.filter_fn,
-                ),
-            )
-            self.process.daemon = True  # must set before start()
-            self.process.start()
+        self.process = multiprocessing.Process(
+            target=_writer_daemon,
+            args=(
+                input_fd,
+                read_fd,
+                self.write_fd,
+                self.echo,
+                self.log_file,
+                child_pipe,
+                self.filter_fn,
+            ),
+        )
+        self.process.daemon = True  # must set before start()
+        self.process.start()

         finally:
             if input_fd:
@@ -729,10 +707,7 @@ class winlog:
     Does not support the use of 'v' toggling as nixlog does.
     """

-    def __init__(
-        self, file_like=None, echo=False, debug=0, buffer=False, env=None, filter_fn=None
-    ):
-        self.env = env
+    def __init__(self, file_like=None, echo=False, debug=0, buffer=False, filter_fn=None):
        self.debug = debug
        self.echo = echo
        self.logfile = file_like
@@ -789,11 +764,10 @@ def background_reader(reader, echo_writer, _kill):
             reader.close()

         self._active = True
-        with replace_environment(self.env):
-            self._thread = Thread(
-                target=background_reader, args=(self.reader, self.echo_writer, self._kill)
-            )
-            self._thread.start()
+        self._thread = Thread(
+            target=background_reader, args=(self.reader, self.echo_writer, self._kill)
+        )
+        self._thread.start()
         return self

     def __exit__(self, exc_type, exc_val, exc_tb):
@@ -918,7 +892,7 @@ def _writer_daemon(
         try:
             if stdin_file.read(1) == "v":
                 echo = not echo
-        except IOError as e:
+        except OSError as e:
             # If SIGTTIN is ignored, the system gives EIO
             # to let the caller know the read failed b/c it
             # was in the bg. Ignore that too.
@@ -1013,7 +987,7 @@ def wrapped(*args, **kwargs):
     while True:
         try:
             return function(*args, **kwargs)
-        except IOError as e:
+        except OSError as e:
            if e.errno == errno.EINTR:
                continue
            raise
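With the ``env`` plumbing removed, ``log_output`` keeps its basic contract. A minimal usage sketch (the log file name is illustrative):

```python
from llnl.util.tty.log import log_output

# Capture stdout/stderr of the block into build.log, echoing to the terminal too.
with log_output("build.log", echo=True):
    print("this line goes both to build.log and to the terminal")
```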
@@ -10,7 +10,7 @@
 import spack.util.git

 #: PEP440 canonical <major>.<minor>.<micro>.<devN> string
-__version__ = "0.24.0.dev0"
+__version__ = "1.0.0.dev0"
 spack_version = __version__
@@ -27,9 +27,9 @@
 class ClingoBootstrapConcretizer:
     def __init__(self, configuration):
         self.host_platform = spack.platforms.host()
-        self.host_os = self.host_platform.operating_system("frontend")
+        self.host_os = self.host_platform.default_operating_system()
         self.host_target = archspec.cpu.host().family
-        self.host_architecture = spack.spec.ArchSpec.frontend_arch()
+        self.host_architecture = spack.spec.ArchSpec.default_arch()
         self.host_architecture.target = str(self.host_target)
         self.host_compiler = self._valid_compiler_or_raise()
         self.host_python = self.python_external_spec()
@@ -141,7 +141,7 @@ def _bootstrap_config_scopes() -> Sequence["spack.config.ConfigScope"]:


 def _add_compilers_if_missing() -> None:
-    arch = spack.spec.ArchSpec.frontend_arch()
+    arch = spack.spec.ArchSpec.default_arch()
     if not spack.compilers.compilers_for_arch(arch):
         spack.compilers.find_compilers()
@@ -11,6 +11,7 @@
 from typing import Any, List, Optional, Tuple

 import llnl.util.filesystem as fs
+from llnl.util import tty
 from llnl.util.lang import stable_partition

 import spack.builder
@@ -458,11 +459,23 @@ def cmake(
     ) -> None:
         """Runs ``cmake`` in the build directory"""

-        # skip cmake phase if it is an incremental develop build
-        if spec.is_develop and os.path.isfile(
-            os.path.join(self.build_directory, "CMakeCache.txt")
-        ):
-            return
+        if spec.is_develop:
+            # skip cmake phase if it is an incremental develop build
+
+            # Determine the files that will re-run CMake that are generated from a successful
+            # configure step based on state
+            primary_generator = _extract_primary_generator(self.generator)
+            configure_artifact = "Makefile"
+            if primary_generator == "Ninja":
+                configure_artifact = "ninja.build"
+
+            if os.path.isfile(os.path.join(self.build_directory, configure_artifact)):
+                tty.msg(
+                    "Incremental build criteria satisfied."
+                    "Skipping CMake configure step. To force configuration run"
+                    f" `spack clean {pkg.name}`"
+                )
+                return

         options = self.std_cmake_args
         options += self.cmake_args()
@@ -15,7 +15,7 @@ class CudaPackage(PackageBase):
     """Auxiliary class which contains CUDA variant, dependencies and conflicts
     and is meant to unify and facilitate its usage.

-    Maintainers: ax3l, Rombur, davidbeckingsale
+    Maintainers: ax3l, Rombur, davidbeckingsale, pauleonix
     """

     # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
@@ -47,6 +47,12 @@ class CudaPackage(PackageBase):
         "89",
         "90",
         "90a",
+        "100",
+        "100a",
+        "101",
+        "101a",
+        "120",
+        "120a",
     )

     # FIXME: keep cuda and cuda_arch separate to make usage easier until
@@ -99,39 +105,56 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
     # CUDA version vs Architecture
     # https://en.wikipedia.org/wiki/CUDA#GPUs_supported
     # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
+    # Tesla support:
     depends_on("cuda@:6.0", when="cuda_arch=10")
     depends_on("cuda@:6.5", when="cuda_arch=11")
     depends_on("cuda@2.1:6.5", when="cuda_arch=12")
     depends_on("cuda@2.1:6.5", when="cuda_arch=13")

+    # Fermi support:
     depends_on("cuda@3.0:8.0", when="cuda_arch=20")
     depends_on("cuda@3.2:8.0", when="cuda_arch=21")

+    # Kepler support:
     depends_on("cuda@5.0:10.2", when="cuda_arch=30")
     depends_on("cuda@5.0:10.2", when="cuda_arch=32")
     depends_on("cuda@5.0:11.8", when="cuda_arch=35")
     depends_on("cuda@6.5:11.8", when="cuda_arch=37")

+    # Maxwell support:
     depends_on("cuda@6.0:", when="cuda_arch=50")
     depends_on("cuda@6.5:", when="cuda_arch=52")
     depends_on("cuda@6.5:", when="cuda_arch=53")

+    # Pascal support:
     depends_on("cuda@8.0:", when="cuda_arch=60")
     depends_on("cuda@8.0:", when="cuda_arch=61")
     depends_on("cuda@8.0:", when="cuda_arch=62")

+    # Volta support:
     depends_on("cuda@9.0:", when="cuda_arch=70")
+    # Turing support:
     depends_on("cuda@9.0:", when="cuda_arch=72")
     depends_on("cuda@10.0:", when="cuda_arch=75")

+    # Ampere support:
     depends_on("cuda@11.0:", when="cuda_arch=80")
     depends_on("cuda@11.1:", when="cuda_arch=86")
     depends_on("cuda@11.4:", when="cuda_arch=87")
+    # Ada support:
     depends_on("cuda@11.8:", when="cuda_arch=89")

+    # Hopper support:
     depends_on("cuda@12.0:", when="cuda_arch=90")
     depends_on("cuda@12.0:", when="cuda_arch=90a")

+    # Blackwell support:
+    depends_on("cuda@12.8:", when="cuda_arch=100")
+    depends_on("cuda@12.8:", when="cuda_arch=100a")
+    depends_on("cuda@12.8:", when="cuda_arch=101")
+    depends_on("cuda@12.8:", when="cuda_arch=101a")
+    depends_on("cuda@12.8:", when="cuda_arch=120")
+    depends_on("cuda@12.8:", when="cuda_arch=120a")
     # From the NVIDIA install guide we know of conflicts for particular
     # platforms (linux, darwin), architectures (x86, powerpc) and compilers
     # (gcc, clang). We don't restrict %gcc and %clang conflicts to
@@ -163,6 +186,7 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
     conflicts("%gcc@12:", when="+cuda ^cuda@:11.8")
     conflicts("%gcc@13:", when="+cuda ^cuda@:12.3")
     conflicts("%gcc@14:", when="+cuda ^cuda@:12.6")
+    conflicts("%gcc@15:", when="+cuda ^cuda@:12.8")
     conflicts("%clang@12:", when="+cuda ^cuda@:11.4.0")
     conflicts("%clang@13:", when="+cuda ^cuda@:11.5")
     conflicts("%clang@14:", when="+cuda ^cuda@:11.7")
@@ -171,6 +195,7 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
     conflicts("%clang@17:", when="+cuda ^cuda@:12.3")
     conflicts("%clang@18:", when="+cuda ^cuda@:12.5")
     conflicts("%clang@19:", when="+cuda ^cuda@:12.6")
+    conflicts("%clang@20:", when="+cuda ^cuda@:12.8")

     # https://gist.github.com/ax3l/9489132#gistcomment-3860114
     conflicts("%gcc@10", when="+cuda ^cuda@:11.4.0")
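For illustration, a package opts into these version constraints and conflicts simply by mixing in ``CudaPackage``; the sketch below uses hypothetical names and a placeholder checksum:

```python
from spack.package import *


class MySolver(CMakePackage, CudaPackage):
    """Hypothetical package using the CudaPackage mix-in."""

    homepage = "https://example.com/my-solver"
    url = "https://example.com/my-solver-1.0.tar.gz"

    version("1.0", sha256="0" * 64)  # placeholder checksum

    def cmake_args(self):
        args = []
        if self.spec.satisfies("+cuda"):
            # cuda_arch is the multi-valued variant provided by CudaPackage,
            # e.g. `+cuda cuda_arch=120a` for Blackwell.
            archs = self.spec.variants["cuda_arch"].value
            args.append(f"-DCMAKE_CUDA_ARCHITECTURES={';'.join(archs)}")
        return args
```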
@@ -264,16 +264,17 @@ def update_external_dependencies(self, extendee_spec=None):
     # Ensure architecture information is present
     if not python.architecture:
         host_platform = spack.platforms.host()
-        host_os = host_platform.operating_system("default_os")
-        host_target = host_platform.target("default_target")
+        host_os = host_platform.default_operating_system()
+        host_target = host_platform.default_target()
         python.architecture = spack.spec.ArchSpec(
             (str(host_platform), str(host_os), str(host_target))
         )
     else:
         if not python.architecture.platform:
             python.architecture.platform = spack.platforms.host()
+        platform = spack.platforms.by_name(python.architecture.platform)
         if not python.architecture.os:
-            python.architecture.os = "default_os"
+            python.architecture.os = platform.default_operating_system()
         if not python.architecture.target:
             python.architecture.target = archspec.cpu.host().family.name
@@ -14,7 +14,7 @@
 import zipfile
 from collections import namedtuple
 from typing import Callable, Dict, List, Set
-from urllib.request import HTTPHandler, Request, build_opener
+from urllib.request import Request

 import llnl.util.filesystem as fs
 import llnl.util.tty as tty
@@ -62,6 +62,8 @@

 PushResult = namedtuple("PushResult", "success url")

+urlopen = web_util.urlopen  # alias for mocking in tests
+

 def get_change_revisions():
     """If this is a git repo get the revisions to use when checking
@@ -627,29 +629,19 @@ def download_and_extract_artifacts(url, work_dir):
     if token:
         headers["PRIVATE-TOKEN"] = token

-    opener = build_opener(HTTPHandler)
-
-    request = Request(url, headers=headers)
-    request.get_method = lambda: "GET"
-
-    response = opener.open(request, timeout=SPACK_CDASH_TIMEOUT)
-    response_code = response.getcode()
-
-    if response_code != 200:
-        msg = f"Error response code ({response_code}) in reproduce_ci_job"
-        raise SpackError(msg)
-
+    request = Request(url, headers=headers, method="GET")
     artifacts_zip_path = os.path.join(work_dir, "artifacts.zip")
+    os.makedirs(work_dir, exist_ok=True)

-    if not os.path.exists(work_dir):
-        os.makedirs(work_dir)
+    try:
+        response = urlopen(request, timeout=SPACK_CDASH_TIMEOUT)
+        with open(artifacts_zip_path, "wb") as out_file:
+            shutil.copyfileobj(response, out_file)
+    except OSError as e:
+        raise SpackError(f"Error fetching artifacts: {e}")

-    with open(artifacts_zip_path, "wb") as out_file:
-        shutil.copyfileobj(response, out_file)
-
-    zip_file = zipfile.ZipFile(artifacts_zip_path)
-    zip_file.extractall(work_dir)
-    zip_file.close()
+    with zipfile.ZipFile(artifacts_zip_path) as zip_file:
+        zip_file.extractall(work_dir)

     os.remove(artifacts_zip_path)
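The new module-level ``urlopen = web_util.urlopen`` alias exists, per its comment, so tests can substitute the network call. A hypothetical pytest sketch of that pattern:

```python
import io

import spack.ci


def test_download_and_extract_artifacts(monkeypatch, tmp_path):
    def fake_urlopen(request, timeout=None):
        # Serve a canned response instead of hitting the network.
        return io.BytesIO(b"fake zip bytes")

    monkeypatch.setattr(spack.ci, "urlopen", fake_urlopen)
    # ... exercise spack.ci.download_and_extract_artifacts(...) here ...
```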
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)

 import collections
+import warnings

 import archspec.cpu

@@ -51,10 +52,10 @@ def setup_parser(subparser):
     "-t", "--target", action="store_true", default=False, help="print only the target"
 )
 parts2.add_argument(
-    "-f", "--frontend", action="store_true", default=False, help="print frontend"
+    "-f", "--frontend", action="store_true", default=False, help="print frontend (DEPRECATED)"
 )
 parts2.add_argument(
-    "-b", "--backend", action="store_true", default=False, help="print backend"
+    "-b", "--backend", action="store_true", default=False, help="print backend (DEPRECATED)"
 )

@@ -98,15 +99,14 @@ def arch(parser, args):
         display_targets(archspec.cpu.TARGETS)
         return

-    os_args, target_args = "default_os", "default_target"
     if args.frontend:
-        os_args, target_args = "frontend", "frontend"
+        warnings.warn("the argument --frontend is deprecated, and will be removed in Spack v1.0")
     elif args.backend:
-        os_args, target_args = "backend", "backend"
+        warnings.warn("the argument --backend is deprecated, and will be removed in Spack v1.0")

     host_platform = spack.platforms.host()
-    host_os = host_platform.operating_system(os_args)
-    host_target = host_platform.target(target_args)
+    host_os = host_platform.default_operating_system()
+    host_target = host_platform.default_target()
     if args.family:
         host_target = host_target.family
     elif args.generic:
@@ -86,8 +86,8 @@ def create_db_tarball(args):

 def report(args):
     host_platform = spack.platforms.host()
-    host_os = host_platform.operating_system("frontend")
-    host_target = host_platform.target("frontend")
+    host_os = host_platform.default_operating_system()
+    host_target = host_platform.default_target()
     architecture = spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target)))
     print("* **Spack:**", spack.get_version())
     print("* **Python:**", platform.python_version())
@@ -9,9 +9,9 @@

 import spack.cmd
 import spack.environment as ev
-import spack.package_base
 import spack.store
 from spack.cmd.common import arguments
+from spack.solver.input_analysis import create_graph_analyzer

 description = "show dependencies of a package"
 section = "basic"
@@ -68,15 +68,17 @@ def dependencies(parser, args):

     else:
         spec = specs[0]
-        dependencies = spack.package_base.possible_dependencies(
+        dependencies, virtuals, _ = create_graph_analyzer().possible_dependencies(
             spec,
             transitive=args.transitive,
             expand_virtuals=args.expand_virtuals,
-            depflag=args.deptype,
+            allowed_deps=args.deptype,
         )
+        if not args.expand_virtuals:
+            dependencies.update(virtuals)

         if spec.name in dependencies:
-            del dependencies[spec.name]
+            dependencies.remove(spec.name)

         if dependencies:
            colify(sorted(dependencies))
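Command-line behavior is meant to stay the same despite the new analyzer backend; for example (``mpileaks`` is Spack's usual demo package, output elided):

```console
$ spack dependencies --transitive mpileaks
```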
@@ -125,7 +125,7 @@ def develop(parser, args):
     version = spec.versions.concrete_range_as_version
     if not version:
         # look up the maximum version so infintiy versions are preferred for develop
-        version = max(spec.package_class.versions.keys())
+        version = max(spack.repo.PATH.get_pkg_class(spec.fullname).versions.keys())
         tty.msg(f"Defaulting to highest version: {spec.name}@{version}")
     spec.versions = spack.version.VersionList([version])
@@ -110,10 +110,7 @@ def external_find(args):
     # Note that KeyboardInterrupt does not subclass Exception
     # (so CTRL-C will terminate the program as expected).
     skip_msg = "Skipping manifest and continuing with other external checks"
-    if (isinstance(e, IOError) or isinstance(e, OSError)) and e.errno in [
-        errno.EPERM,
-        errno.EACCES,
-    ]:
+    if isinstance(e, OSError) and e.errno in (errno.EPERM, errno.EACCES):
        # The manifest file does not have sufficient permissions enabled:
        # print a warning and keep going
        tty.warn("Unable to read manifest due to insufficient permissions.", skip_msg)
@@ -54,10 +54,6 @@
 @m{target=target}              specific <target> processor
 @m{arch=platform-os-target}    shortcut for all three above

-cross-compiling:
-    @m{os=backend} or @m{os=be}        build for compute node (backend)
-    @m{os=frontend} or @m{os=fe}       build for login node (frontend)
-
 dependencies:
     ^dependency [constraints]   specify constraints on dependencies
     ^@K{/hash}                  build with a specific installed
@@ -545,7 +545,7 @@ def _not_license_excluded(self, x):
     package does not explicitly forbid redistributing source."""
     if self.private:
         return True
-    elif x.package_class.redistribute_source(x):
+    elif spack.repo.PATH.get_pkg_class(x.fullname).redistribute_source(x):
         return True
     else:
         tty.debug(
@@ -6,7 +6,7 @@
 import os
 import re
 import sys
-from itertools import zip_longest
+from itertools import islice, zip_longest
 from typing import Dict, List, Optional

 import llnl.util.tty as tty
@@ -423,7 +423,8 @@ def _run_import_check(
         continue

     for m in is_abs_import.finditer(contents):
-        if contents.count(m.group(1)) == 1:
+        # Find at most two occurences: the first is the import itself, the second is its usage.
+        if len(list(islice(re.finditer(rf"{re.escape(m.group(1))}(?!\w)", contents), 2))) == 1:
            to_remove.append(m.group(0))
            exit_code = 1
            print(f"{pretty_path}: redundant import: {m.group(1)}", file=out)
@@ -438,7 +439,7 @@ def _run_import_check(
     module = _module_part(root, m.group(0))
     if not module or module in to_add:
         continue
-    if re.search(rf"import {re.escape(module)}\b(?!\.)", contents):
+    if re.search(rf"import {re.escape(module)}(?!\w|\.)", contents):
         continue
     to_add.add(module)
     exit_code = 1
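The first change matters because a plain substring count can be fooled when the imported name is a prefix of another identifier. A small self-contained illustration of the old versus new heuristic:

```python
import re
from itertools import islice

contents = "import spack.repo\nspack.repository.something()"
name = "spack.repo"

# Old heuristic: substring count sees two occurrences and keeps the import,
# even though the second hit is really part of `spack.repository`.
print(contents.count(name))  # 2

# New heuristic: the (?!\w) lookahead rejects the `spack.repository` hit, so
# only the import line matches and it is flagged as redundant; islice caps
# the scan at two matches since more are never needed.
matches = list(islice(re.finditer(rf"{re.escape(name)}(?!\w)", contents), 2))
print(len(matches))  # 1
```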
@@ -177,16 +177,15 @@ def test_run(args):
     matching = spack.store.STORE.db.query_local(spec, hashes=hashes, explicit=explicit)
     if spec and not matching:
         tty.warn("No {0}installed packages match spec {1}".format(explicit_str, spec))
-        """
-        TODO: Need to write out a log message and/or CDASH Testing
-        output that package not installed IF continue to process
-        these issues here.
-
-        if args.log_format:
-            # Proceed with the spec assuming the test process
-            # to ensure report package as skipped (e.g., for CI)
-            specs_to_test.append(spec)
-        """
+        # TODO: Need to write out a log message and/or CDASH Testing
+        # output that package not installed IF continue to process
+        # these issues here.
+
+        # if args.log_format:
+        #     # Proceed with the spec assuming the test process
+        #     # to ensure report package as skipped (e.g., for CI)
+        #     specs_to_test.append(spec)

     specs_to_test.extend(matching)

@@ -253,7 +252,9 @@ def has_test_and_tags(pkg_class):
     hashes = env.all_hashes() if env else None

     specs = spack.store.STORE.db.query(hashes=hashes)
-    specs = list(filter(lambda s: has_test_and_tags(s.package_class), specs))
+    specs = list(
+        filter(lambda s: has_test_and_tags(spack.repo.PATH.get_pkg_class(s.fullname)), specs)
+    )

     spack.cmd.display_specs(specs, long=True)
@@ -801,17 +801,17 @@ def _extract_compiler_paths(spec: "spack.spec.Spec") -> Optional[Dict[str, str]]


 def _extract_os_and_target(spec: "spack.spec.Spec"):
     if not spec.architecture:
         host_platform = spack.platforms.host()
-        operating_system = host_platform.operating_system("default_os")
-        target = host_platform.target("default_target")
+        operating_system = host_platform.default_operating_system()
+        target = host_platform.default_target()
     else:
         target = spec.architecture.target
         if not target:
-            target = spack.platforms.host().target("default_target")
+            target = spack.platforms.host().default_target()

         operating_system = spec.os
         if not operating_system:
             host_platform = spack.platforms.host()
-            operating_system = host_platform.operating_system("default_os")
+            operating_system = host_platform.default_operating_system()
     return operating_system, target
@@ -57,7 +57,7 @@ def validate(configuration_file):
     # Set the default value of the concretization strategy to unify and
     # warn if the user explicitly set another value
     env_dict.setdefault("concretizer", {"unify": True})
-    if not env_dict["concretizer"]["unify"] is True:
+    if env_dict["concretizer"]["unify"] is not True:
         warnings.warn(
             '"concretizer:unify" is not set to "true", which means the '
             "generated image may contain different variants of the same "
|
||||
# Check if the target path lexists
|
||||
try:
|
||||
st = os.lstat(new_root)
|
||||
except (IOError, OSError):
|
||||
except OSError:
|
||||
return
|
||||
|
||||
# Empty directories are fine
|
||||
@@ -861,7 +861,7 @@ def regenerate(self, concrete_roots: List[Spec]) -> None:
|
||||
):
|
||||
try:
|
||||
shutil.rmtree(old_root)
|
||||
except (IOError, OSError) as e:
|
||||
except OSError as e:
|
||||
msg = "Failed to remove old view at %s\n" % old_root
|
||||
msg += str(e)
|
||||
tty.warn(msg)
|
||||
@@ -2554,7 +2554,7 @@ def is_latest_format(manifest):
|
||||
try:
|
||||
with open(manifest, encoding="utf-8") as f:
|
||||
data = syaml.load(f)
|
||||
except (OSError, IOError):
|
||||
except OSError:
|
||||
return True
|
||||
top_level_key = _top_level_key(data)
|
||||
changed = spack.schema.env.update(data[top_level_key])
|
||||
@@ -2634,6 +2634,32 @@ def _ensure_env_dir():
|
||||
|
||||
shutil.copy(envfile, target_manifest)
|
||||
|
||||
# Copy relative path includes that live inside the environment dir
|
||||
try:
|
||||
manifest = EnvironmentManifestFile(environment_dir)
|
||||
except Exception:
|
||||
# error handling for bad manifests is handled on other code paths
|
||||
return
|
||||
|
||||
includes = manifest[TOP_LEVEL_KEY].get("include", [])
|
||||
for include in includes:
|
||||
if os.path.isabs(include):
|
||||
continue
|
||||
|
||||
abspath = pathlib.Path(os.path.normpath(environment_dir / include))
|
||||
common_path = pathlib.Path(os.path.commonpath([environment_dir, abspath]))
|
||||
if common_path != environment_dir:
|
||||
tty.debug(f"Will not copy relative include from outside environment: {include}")
|
||||
continue
|
||||
|
||||
orig_abspath = os.path.normpath(envfile.parent / include)
|
||||
if not os.path.exists(orig_abspath):
|
||||
tty.warn(f"Included file does not exist; will not copy: '{include}'")
|
||||
continue
|
||||
|
||||
fs.touchp(abspath)
|
||||
shutil.copy(orig_abspath, abspath)
|
||||
|
||||
|
||||
class EnvironmentManifestFile(collections.abc.Mapping):
|
||||
"""Manages the in-memory representation of a manifest file, and its synchronization
|
||||
|
@@ -187,7 +187,7 @@ def path_for_extension(target_name: str, *, paths: List[str]) -> str:
         if name == target_name:
             return path
     else:
-        raise IOError('extension "{0}" not found'.format(target_name))
+        raise OSError('extension "{0}" not found'.format(target_name))


 def get_module(cmd_name):
@@ -427,7 +427,7 @@ def needs_file(spec, file):
     try:
         with open(manifest_file, "r", encoding="utf-8") as f:
             manifest = s_json.load(f)
-    except (OSError, IOError):
+    except OSError:
         # if we can't load it, assume it doesn't know about the file.
         manifest = {}
     return test_path in manifest
@@ -831,7 +831,7 @@ def get_spec_from_file(filename):
     try:
         with open(filename, "r", encoding="utf-8") as f:
             return spack.spec.Spec.from_yaml(f)
-    except IOError:
+    except OSError:
         return None
|
@@ -42,10 +42,10 @@
|
||||
import llnl.util.tty.color
|
||||
|
||||
import spack.deptypes as dt
|
||||
import spack.repo
|
||||
import spack.spec
|
||||
import spack.tengine
|
||||
import spack.traverse
|
||||
from spack.solver.input_analysis import create_graph_analyzer
|
||||
|
||||
|
||||
def find(seq, predicate):
|
||||
@@ -537,10 +537,11 @@ def edge_entry(self, edge):
|
||||
|
||||
def _static_edges(specs, depflag):
|
||||
for spec in specs:
|
||||
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
|
||||
possible = pkg_cls.possible_dependencies(expand_virtuals=True, depflag=depflag)
|
||||
*_, edges = create_graph_analyzer().possible_dependencies(
|
||||
spec.name, expand_virtuals=True, allowed_deps=depflag
|
||||
)
|
||||
|
||||
for parent_name, dependencies in possible.items():
|
||||
for parent_name, dependencies in edges.items():
|
||||
for dependency_name in dependencies:
|
||||
yield spack.spec.DependencySpec(
|
||||
spack.spec.Spec(parent_name),
|
||||
|
@@ -26,7 +26,7 @@ def is_shared_library_elf(filepath):
         with open(filepath, "rb") as f:
             elf = parse_elf(f, interpreter=True, dynamic_section=True)
             return elf.has_pt_dynamic and (elf.has_soname or not elf.has_pt_interp)
-    except (IOError, OSError, ElfParsingError):
+    except (OSError, ElfParsingError):
         return False
@@ -166,7 +166,7 @@ def filter_shebangs_in_directory(directory, filenames=None):
     # Only look at executable, non-symlink files.
     try:
         st = os.lstat(path)
-    except (IOError, OSError):
+    except OSError:
         continue

     if stat.S_ISLNK(st.st_mode) or stat.S_ISDIR(st.st_mode) or not st.st_mode & is_exe:
@@ -566,7 +566,7 @@ def copy_test_files(pkg: Pb, test_spec: spack.spec.Spec):

     # copy test data into test stage data dir
     try:
-        pkg_cls = test_spec.package_class
+        pkg_cls = spack.repo.PATH.get_pkg_class(test_spec.fullname)
     except spack.repo.UnknownPackageError:
         tty.debug(f"{test_spec.name}: skipping test data copy since no package class found")
         return
@@ -623,7 +623,7 @@ def test_functions(
     vpkgs = virtuals(pkg)
     for vname in vpkgs:
         try:
-            classes.append((Spec(vname)).package_class)
+            classes.append(spack.repo.PATH.get_pkg_class(vname))
         except spack.repo.UnknownPackageError:
             tty.debug(f"{vname}: virtual does not appear to have a package file")

@@ -668,7 +668,7 @@ def process_test_parts(pkg: Pb, test_specs: List[spack.spec.Spec], verbose: bool

     # grab test functions associated with the spec, which may be virtual
     try:
-        tests = test_functions(spec.package_class)
+        tests = test_functions(spack.repo.PATH.get_pkg_class(spec.fullname))
     except spack.repo.UnknownPackageError:
         # Some virtuals don't have a package so we don't want to report
         # them as not having tests when that isn't appropriate.
@@ -814,7 +814,7 @@ def get_depflags(self, pkg: "spack.package_base.PackageBase") -> int:
     # Include build dependencies if pkg is going to be built from sources, or
     # if build deps are explicitly requested.
     if include_build_deps or not (
-        cache_only or pkg.spec.installed and not pkg.spec.dag_hash() in self.overwrite
+        cache_only or pkg.spec.installed and pkg.spec.dag_hash() not in self.overwrite
     ):
         depflag |= dt.BUILD
     if self.run_tests(pkg):
@@ -2436,11 +2436,7 @@ def _real_install(self) -> None:
         # DEBUGGING TIP - to debug this section, insert an IPython
         # embed here, and run the sections below without log capture
         log_contextmanager = log_output(
-            log_file,
-            self.echo,
-            True,
-            env=self.unmodified_env,
-            filter_fn=self.filter_fn,
+            log_file, self.echo, True, filter_fn=self.filter_fn
         )

         with log_contextmanager as logger:
@@ -163,7 +163,7 @@ def format_help_sections(self, level):
     # lazily add all commands to the parser when needed.
     add_all_commands(self)

-    """Print help on subcommands in neatly formatted sections."""
+    # Print help on subcommands in neatly formatted sections.
     formatter = self._get_formatter()

     # Create a list of subcommand actions. Argparse internals are nasty!
@@ -728,7 +728,7 @@ def _compatible_sys_types():
     with the current host.
     """
     host_platform = spack.platforms.host()
-    host_os = str(host_platform.operating_system("default_os"))
+    host_os = str(host_platform.default_operating_system())
     host_target = archspec.cpu.host()
     compatible_targets = [host_target] + host_target.ancestors
@@ -64,7 +64,7 @@ def from_local_path(path: str):
     @staticmethod
     def from_url(url: str):
         """Create an anonymous mirror by URL. This method validates the URL."""
-        if not urllib.parse.urlparse(url).scheme in supported_url_schemes:
+        if urllib.parse.urlparse(url).scheme not in supported_url_schemes:
             raise ValueError(
                 f'"{url}" is not a valid mirror URL. '
                 f"Scheme must be one of {supported_url_schemes}."
@@ -383,6 +383,7 @@ def create_opener():
     """Create an opener that can handle OCI authentication."""
     opener = urllib.request.OpenerDirector()
     for handler in [
+        urllib.request.ProxyHandler(),
         urllib.request.UnknownHandler(),
         urllib.request.HTTPSHandler(context=spack.util.web.ssl_create_default_context()),
         spack.util.web.SpackHTTPDefaultErrorHandler(),
@@ -2,31 +2,64 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)

-# flake8: noqa: F401
-"""spack.util.package is a set of useful build tools and directives for packages.
-
-Everything in this module is automatically imported into Spack package files.
-"""
+# flake8: noqa: F401, E402
+"""spack.package defines the public API for Spack packages, by re-exporting useful symbols from
+other modules. Packages should import this module, instead of importing from spack.* directly
+to ensure forward compatibility with future versions of Spack."""

 from os import chdir, environ, getcwd, makedirs, mkdir, remove, removedirs
 from shutil import move, rmtree

+from spack.error import InstallError, NoHeadersError, NoLibrariesError

-# Emulate some shell commands for convenience
-env = environ
-cd = chdir
-pwd = getcwd
-
 # import most common types used in packages
 from typing import Dict, List, Optional

-import llnl.util.filesystem
-from llnl.util.filesystem import *
+
+class tty:
+    import llnl.util.tty as _tty
+
+    debug = _tty.debug
+    error = _tty.error
+    info = _tty.info
+    msg = _tty.msg
+    warn = _tty.warn
+
+
+from llnl.util.filesystem import (
+    FileFilter,
+    FileList,
+    HeaderList,
+    LibraryList,
+    ancestor,
+    can_access,
+    change_sed_delimiter,
+    copy,
+    copy_tree,
+    filter_file,
+    find,
+    find_all_headers,
+    find_first,
+    find_headers,
+    find_libraries,
+    find_system_libraries,
+    force_remove,
+    force_symlink,
+    install,
+    install_tree,
+    is_exe,
+    join_path,
+    keep_modification_time,
+    library_extensions,
+    mkdirp,
+    remove_directory_contents,
+    remove_linked_tree,
+    rename,
+    set_executable,
+    set_install_permissions,
+    touch,
+    working_dir,
+)
 from llnl.util.symlink import symlink

-import spack.util.executable
-
 # These props will be overridden when the build env is set up.
 from spack.build_environment import MakeExecutable
 from spack.build_systems.aspell_dict import AspellDictPackage
 from spack.build_systems.autotools import AutotoolsPackage
@@ -76,7 +109,24 @@
 from spack.builder import BaseBuilder
 from spack.config import determine_number_of_jobs
 from spack.deptypes import ALL_TYPES as all_deptypes
-from spack.directives import *
+from spack.directives import (
+    build_system,
+    can_splice,
+    conditional,
+    conflicts,
+    depends_on,
+    extends,
+    license,
+    maintainers,
+    patch,
+    provides,
+    redistribute,
+    requires,
+    resource,
+    variant,
+    version,
+)
-from spack.error import InstallError, NoHeadersError, NoLibrariesError
 from spack.install_test import (
     SkipTest,
     cache_extra_test_sources,
@@ -86,26 +136,28 @@
     install_test_root,
     test_part,
 )
-from spack.installer import ExternalPackageError, InstallLockError, UpstreamPackageError
 from spack.mixins import filter_compiler_wrappers
 from spack.multimethod import default_args, when
-from spack.package_base import (
-    DependencyConflictError,
-    build_system_flags,
-    env_flags,
-    flatten_dependencies,
-    inject_flags,
-    install_dependency_symlinks,
-    on_package_attributes,
-)
+from spack.package_base import build_system_flags, env_flags, inject_flags, on_package_attributes
-from spack.package_completions import (
-    bash_completion_path,
-    fish_completion_path,
-    zsh_completion_path,
-)
+from spack.package_completions import *
 from spack.phase_callbacks import run_after, run_before
-from spack.spec import InvalidSpecDetected, Spec
-from spack.util.executable import *
+from spack.spec import Spec
+from spack.util.environment import EnvironmentModifications
+from spack.util.executable import Executable, ProcessError, which, which_string
 from spack.util.filesystem import fix_darwin_install_name
 from spack.util.prefix import Prefix
 from spack.variant import any_combination_of, auto_or_any_combination_of, disjoint_sets
 from spack.version import Version, ver

+# Emulate some shell commands for convenience
+env = environ
+cd = chdir
+pwd = getcwd
+
+# These are just here for editor support; they may be set when the build env is set up.
+configure: Executable
+make_jobs: int
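As a quick illustration of the re-exported API, a minimal hypothetical package imports everything it needs from ``spack.package`` alone (names, URL, and checksum are placeholders):

```python
from spack.package import *


class Hello(AutotoolsPackage):
    """Minimal example built on the spack.package public API."""

    homepage = "https://example.com/hello"
    url = "https://example.com/hello-1.0.tar.gz"

    version("1.0", sha256="0" * 64)  # placeholder checksum

    variant("mpi", default=False, description="Enable MPI support")
    depends_on("mpi", when="+mpi")

    def configure_args(self):
        # enable_or_disable returns ["--enable-mpi"] or ["--disable-mpi"]
        return self.enable_or_disable("mpi")
```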
@@ -22,7 +22,6 @@
import textwrap
import time
import traceback
import typing
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union

from typing_extensions import Literal
@@ -30,7 +29,6 @@
import llnl.util.filesystem as fsys
import llnl.util.tty as tty
from llnl.util.lang import classproperty, memoized
from llnl.util.link_tree import LinkTree

import spack.compilers
import spack.config
@@ -67,10 +65,6 @@
]
FLAG_HANDLER_TYPE = Callable[[str, Iterable[str]], FLAG_HANDLER_RETURN_TYPE]

"""Allowed URL schemes for spack packages."""
_ALLOWED_URL_SCHEMES = ["http", "https", "ftp", "file", "git"]


#: Filename for the Spack build/install log.
_spack_build_logfile = "spack-build-out.txt"
@@ -702,9 +696,6 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
    #: Verbosity level, preserved across installs.
    _verbose = None

    #: index of patches by sha256 sum, built lazily
    _patches_by_hash = None

    #: Package homepage where users can find more information about the package
    homepage: Optional[str] = None

@@ -718,19 +709,6 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
    #: Do not include @ here in order not to unnecessarily ping the users.
    maintainers: List[str] = []

    #: List of attributes to be excluded from a package's hash.
    metadata_attrs = [
        "homepage",
        "url",
        "urls",
        "list_url",
        "extendable",
        "parallel",
        "make_jobs",
        "maintainers",
        "tags",
    ]

    #: Set to ``True`` to indicate the stand-alone test requires a compiler.
    #: It is used to ensure a compiler and build dependencies like 'cmake'
    #: are available to build a custom test code.
@@ -830,104 +808,6 @@ def get_variant(self, name: str) -> spack.variant.Variant:
        except StopIteration:
            raise ValueError(f"No variant '{name}' on spec: {self.spec}")

    @classmethod
    def possible_dependencies(
        cls,
        transitive: bool = True,
        expand_virtuals: bool = True,
        depflag: dt.DepFlag = dt.ALL,
        visited: Optional[dict] = None,
        missing: Optional[dict] = None,
        virtuals: Optional[set] = None,
    ) -> Dict[str, Set[str]]:
        """Return dict of possible dependencies of this package.

        Args:
            transitive (bool or None): return all transitive dependencies if
                True, only direct dependencies if False (default True).
            expand_virtuals (bool or None): expand virtual dependencies into
                all possible implementations (default True)
            depflag: dependency types to consider
            visited (dict or None): dict of names of dependencies visited so
                far, mapped to their immediate dependencies' names.
            missing (dict or None): dict to populate with packages and their
                *missing* dependencies.
            virtuals (set): if provided, populate with virtuals seen so far.

        Returns:
            (dict): dictionary mapping dependency names to *their*
                immediate dependencies

        Each item in the returned dictionary maps a (potentially
        transitive) dependency of this package to its possible
        *immediate* dependencies. If ``expand_virtuals`` is ``False``,
        virtual package names will be inserted as keys mapped to empty
        sets of dependencies. Virtuals, if not expanded, are treated as
        though they have no immediate dependencies.

        Missing dependencies by default are ignored, but if a
        missing dict is provided, it will be populated with package names
        mapped to any dependencies they have that are in no
        repositories. This is only populated if transitive is True.

        Note: the returned dict *includes* the package itself.

        """
        visited = {} if visited is None else visited
        missing = {} if missing is None else missing

        visited.setdefault(cls.name, set())

        for name, conditions in cls.dependencies_by_name(when=True).items():
            # check whether this dependency could be of the type asked for
            depflag_union = 0
            for deplist in conditions.values():
                for dep in deplist:
                    depflag_union |= dep.depflag
            if not (depflag & depflag_union):
                continue

            # expand virtuals if enabled, otherwise just stop at virtuals
            if spack.repo.PATH.is_virtual(name):
                if virtuals is not None:
                    virtuals.add(name)
                if expand_virtuals:
                    providers = spack.repo.PATH.providers_for(name)
                    dep_names = [spec.name for spec in providers]
                else:
                    visited.setdefault(cls.name, set()).add(name)
                    visited.setdefault(name, set())
                    continue
            else:
                dep_names = [name]

            # add the dependency names to the visited dict
            visited.setdefault(cls.name, set()).update(set(dep_names))

            # recursively traverse dependencies
            for dep_name in dep_names:
                if dep_name in visited:
                    continue

                visited.setdefault(dep_name, set())

                # skip the rest if not transitive
                if not transitive:
                    continue

                try:
                    dep_cls = spack.repo.PATH.get_pkg_class(dep_name)
                except spack.repo.UnknownPackageError:
                    # log unknown packages
                    missing.setdefault(cls.name, set()).add(dep_name)
                    continue

                dep_cls.possible_dependencies(
                    transitive, expand_virtuals, depflag, visited, missing, virtuals
                )

        return visited

    @classproperty
    def package_dir(cls):
        """Directory where the package.py file lives."""
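For reference, a minimal sketch of consuming the adjacency dict documented above; the package name is an assumption, not part of this change:

import spack.repo

pkg_cls = spack.repo.PATH.get_pkg_class("zlib")  # hypothetical package name
missing: dict = {}
graph = pkg_cls.possible_dependencies(transitive=True, missing=missing)

# Each key is a (possibly transitive) dependency; each value holds its
# *immediate* dependencies, and the root package itself is included.
for name, immediate in sorted(graph.items()):
    print(f"{name} -> {sorted(immediate)}")
print(missing)  # packages whose deps are in no known repository, if any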
@@ -2292,85 +2172,6 @@ def rpath_args(self):
build_system_flags = PackageBase.build_system_flags


def install_dependency_symlinks(pkg, spec, prefix):
    """
    Execute a dummy install and flatten dependencies.

    This routine can be used in a ``package.py`` definition by setting
    ``install = install_dependency_symlinks``.

    This feature comes in handy for creating a common location for the
    installation of third-party libraries.
    """
    flatten_dependencies(spec, prefix)


def use_cray_compiler_names():
    """Compiler names for builds that rely on cray compiler names."""
    os.environ["CC"] = "cc"
    os.environ["CXX"] = "CC"
    os.environ["FC"] = "ftn"
    os.environ["F77"] = "ftn"


def flatten_dependencies(spec, flat_dir):
    """Make each dependency of spec present in dir via symlink."""
    for dep in spec.traverse(root=False):
        name = dep.name

        dep_path = spack.store.STORE.layout.path_for_spec(dep)
        dep_files = LinkTree(dep_path)

        os.mkdir(flat_dir + "/" + name)

        conflict = dep_files.find_conflict(flat_dir + "/" + name)
        if conflict:
            raise DependencyConflictError(conflict)

        dep_files.merge(flat_dir + "/" + name)


def possible_dependencies(
    *pkg_or_spec: Union[str, spack.spec.Spec, typing.Type[PackageBase]],
    transitive: bool = True,
    expand_virtuals: bool = True,
    depflag: dt.DepFlag = dt.ALL,
    missing: Optional[dict] = None,
    virtuals: Optional[set] = None,
) -> Dict[str, Set[str]]:
    """Get the possible dependencies of a number of packages.

    See ``PackageBase.possible_dependencies`` for details.
    """
    packages = []
    for pos in pkg_or_spec:
        if isinstance(pos, PackageMeta) and issubclass(pos, PackageBase):
            packages.append(pos)
            continue

        if not isinstance(pos, spack.spec.Spec):
            pos = spack.spec.Spec(pos)

        if spack.repo.PATH.is_virtual(pos.name):
            packages.extend(p.package_class for p in spack.repo.PATH.providers_for(pos.name))
            continue
        else:
            packages.append(pos.package_class)

    visited: Dict[str, Set[str]] = {}
    for pkg in packages:
        pkg.possible_dependencies(
            visited=visited,
            transitive=transitive,
            expand_virtuals=expand_virtuals,
            depflag=depflag,
            missing=missing,
            virtuals=virtuals,
        )

    return visited


def deprecated_version(pkg: PackageBase, version: Union[str, StandardVersion]) -> bool:
    """Return True iff the version is deprecated.
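A short sketch of the module-level wrapper above, which accepts package names, specs, or package classes and unions the results; the names passed in are assumptions:

import spack.package_base

# "mpi" is virtual, so its providers are expanded before traversal.
graph = spack.package_base.possible_dependencies(
    "cmake", "mpi", transitive=True, expand_virtuals=True
)
print(sorted(graph))  # every package name that could appear in the DAG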
@@ -52,8 +52,7 @@ def use_platform(new_platform):

    import spack.config

    msg = '"{0}" must be an instance of Platform'
    assert isinstance(new_platform, Platform), msg.format(new_platform)
    assert isinstance(new_platform, Platform), f'"{new_platform}" must be an instance of Platform'

    original_host_fn = host
@@ -1,42 +1,22 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import warnings
from typing import Optional

import archspec.cpu

import llnl.util.lang

import spack.error


class NoPlatformError(spack.error.SpackError):
    def __init__(self):
        msg = "Could not determine a platform for this machine"
        super().__init__(msg)


@llnl.util.lang.lazy_lexicographic_ordering
class Platform:
    """Platform is an abstract class extended by subclasses.

    To add a new type of platform (such as cray_xe), create a subclass and set all the
    class attributes such as priority, front_target, back_target, front_os, back_os.

    Platform also contains a priority class attribute. A lower number signifies higher
    priority. These numbers are arbitrarily set and can be changed, though often there
    isn't much need unless a new platform is added and the user wants that to be
    detected first.

    Targets are created inside the platform subclasses. Most architectures (like linux
    and darwin) will have only one target family (x86_64), but in the case of Cray
    machines there is both a frontend and backend processor. The user can specify
    which targets are present on front-end and back-end architecture.

    Depending on the platform, operating systems are either autodetected or are
    set. The user can set the frontend and backend operating systems via the class
    attributes front_os and back_os. The operating system will be responsible for
    compiler detection.
    """

    # Subclass sets number. Controls detection order
@@ -45,82 +25,72 @@ class attributes such as priority, front_target, back_target, front_os, back_os.
    #: binary formats used on this platform; used by relocation logic
    binary_formats = ["elf"]

    front_end: Optional[str] = None
    back_end: Optional[str] = None
    default: Optional[str] = None  # The default back end target.

    front_os: Optional[str] = None
    back_os: Optional[str] = None
    default_os: Optional[str] = None
    default: str
    default_os: str

    reserved_targets = ["default_target", "frontend", "fe", "backend", "be"]
    reserved_oss = ["default_os", "frontend", "fe", "backend", "be"]
    deprecated_names = ["frontend", "fe", "backend", "be"]

    def __init__(self, name):
        self.targets = {}
        self.operating_sys = {}
        self.name = name
        self._init_targets()

    def add_target(self, name: str, target: archspec.cpu.Microarchitecture) -> None:
        """Used by the platform specific subclass to list available targets.
        Raises an error if the platform specifies a name
        that is reserved by spack as an alias.
        """
        if name in Platform.reserved_targets:
            msg = "{0} is a spack reserved alias and cannot be the name of a target"
            raise ValueError(msg.format(name))
            msg = f"{name} is a spack reserved alias and cannot be the name of a target"
            raise ValueError(msg)
        self.targets[name] = target

    def _add_archspec_targets(self):
    def _init_targets(self):
        self.default = archspec.cpu.host().name
        for name, microarchitecture in archspec.cpu.TARGETS.items():
            self.add_target(name, microarchitecture)

    def target(self, name):
        """This is a getter method for the target dictionary
        that handles defaulting based on the values provided by default,
        front-end, and back-end. This can be overwritten
        by a subclass for which we want to provide further aliasing options.
        """
        # TODO: Check if we can avoid using strings here
        name = str(name)
        if name == "default_target":
        if name in Platform.deprecated_names:
            warnings.warn(f"target={name} is deprecated, use target={self.default} instead")

        if name in Platform.reserved_targets:
            name = self.default
        elif name == "frontend" or name == "fe":
            name = self.front_end
        elif name == "backend" or name == "be":
            name = self.back_end

        return self.targets.get(name, None)

    def add_operating_system(self, name, os_class):
        """Add the operating_system class object into the
        platform.operating_sys dictionary.
        """
        if name in Platform.reserved_oss:
            msg = "{0} is a spack reserved alias and cannot be the name of an OS"
            raise ValueError(msg.format(name))
        if name in Platform.reserved_oss + Platform.deprecated_names:
            msg = f"{name} is a spack reserved alias and cannot be the name of an OS"
            raise ValueError(msg)
        self.operating_sys[name] = os_class

    def default_target(self):
        return self.target(self.default)

    def default_operating_system(self):
        return self.operating_system(self.default_os)

    def operating_system(self, name):
        if name == "default_os":
        if name in Platform.deprecated_names:
            warnings.warn(f"os={name} is deprecated, use os={self.default_os} instead")

        if name in Platform.reserved_oss:
            name = self.default_os
        if name == "frontend" or name == "fe":
            name = self.front_os
        if name == "backend" or name == "be":
            name = self.back_os

        return self.operating_sys.get(name, None)

    def setup_platform_environment(self, pkg, env):
        """Subclass can override this method if it requires any
        platform-specific build environment modifications.
        """Platform-specific build environment modifications.

        This method is meant to be overridden by subclasses when needed.
        """
        pass

    @classmethod
    def detect(cls):
        """Return True if the host platform is detected to be the current
        Platform class, False otherwise.
        """Returns True if the host platform is detected to be the current Platform class,
        False otherwise.

        Derived classes are responsible for implementing this method.
        """
@@ -135,11 +105,7 @@ def __str__(self):
    def _cmp_iter(self):
        yield self.name
        yield self.default
        yield self.front_end
        yield self.back_end
        yield self.default_os
        yield self.front_os
        yield self.back_os

    def targets():
        for t in sorted(self.targets.values()):
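A minimal sketch of the new aliasing behavior: deprecated aliases still resolve, but emit a warning first (host platform assumed):

import warnings

import spack.platforms

plat = spack.platforms.host()
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # "frontend" is both deprecated and reserved: warn, then fall back
    # to plat.default, exactly as in Platform.target() above.
    tgt = plat.target("frontend")
assert any("deprecated" in str(w.message) for w in caught)
print(tgt is plat.default_target())  # True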
@@ -4,8 +4,6 @@

import platform as py_platform

import archspec.cpu

from spack.operating_systems.mac_os import MacOs
from spack.version import Version

@@ -19,18 +17,8 @@ class Darwin(Platform):

    def __init__(self):
        super().__init__("darwin")
        self._add_archspec_targets()

        self.default = archspec.cpu.host().name
        self.front_end = self.default
        self.back_end = self.default

        mac_os = MacOs()

        self.default_os = str(mac_os)
        self.front_os = str(mac_os)
        self.back_os = str(mac_os)

        self.add_operating_system(str(mac_os), mac_os)

    @classmethod
@@ -3,8 +3,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform

import archspec.cpu

from spack.operating_systems.freebsd import FreeBSDOs

from ._platform import Platform
@@ -15,18 +13,8 @@ class FreeBSD(Platform):

    def __init__(self):
        super().__init__("freebsd")

        self._add_archspec_targets()

        # Get specific default
        self.default = archspec.cpu.host().name
        self.front_end = self.default
        self.back_end = self.default

        os = FreeBSDOs()
        self.default_os = str(os)
        self.front_os = self.default_os
        self.back_os = self.default_os
        self.add_operating_system(str(os), os)

    @classmethod
@@ -3,8 +3,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform

import archspec.cpu

from spack.operating_systems.linux_distro import LinuxDistro

from ._platform import Platform
@@ -15,18 +13,8 @@ class Linux(Platform):

    def __init__(self):
        super().__init__("linux")

        self._add_archspec_targets()

        # Get specific default
        self.default = archspec.cpu.host().name
        self.front_end = self.default
        self.back_end = self.default

        linux_dist = LinuxDistro()
        self.default_os = str(linux_dist)
        self.front_os = self.default_os
        self.back_os = self.default_os
        self.add_operating_system(str(linux_dist), linux_dist)

    @classmethod
@@ -16,31 +16,19 @@ class Test(Platform):
    if platform.system().lower() == "darwin":
        binary_formats = ["macho"]

    if platform.machine() == "arm64":
        front_end = "aarch64"
        back_end = "m1"
        default = "m1"
    else:
        front_end = "x86_64"
        back_end = "core2"
        default = "core2"

    front_os = "redhat6"
    back_os = "debian6"
    default_os = "debian6"
    default = "m1" if platform.machine() == "arm64" else "core2"

    def __init__(self, name=None):
        name = name or "test"
        super().__init__(name)
        self.add_target(self.default, archspec.cpu.TARGETS[self.default])
        self.add_target(self.front_end, archspec.cpu.TARGETS[self.front_end])
        self.add_operating_system("debian6", spack.operating_systems.OperatingSystem("debian", 6))
        self.add_operating_system("redhat6", spack.operating_systems.OperatingSystem("redhat", 6))

        self.add_operating_system(
            self.default_os, spack.operating_systems.OperatingSystem("debian", 6)
        )
        self.add_operating_system(
            self.front_os, spack.operating_systems.OperatingSystem("redhat", 6)
        )

    def _init_targets(self):
        targets = ("aarch64", "m1") if platform.machine() == "arm64" else ("x86_64", "core2")
        for t in targets:
            self.add_target(t, archspec.cpu.TARGETS[t])

    @classmethod
    def detect(cls):
@@ -4,8 +4,6 @@

import platform

import archspec.cpu

from spack.operating_systems.windows_os import WindowsOs

from ._platform import Platform
@@ -16,18 +14,8 @@ class Windows(Platform):

    def __init__(self):
        super().__init__("windows")
        self._add_archspec_targets()

        self.default = archspec.cpu.host().name
        self.front_end = self.default
        self.back_end = self.default

        windows_os = WindowsOs()

        self.default_os = str(windows_os)
        self.front_os = str(windows_os)
        self.back_os = str(windows_os)

        self.add_operating_system(str(windows_os), windows_os)

    @classmethod
@@ -236,22 +236,15 @@ def relocate_elf_binaries(binaries: Iterable[str], prefix_to_prefix: Dict[str, s
        _set_elf_rpaths_and_interpreter(path, rpaths=rpaths, interpreter=interpreter)


def _warn_if_link_cant_be_relocated(link: str, target: str):
    if not os.path.isabs(target):
        return
    tty.warn(f'Symbolic link at "{link}" to "{target}" cannot be relocated')


def relocate_links(links: Iterable[str], prefix_to_prefix: Dict[str, str]) -> None:
    """Relocate links to a new install prefix."""
    regex = re.compile("|".join(re.escape(p) for p in prefix_to_prefix.keys()))
    for link in links:
        old_target = readlink(link)
        if not os.path.isabs(old_target):
            continue
        match = regex.match(old_target)

        # No match.
        if match is None:
            _warn_if_link_cant_be_relocated(link, old_target)
            continue

        new_target = prefix_to_prefix[match.group()] + old_target[match.end() :]
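The rebase logic above, in isolation (paths invented for illustration):

import re

prefix_to_prefix = {"/old/opt/spack": "/new/opt/spack"}  # made-up prefixes
regex = re.compile("|".join(re.escape(p) for p in prefix_to_prefix))

old_target = "/old/opt/spack/zlib-1.3/lib/libz.so"
match = regex.match(old_target)
if match:
    # Splice the mapped prefix onto the unmatched tail of the old target.
    new_target = prefix_to_prefix[match.group()] + old_target[match.end():]
    print(new_target)  # /new/opt/spack/zlib-1.3/lib/libz.so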
@@ -1041,7 +1041,7 @@ def _read_config(self) -> Dict[str, str]:

            return yaml_data["repo"]

        except IOError:
        except OSError:
            tty.die(f"Error reading {self.config_file} when opening {self.root}")

    def get(self, spec: "spack.spec.Spec") -> "spack.package_base.PackageBase":
@@ -1369,7 +1369,7 @@ def create_repo(root, namespace=None, subdir=packages_dir_name):
        if subdir != packages_dir_name:
            config.write(f" subdirectory: '{subdir}'\n")

    except (IOError, OSError) as e:
    except OSError as e:
        # try to clean up.
        if existed:
            shutil.rmtree(config_path, ignore_errors=True)
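Context for the two hunks above: since Python 3.3, IOError has been an alias of OSError, so the simplified handlers are behavior-preserving:

assert IOError is OSError  # True on any Python 3 interpreter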
@@ -1,6 +1,7 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import codecs
import collections
import hashlib
import os
@@ -13,7 +14,7 @@
import xml.sax.saxutils
from typing import Dict, Optional
from urllib.parse import urlencode
from urllib.request import HTTPSHandler, Request, build_opener
from urllib.request import Request

import llnl.util.tty as tty
from llnl.util.filesystem import working_dir
@@ -24,10 +25,10 @@
import spack.spec
import spack.tengine
import spack.util.git
import spack.util.web as web_util
from spack.error import SpackError
from spack.util.crypto import checksum
from spack.util.log_parse import parse_log_events
from spack.util.web import ssl_create_default_context

from .base import Reporter
from .extract import extract_test_parts
@@ -106,7 +107,7 @@ def __init__(self, configuration: CDashConfiguration):
        self.site = configuration.site or socket.gethostname()
        self.osname = platform.system()
        self.osrelease = platform.release()
        self.target = spack.platforms.host().target("default_target")
        self.target = spack.platforms.host().default_target()
        self.starttime = int(time.time())
        self.endtime = self.starttime
        self.buildstamp = (
@@ -433,7 +434,6 @@ def upload(self, filename):
        # Compute md5 checksum for the contents of this file.
        md5sum = checksum(hashlib.md5, filename, block_size=8192)

        opener = build_opener(HTTPSHandler(context=ssl_create_default_context()))
        with open(filename, "rb") as f:
            params_dict = {
                "build": self.buildname,
@@ -443,26 +443,21 @@ def upload(self, filename):
            }
            encoded_params = urlencode(params_dict)
            url = "{0}&{1}".format(self.cdash_upload_url, encoded_params)
            request = Request(url, data=f)
            request = Request(url, data=f, method="PUT")
            request.add_header("Content-Type", "text/xml")
            request.add_header("Content-Length", os.path.getsize(filename))
            if self.authtoken:
                request.add_header("Authorization", "Bearer {0}".format(self.authtoken))
            try:
                # By default, urllib2 only supports GET and POST.
                # CDash expects this file to be uploaded via PUT.
                request.get_method = lambda: "PUT"
                response = opener.open(request, timeout=SPACK_CDASH_TIMEOUT)
                response = web_util.urlopen(request, timeout=SPACK_CDASH_TIMEOUT)
                if self.current_package_name not in self.buildIds:
                    resp_value = response.read()
                    if isinstance(resp_value, bytes):
                        resp_value = resp_value.decode("utf-8")
                    resp_value = codecs.getreader("utf-8")(response).read()
                    match = self.buildid_regexp.search(resp_value)
                    if match:
                        buildid = match.group(1)
                        self.buildIds[self.current_package_name] = buildid
            except Exception as e:
                print("Upload to CDash failed: {0}".format(e))
                print(f"Upload to CDash failed: {e}")

    def finalize_report(self):
        if self.buildIds:
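The upload modernization above leans on a stdlib feature; a standalone sketch (URL and payload are placeholders):

import io
from urllib.request import Request

payload = io.BytesIO(b"<Site/>")  # stand-in for the Upload.xml contents
request = Request("https://cdash.example.com/submit.php", data=payload, method="PUT")
request.add_header("Content-Type", "text/xml")
print(request.get_method())  # "PUT" -- no get_method override required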
@@ -87,6 +87,7 @@
            "strategy": {"type": "string", "enum": ["none", "minimal", "full"]}
        },
    },
    "static_analysis": {"type": "boolean"},
    "timeout": {"type": "integer", "minimum": 0},
    "error_on_timeout": {"type": "boolean"},
    "os_compatible": {"type": "object", "additionalProperties": {"type": "array"}},
@@ -62,7 +62,7 @@
    parse_files,
    parse_term,
)
from .counter import FullDuplicatesCounter, MinimalDuplicatesCounter, NoDuplicatesCounter
from .input_analysis import create_counter, create_graph_analyzer
from .requirements import RequirementKind, RequirementParser, RequirementRule
from .version_order import concretization_version_order

@@ -271,15 +271,6 @@ def remove_node(spec: spack.spec.Spec, facts: List[AspFunction]) -> List[AspFunc
    return list(filter(lambda x: x.args[0] not in ("node", "virtual_node"), facts))


def _create_counter(specs: List[spack.spec.Spec], tests: bool):
    strategy = spack.config.CONFIG.get("concretizer:duplicates:strategy", "none")
    if strategy == "full":
        return FullDuplicatesCounter(specs, tests=tests)
    if strategy == "minimal":
        return MinimalDuplicatesCounter(specs, tests=tests)
    return NoDuplicatesCounter(specs, tests=tests)


def all_libcs() -> Set[spack.spec.Spec]:
    """Return a set of all libc specs targeted by any configured compiler. If none, fall back to
    libc determined from the current Python process if dynamically linked."""
@@ -1121,6 +1112,8 @@ class SpackSolverSetup:
    """Class to set up and run a Spack concretization solve."""

    def __init__(self, tests: bool = False):
        self.possible_graph = create_graph_analyzer()

        # these are all initialized in setup()
        self.gen: "ProblemInstanceBuilder" = ProblemInstanceBuilder()
        self.requirement_parser = RequirementParser(spack.config.CONFIG)
@@ -2397,38 +2390,20 @@ def keyfun(os):

    def target_defaults(self, specs):
        """Add facts about targets and target compatibility."""
        self.gen.h2("Default target")

        platform = spack.platforms.host()
        uarch = archspec.cpu.TARGETS.get(platform.default)

        self.gen.h2("Target compatibility")

        # Construct the list of targets which are compatible with the host
        candidate_targets = [uarch] + uarch.ancestors

        # Get configuration options
        granularity = spack.config.get("concretizer:targets:granularity")
        host_compatible = spack.config.get("concretizer:targets:host_compatible")

        # Add targets which are not compatible with the current host
        if not host_compatible:
            additional_targets_in_family = sorted(
                [
                    t
                    for t in archspec.cpu.TARGETS.values()
                    if (t.family.name == uarch.family.name and t not in candidate_targets)
                ],
                key=lambda x: len(x.ancestors),
                reverse=True,
            )
            candidate_targets += additional_targets_in_family

        # Check if we want only generic architecture
        if granularity == "generic":
            candidate_targets = [t for t in candidate_targets if t.vendor == "generic"]

        # Add targets explicitly requested from specs
        candidate_targets = []
        for x in self.possible_graph.candidate_targets():
            if all(
                self.possible_graph.unreachable(pkg_name=pkg_name, when_spec=f"target={x}")
                for pkg_name in self.pkgs
            ):
                tty.debug(f"[{__name__}] excluding target={x}, because no package can use it")
                continue
            candidate_targets.append(x)

        host_compatible = spack.config.CONFIG.get("concretizer:targets:host_compatible")
        for spec in specs:
            if not spec.architecture or not spec.architecture.target:
                continue
@@ -2444,6 +2419,8 @@ def target_defaults(self, specs):
            if ancestor not in candidate_targets:
                candidate_targets.append(ancestor)

        platform = spack.platforms.host()
        uarch = archspec.cpu.TARGETS.get(platform.default)
        best_targets = {uarch.family.name}
        for compiler_id, known_compiler in enumerate(self.possible_compilers):
            if not known_compiler.available:
@@ -2501,7 +2478,6 @@ def target_defaults(self, specs):
        self.gen.newline()

        self.default_targets = list(sorted(set(self.default_targets)))

        self.target_preferences()

    def virtual_providers(self):
@@ -2605,7 +2581,14 @@ def define_variant_values(self):
        # Tell the concretizer about possible values from specs seen in spec_clauses().
        # We might want to order these facts by pkg and name if we are debugging.
        for pkg_name, variant_def_id, value in self.variant_values_from_specs:
            vid = self.variant_ids_by_def_id[variant_def_id]
            try:
                vid = self.variant_ids_by_def_id[variant_def_id]
            except KeyError:
                tty.debug(
                    f"[{__name__}] cannot retrieve id of the {value} variant from {pkg_name}"
                )
                continue

            self.gen.fact(fn.pkg_fact(pkg_name, fn.variant_possible_value(vid, value)))

    def register_concrete_spec(self, spec, possible):
@@ -2676,7 +2659,7 @@ def setup(
        """
        check_packages_exist(specs)

        node_counter = _create_counter(specs, tests=self.tests)
        node_counter = create_counter(specs, tests=self.tests, possible_graph=self.possible_graph)
        self.possible_virtuals = node_counter.possible_virtuals()
        self.pkgs = node_counter.possible_dependencies()
        self.libcs = sorted(all_libcs())  # type: ignore[type-var]
@@ -3489,7 +3472,7 @@ def external_spec_selected(self, node, idx):
        self._specs[node].extra_attributes = spec_info.get("extra_attributes", {})

        # If this is an extension, update the dependencies to include the extendee
        package = self._specs[node].package_class(self._specs[node])
        package = spack.repo.PATH.get_pkg_class(self._specs[node].fullname)(self._specs[node])
        extendee_spec = package.extendee_spec

        if extendee_spec:
@@ -1,179 +0,0 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
from typing import List, Set

from llnl.util import lang

import spack.deptypes as dt
import spack.package_base
import spack.repo
import spack.spec

PossibleDependencies = Set[str]


class Counter:
    """Computes the possible packages and the maximum number of duplicates
    allowed for each of them.

    Args:
        specs: abstract specs to concretize
        tests: if True, add test dependencies to the list of possible packages
    """

    def __init__(self, specs: List["spack.spec.Spec"], tests: bool) -> None:
        runtime_pkgs = spack.repo.PATH.packages_with_tags("runtime")
        runtime_virtuals = set()
        for x in runtime_pkgs:
            pkg_class = spack.repo.PATH.get_pkg_class(x)
            runtime_virtuals.update(pkg_class.provided_virtual_names())

        self.specs = specs + [spack.spec.Spec(x) for x in runtime_pkgs]

        self.link_run_types: dt.DepFlag = dt.LINK | dt.RUN | dt.TEST
        self.all_types: dt.DepFlag = dt.ALL
        if not tests:
            self.link_run_types = dt.LINK | dt.RUN
            self.all_types = dt.LINK | dt.RUN | dt.BUILD

        self._possible_dependencies: PossibleDependencies = set()
        self._possible_virtuals: Set[str] = (
            set(x.name for x in specs if x.virtual) | runtime_virtuals
        )

    def possible_dependencies(self) -> PossibleDependencies:
        """Returns the list of possible dependencies"""
        self.ensure_cache_values()
        return self._possible_dependencies

    def possible_virtuals(self) -> Set[str]:
        """Returns the list of possible virtuals"""
        self.ensure_cache_values()
        return self._possible_virtuals

    def ensure_cache_values(self) -> None:
        """Ensure the cache values have been computed"""
        if self._possible_dependencies:
            return
        self._compute_cache_values()

    def possible_packages_facts(self, gen: "spack.solver.asp.PyclingoDriver", fn) -> None:
        """Emit facts associated with the possible packages"""
        raise NotImplementedError("must be implemented by derived classes")

    def _compute_cache_values(self):
        raise NotImplementedError("must be implemented by derived classes")


class NoDuplicatesCounter(Counter):
    def _compute_cache_values(self):
        result = spack.package_base.possible_dependencies(
            *self.specs, virtuals=self._possible_virtuals, depflag=self.all_types
        )
        self._possible_dependencies = set(result)

    def possible_packages_facts(self, gen, fn):
        gen.h2("Maximum number of nodes (packages)")
        for package_name in sorted(self.possible_dependencies()):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()
        gen.h2("Maximum number of nodes (virtual packages)")
        for package_name in sorted(self.possible_virtuals()):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()
        gen.h2("Possible package in link-run subDAG")
        for name in sorted(self.possible_dependencies()):
            gen.fact(fn.possible_in_link_run(name))
        gen.newline()


class MinimalDuplicatesCounter(NoDuplicatesCounter):
    def __init__(self, specs, tests):
        super().__init__(specs, tests)
        self._link_run: PossibleDependencies = set()
        self._direct_build: PossibleDependencies = set()
        self._total_build: PossibleDependencies = set()
        self._link_run_virtuals: Set[str] = set()

    def _compute_cache_values(self):
        self._link_run = set(
            spack.package_base.possible_dependencies(
                *self.specs, virtuals=self._possible_virtuals, depflag=self.link_run_types
            )
        )
        self._link_run_virtuals.update(self._possible_virtuals)
        for x in self._link_run:
            build_dependencies = spack.repo.PATH.get_pkg_class(x).dependencies_of_type(dt.BUILD)
            virtuals, reals = lang.stable_partition(
                build_dependencies, spack.repo.PATH.is_virtual_safe
            )

            self._possible_virtuals.update(virtuals)
            for virtual_dep in virtuals:
                providers = spack.repo.PATH.providers_for(virtual_dep)
                self._direct_build.update(str(x) for x in providers)

            self._direct_build.update(reals)

        self._total_build = set(
            spack.package_base.possible_dependencies(
                *self._direct_build, virtuals=self._possible_virtuals, depflag=self.all_types
            )
        )
        self._possible_dependencies = set(self._link_run) | set(self._total_build)

    def possible_packages_facts(self, gen, fn):
        build_tools = spack.repo.PATH.packages_with_tags("build-tools")
        gen.h2("Packages with at most a single node")
        for package_name in sorted(self.possible_dependencies() - build_tools):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()

        gen.h2("Packages with multiple possible nodes (build-tools)")
        for package_name in sorted(self.possible_dependencies() & build_tools):
            gen.fact(fn.max_dupes(package_name, 2))
            gen.fact(fn.multiple_unification_sets(package_name))
        gen.newline()

        gen.h2("Maximum number of nodes (virtual packages)")
        for package_name in sorted(self.possible_virtuals()):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()

        gen.h2("Possible package in link-run subDAG")
        for name in sorted(self._link_run):
            gen.fact(fn.possible_in_link_run(name))
        gen.newline()


class FullDuplicatesCounter(MinimalDuplicatesCounter):
    def possible_packages_facts(self, gen, fn):
        build_tools = spack.repo.PATH.packages_with_tags("build-tools")
        counter = collections.Counter(
            list(self._link_run) + list(self._total_build) + list(self._direct_build)
        )
        gen.h2("Maximum number of nodes")
        for pkg, count in sorted(counter.items(), key=lambda x: (x[1], x[0])):
            count = min(count, 2)
            gen.fact(fn.max_dupes(pkg, count))
        gen.newline()

        gen.h2("Build unification sets ")
        for name in sorted(self.possible_dependencies() & build_tools):
            gen.fact(fn.multiple_unification_sets(name))
        gen.newline()

        gen.h2("Possible package in link-run subDAG")
        for name in sorted(self._link_run):
            gen.fact(fn.possible_in_link_run(name))
        gen.newline()

        counter = collections.Counter(
            list(self._link_run_virtuals) + list(self._possible_virtuals)
        )
        gen.h2("Maximum number of virtual nodes")
        for pkg, count in sorted(counter.items(), key=lambda x: (x[1], x[0])):
            gen.fact(fn.max_dupes(pkg, count))
        gen.newline()
lib/spack/spack/solver/input_analysis.py (new file, 524 lines)
@@ -0,0 +1,524 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Classes to analyze the input of a solve, and provide information to set up the ASP problem"""
import collections
from typing import Dict, List, NamedTuple, Set, Tuple, Union

import archspec.cpu

from llnl.util import lang, tty

import spack.binary_distribution
import spack.config
import spack.deptypes as dt
import spack.platforms
import spack.repo
import spack.spec
import spack.store
from spack.error import SpackError

RUNTIME_TAG = "runtime"


class PossibleGraph(NamedTuple):
    real_pkgs: Set[str]
    virtuals: Set[str]
    edges: Dict[str, Set[str]]


class PossibleDependencyGraph:
    """Returns information needed to set up an ASP problem"""

    def unreachable(self, *, pkg_name: str, when_spec: spack.spec.Spec) -> bool:
        """Returns true if the context can determine that the condition cannot ever
        be met on pkg_name.
        """
        raise NotImplementedError

    def candidate_targets(self) -> List[archspec.cpu.Microarchitecture]:
        """Returns a list of targets that are candidates for concretization"""
        raise NotImplementedError

    def possible_dependencies(
        self,
        *specs: Union[spack.spec.Spec, str],
        allowed_deps: dt.DepFlag,
        transitive: bool = True,
        strict_depflag: bool = False,
        expand_virtuals: bool = True,
    ) -> PossibleGraph:
        """Returns the set of possible dependencies, and the set of possible virtuals.

        Both sets always include runtime packages, which may be injected by compilers.

        Args:
            transitive: return transitive dependencies if True, only direct dependencies if False
            allowed_deps: dependency types to consider
            strict_depflag: if True, only the specific dep type is considered; if False, any
                deptype that intersects with the allowed deptypes is considered
            expand_virtuals: expand virtual dependencies into all possible implementations
        """
        raise NotImplementedError


class NoStaticAnalysis(PossibleDependencyGraph):
    """Implementation that tries to minimize the setup time (i.e. defaults to giving fast
    answers), rather than trying to reduce the ASP problem size with more complex analysis.
    """

    def __init__(self, *, configuration: spack.config.Configuration, repo: spack.repo.RepoPath):
        self.configuration = configuration
        self.repo = repo
        self.runtime_pkgs = set(self.repo.packages_with_tags(RUNTIME_TAG))
        self.runtime_virtuals = set()
        self._platform_condition = spack.spec.Spec(
            f"platform={spack.platforms.host()} target={archspec.cpu.host().family}:"
        )
        for x in self.runtime_pkgs:
            pkg_class = self.repo.get_pkg_class(x)
            self.runtime_virtuals.update(pkg_class.provided_virtual_names())

        try:
            self.libc_pkgs = [x.name for x in self.providers_for("libc")]
        except spack.repo.UnknownPackageError:
            self.libc_pkgs = []

    def is_virtual(self, name: str) -> bool:
        return self.repo.is_virtual(name)

    @lang.memoized
    def is_allowed_on_this_platform(self, *, pkg_name: str) -> bool:
        """Returns true if a package is allowed on the current host"""
        pkg_cls = self.repo.get_pkg_class(pkg_name)
        for when_spec, conditions in pkg_cls.requirements.items():
            if not when_spec.intersects(self._platform_condition):
                continue
            for requirements, _, _ in conditions:
                if not any(x.intersects(self._platform_condition) for x in requirements):
                    tty.debug(f"[{__name__}] {pkg_name} is not for this platform")
                    return False
        return True

    def providers_for(self, virtual_str: str) -> List[spack.spec.Spec]:
        """Returns a list of possible providers for the virtual string in input."""
        return self.repo.providers_for(virtual_str)

    def can_be_installed(self, *, pkg_name) -> bool:
        """Returns True if a package can be installed, False otherwise."""
        return True

    def unreachable(self, *, pkg_name: str, when_spec: spack.spec.Spec) -> bool:
        """Returns true if the context can determine that the condition cannot ever
        be met on pkg_name.
        """
        return False

    def candidate_targets(self) -> List[archspec.cpu.Microarchitecture]:
        """Returns a list of targets that are candidates for concretization"""
        platform = spack.platforms.host()
        default_target = archspec.cpu.TARGETS[platform.default]

        # Construct the list of targets which are compatible with the host
        candidate_targets = [default_target] + default_target.ancestors
        granularity = self.configuration.get("concretizer:targets:granularity")
        host_compatible = self.configuration.get("concretizer:targets:host_compatible")

        # Add targets which are not compatible with the current host
        if not host_compatible:
            additional_targets_in_family = sorted(
                [
                    t
                    for t in archspec.cpu.TARGETS.values()
                    if (t.family.name == default_target.family.name and t not in candidate_targets)
                ],
                key=lambda x: len(x.ancestors),
                reverse=True,
            )
            candidate_targets += additional_targets_in_family

        # Check if we want only generic architecture
        if granularity == "generic":
            candidate_targets = [t for t in candidate_targets if t.vendor == "generic"]

        return candidate_targets

    def possible_dependencies(
        self,
        *specs: Union[spack.spec.Spec, str],
        allowed_deps: dt.DepFlag,
        transitive: bool = True,
        strict_depflag: bool = False,
        expand_virtuals: bool = True,
    ) -> PossibleGraph:
        stack = [x for x in self._package_list(specs)]
        virtuals: Set[str] = set()
        edges: Dict[str, Set[str]] = {}

        while stack:
            pkg_name = stack.pop()

            if pkg_name in edges:
                continue

            edges[pkg_name] = set()

            # Since libc is not buildable, there is no need to extend the
            # search space with libc dependencies.
            if pkg_name in self.libc_pkgs:
                continue

            pkg_cls = self.repo.get_pkg_class(pkg_name=pkg_name)
            for name, conditions in pkg_cls.dependencies_by_name(when=True).items():
                if all(self.unreachable(pkg_name=pkg_name, when_spec=x) for x in conditions):
                    tty.debug(
                        f"[{__name__}] Not adding {name} as a dep of {pkg_name}, because "
                        f"conditions cannot be met"
                    )
                    continue

                if not self._has_deptypes(
                    conditions, allowed_deps=allowed_deps, strict=strict_depflag
                ):
                    continue

                if name in virtuals:
                    continue

                dep_names = set()
                if self.is_virtual(name):
                    virtuals.add(name)
                    if expand_virtuals:
                        providers = self.providers_for(name)
                        dep_names = {spec.name for spec in providers}
                else:
                    dep_names = {name}

                edges[pkg_name].update(dep_names)

                if not transitive:
                    continue

                for dep_name in dep_names:
                    if dep_name in edges:
                        continue

                    if not self._is_possible(pkg_name=dep_name):
                        continue

                    stack.append(dep_name)

        real_packages = set(edges)
        if not transitive:
            # We exit early, so add children from the edges information
            for root, children in edges.items():
                real_packages.update(x for x in children if self._is_possible(pkg_name=x))

        virtuals.update(self.runtime_virtuals)
        real_packages = real_packages | self.runtime_pkgs
        return PossibleGraph(real_pkgs=real_packages, virtuals=virtuals, edges=edges)

    def _package_list(self, specs: Tuple[Union[spack.spec.Spec, str], ...]) -> List[str]:
        stack = []
        for current_spec in specs:
            if isinstance(current_spec, str):
                current_spec = spack.spec.Spec(current_spec)

            if self.repo.is_virtual(current_spec.name):
                stack.extend([p.name for p in self.providers_for(current_spec.name)])
                continue

            stack.append(current_spec.name)
        return sorted(set(stack))

    def _has_deptypes(self, dependencies, *, allowed_deps: dt.DepFlag, strict: bool) -> bool:
        if strict is True:
            return any(
                dep.depflag == allowed_deps for deplist in dependencies.values() for dep in deplist
            )
        return any(
            dep.depflag & allowed_deps for deplist in dependencies.values() for dep in deplist
        )

    def _is_possible(self, *, pkg_name):
        try:
            return self.is_allowed_on_this_platform(pkg_name=pkg_name) and self.can_be_installed(
                pkg_name=pkg_name
            )
        except spack.repo.UnknownPackageError:
            return False
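A sketch of querying the analyzer defined above; the package name and dep flags are assumptions:

import spack.config
import spack.deptypes as dt
import spack.repo
from spack.solver.input_analysis import NoStaticAnalysis

analyzer = NoStaticAnalysis(configuration=spack.config.CONFIG, repo=spack.repo.PATH)
real_pkgs, virtuals, edges = analyzer.possible_dependencies(
    "cmake", allowed_deps=dt.LINK | dt.RUN | dt.BUILD  # hypothetical query
)
print(len(real_pkgs), sorted(virtuals))  # runtime packages are always included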
class StaticAnalysis(NoStaticAnalysis):
    """Performs some static analysis of the configuration, store, etc. to provide more precise
    answers on whether some packages can be installed, or used as a provider.

    It increases the setup time, but might decrease the grounding and solve time considerably,
    especially when requirements restrict the possible choices for providers.
    """

    def __init__(
        self,
        *,
        configuration: spack.config.Configuration,
        repo: spack.repo.RepoPath,
        store: spack.store.Store,
        binary_index: spack.binary_distribution.BinaryCacheIndex,
    ):
        super().__init__(configuration=configuration, repo=repo)
        self.store = store
        self.binary_index = binary_index

    @lang.memoized
    def providers_for(self, virtual_str: str) -> List[spack.spec.Spec]:
        candidates = super().providers_for(virtual_str)
        result = []
        for spec in candidates:
            if not self._is_provider_candidate(pkg_name=spec.name, virtual=virtual_str):
                continue
            result.append(spec)
        return result

    @lang.memoized
    def buildcache_specs(self) -> List[spack.spec.Spec]:
        self.binary_index.update()
        return self.binary_index.get_all_built_specs()

    @lang.memoized
    def can_be_installed(self, *, pkg_name) -> bool:
        if self.configuration.get(f"packages:{pkg_name}:buildable", True):
            return True

        if self.configuration.get(f"packages:{pkg_name}:externals", []):
            return True

        reuse = self.configuration.get("concretizer:reuse")
        if reuse is not False and self.store.db.query(pkg_name):
            return True

        if reuse is not False and any(x.name == pkg_name for x in self.buildcache_specs()):
            return True

        tty.debug(f"[{__name__}] {pkg_name} cannot be installed")
        return False

    @lang.memoized
    def _is_provider_candidate(self, *, pkg_name: str, virtual: str) -> bool:
        if not self.is_allowed_on_this_platform(pkg_name=pkg_name):
            return False

        if not self.can_be_installed(pkg_name=pkg_name):
            return False

        virtual_spec = spack.spec.Spec(virtual)
        if self.unreachable(pkg_name=virtual_spec.name, when_spec=pkg_name):
            tty.debug(f"[{__name__}] {pkg_name} cannot be a provider for {virtual}")
            return False

        return True

    @lang.memoized
    def unreachable(self, *, pkg_name: str, when_spec: spack.spec.Spec) -> bool:
        """Returns true if the context can determine that the condition cannot ever
        be met on pkg_name.
        """
        candidates = self.configuration.get(f"packages:{pkg_name}:require", [])
        if not candidates and pkg_name != "all":
            return self.unreachable(pkg_name="all", when_spec=when_spec)

        if not candidates:
            return False

        if isinstance(candidates, str):
            candidates = [candidates]

        union_requirement = spack.spec.Spec()
        for c in candidates:
            if not isinstance(c, str):
                continue
            try:
                union_requirement.constrain(c)
            except SpackError:
                # Less optimized, but shouldn't fail
                pass

        if not union_requirement.intersects(when_spec):
            return True

        return False
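The pruning test in StaticAnalysis.unreachable() reduces to a Spec intersection; in isolation, with made-up requirements:

import spack.spec

union_requirement = spack.spec.Spec()
union_requirement.constrain("platform=linux")  # assumed packages.yaml `require:` entry

when_spec = spack.spec.Spec("platform=darwin")
# No possible spec satisfies both, so any condition gated on when_spec
# can never fire for this package.
print(not union_requirement.intersects(when_spec))  # True -> unreachable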
def create_graph_analyzer() -> PossibleDependencyGraph:
    static_analysis = spack.config.CONFIG.get("concretizer:static_analysis", False)
    if static_analysis:
        return StaticAnalysis(
            configuration=spack.config.CONFIG,
            repo=spack.repo.PATH,
            store=spack.store.STORE,
            binary_index=spack.binary_distribution.BINARY_INDEX,
        )
    return NoStaticAnalysis(configuration=spack.config.CONFIG, repo=spack.repo.PATH)
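A sketch of flipping the factory above to the static-analysis implementation; setting the flag programmatically like this is an assumption about the config API, and the usual route would be the concretizer config file covered by the schema hunk earlier:

import spack.config
from spack.solver.input_analysis import create_graph_analyzer

spack.config.CONFIG.set("concretizer:static_analysis", True)  # assumed API
analyzer = create_graph_analyzer()  # now a StaticAnalysis instance
print(type(analyzer).__name__)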
class Counter:
|
||||
"""Computes the possible packages and the maximum number of duplicates
|
||||
allowed for each of them.
|
||||
|
||||
Args:
|
||||
specs: abstract specs to concretize
|
||||
tests: if True, add test dependencies to the list of possible packages
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, specs: List["spack.spec.Spec"], tests: bool, possible_graph: PossibleDependencyGraph
|
||||
) -> None:
|
||||
self.possible_graph = possible_graph
|
||||
self.specs = specs
|
||||
self.link_run_types: dt.DepFlag = dt.LINK | dt.RUN | dt.TEST
|
||||
self.all_types: dt.DepFlag = dt.ALL
|
||||
if not tests:
|
||||
self.link_run_types = dt.LINK | dt.RUN
|
||||
self.all_types = dt.LINK | dt.RUN | dt.BUILD
|
||||
|
||||
self._possible_dependencies: Set[str] = set()
|
||||
self._possible_virtuals: Set[str] = set(x.name for x in specs if x.virtual)
|
||||
|
||||
def possible_dependencies(self) -> Set[str]:
|
||||
"""Returns the list of possible dependencies"""
|
||||
self.ensure_cache_values()
|
||||
return self._possible_dependencies
|
||||
|
||||
def possible_virtuals(self) -> Set[str]:
|
||||
"""Returns the list of possible virtuals"""
|
||||
self.ensure_cache_values()
|
||||
return self._possible_virtuals
|
||||
|
||||
def ensure_cache_values(self) -> None:
|
||||
"""Ensure the cache values have been computed"""
|
||||
if self._possible_dependencies:
|
||||
return
|
||||
self._compute_cache_values()
|
||||
|
||||
def possible_packages_facts(self, gen: "spack.solver.asp.ProblemInstanceBuilder", fn) -> None:
|
||||
"""Emit facts associated with the possible packages"""
|
||||
        raise NotImplementedError("must be implemented by derived classes")

    def _compute_cache_values(self) -> None:
        raise NotImplementedError("must be implemented by derived classes")


class NoDuplicatesCounter(Counter):
    def _compute_cache_values(self) -> None:
        self._possible_dependencies, virtuals, _ = self.possible_graph.possible_dependencies(
            *self.specs, allowed_deps=self.all_types
        )
        self._possible_virtuals.update(virtuals)

    def possible_packages_facts(self, gen: "spack.solver.asp.ProblemInstanceBuilder", fn) -> None:
        gen.h2("Maximum number of nodes (packages)")
        for package_name in sorted(self.possible_dependencies()):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()
        gen.h2("Maximum number of nodes (virtual packages)")
        for package_name in sorted(self.possible_virtuals()):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()
        gen.h2("Possible package in link-run subDAG")
        for name in sorted(self.possible_dependencies()):
            gen.fact(fn.possible_in_link_run(name))
        gen.newline()


class MinimalDuplicatesCounter(NoDuplicatesCounter):
    def __init__(
        self, specs: List["spack.spec.Spec"], tests: bool, possible_graph: PossibleDependencyGraph
    ) -> None:
        super().__init__(specs, tests, possible_graph)
        self._link_run: Set[str] = set()
        self._direct_build: Set[str] = set()
        self._total_build: Set[str] = set()
        self._link_run_virtuals: Set[str] = set()

    def _compute_cache_values(self) -> None:
        self._link_run, virtuals, _ = self.possible_graph.possible_dependencies(
            *self.specs, allowed_deps=self.link_run_types
        )
        self._possible_virtuals.update(virtuals)
        self._link_run_virtuals.update(virtuals)
        for x in self._link_run:
            reals, virtuals, _ = self.possible_graph.possible_dependencies(
                x, allowed_deps=dt.BUILD, transitive=False, strict_depflag=True
            )
            self._possible_virtuals.update(virtuals)
            self._direct_build.update(reals)

        self._total_build, virtuals, _ = self.possible_graph.possible_dependencies(
            *self._direct_build, allowed_deps=self.all_types
        )
        self._possible_virtuals.update(virtuals)
        self._possible_dependencies = set(self._link_run) | set(self._total_build)

    def possible_packages_facts(self, gen, fn):
        build_tools = spack.repo.PATH.packages_with_tags("build-tools")
        gen.h2("Packages with at most a single node")
        for package_name in sorted(self.possible_dependencies() - build_tools):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()

        gen.h2("Packages with multiple possible nodes (build-tools)")
        for package_name in sorted(self.possible_dependencies() & build_tools):
            gen.fact(fn.max_dupes(package_name, 2))
            gen.fact(fn.multiple_unification_sets(package_name))
        gen.newline()

        gen.h2("Maximum number of nodes (virtual packages)")
        for package_name in sorted(self.possible_virtuals()):
            gen.fact(fn.max_dupes(package_name, 1))
        gen.newline()

        gen.h2("Possible package in link-run subDAG")
        for name in sorted(self._link_run):
            gen.fact(fn.possible_in_link_run(name))
        gen.newline()


class FullDuplicatesCounter(MinimalDuplicatesCounter):
    def possible_packages_facts(self, gen, fn):
        build_tools = spack.repo.PATH.packages_with_tags("build-tools")
        counter = collections.Counter(
            list(self._link_run) + list(self._total_build) + list(self._direct_build)
        )
        gen.h2("Maximum number of nodes")
        for pkg, count in sorted(counter.items(), key=lambda x: (x[1], x[0])):
            count = min(count, 2)
            gen.fact(fn.max_dupes(pkg, count))
        gen.newline()

        gen.h2("Build unification sets")
        for name in sorted(self.possible_dependencies() & build_tools):
            gen.fact(fn.multiple_unification_sets(name))
        gen.newline()

        gen.h2("Possible package in link-run subDAG")
        for name in sorted(self._link_run):
            gen.fact(fn.possible_in_link_run(name))
        gen.newline()

        counter = collections.Counter(
            list(self._link_run_virtuals) + list(self._possible_virtuals)
        )
        gen.h2("Maximum number of virtual nodes")
        for pkg, count in sorted(counter.items(), key=lambda x: (x[1], x[0])):
            gen.fact(fn.max_dupes(pkg, count))
        gen.newline()


def create_counter(
    specs: List[spack.spec.Spec], tests: bool, possible_graph: PossibleDependencyGraph
) -> Counter:
    strategy = spack.config.CONFIG.get("concretizer:duplicates:strategy", "none")
    if strategy == "full":
        return FullDuplicatesCounter(specs, tests=tests, possible_graph=possible_graph)
    if strategy == "minimal":
        return MinimalDuplicatesCounter(specs, tests=tests, possible_graph=possible_graph)
    return NoDuplicatesCounter(specs, tests=tests, possible_graph=possible_graph)
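The factory above is the single entry point; which `Counter` subclass you get is driven entirely by the `concretizer:duplicates:strategy` setting. A minimal sketch of that dispatch, reduced to a plain lookup table (the `pick_counter_class` helper is illustrative, not part of the diff):

```python
# Hedged sketch: the same dispatch as create_counter above, as a lookup table.
# Any unrecognized strategy (including the default "none") falls back to
# NoDuplicatesCounter, mirroring the final return statement.
_STRATEGY_TO_COUNTER = {"full": FullDuplicatesCounter, "minimal": MinimalDuplicatesCounter}


def pick_counter_class(strategy: str) -> type:
    return _STRATEGY_TO_COUNTER.get(strategy, NoDuplicatesCounter)


assert pick_counter_class("minimal") is MinimalDuplicatesCounter
assert pick_counter_class("none") is NoDuplicatesCounter
```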
@@ -230,30 +230,21 @@ def ensure_modern_format_string(fmt: str) -> None:
def _make_microarchitecture(name: str) -> archspec.cpu.Microarchitecture:
    if isinstance(name, archspec.cpu.Microarchitecture):
        return name
    return archspec.cpu.TARGETS.get(name) or archspec.cpu.generic_microarchitecture(name)
    return archspec.cpu.TARGETS.get(name, archspec.cpu.generic_microarchitecture(name))
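The two variants above differ in when the fallback is built: `dict.get(key, default)` evaluates its second argument unconditionally, while `get(key) or default` only constructs the fallback on a miss (and treats a falsy cached value as a miss, which is harmless here since microarchitecture objects are truthy). A small self-contained illustration; the names below are invented for the example:

```python
# dict.get(name, default) evaluates the default even on a cache hit;
# `get(name) or default` short-circuits and skips the fallback entirely.
calls = []


def make_generic(name):
    calls.append(name)  # record that the fallback was constructed
    return f"generic-{name}"


targets = {"zen2": "zen2-uarch"}
targets.get("zen2", make_generic("zen2"))  # hit, but fallback still built
assert calls == ["zen2"]
result = targets.get("zen2") or make_generic("zen2")  # hit, fallback skipped
assert calls == ["zen2"] and result == "zen2-uarch"
```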
@lang.lazy_lexicographic_ordering
class ArchSpec:
    """Aggregate the target platform, the operating system and the target microarchitecture."""

    @staticmethod
    def _return_arch(os_tag, target_tag):
        platform = spack.platforms.host()
        default_os = platform.operating_system(os_tag)
        default_target = platform.target(target_tag)
        arch_tuple = str(platform), str(default_os), str(default_target)
        return ArchSpec(arch_tuple)

    @staticmethod
    def default_arch():
        """Return the default architecture"""
        return ArchSpec._return_arch("default_os", "default_target")

    @staticmethod
    def frontend_arch():
        """Return the frontend architecture"""
        return ArchSpec._return_arch("frontend", "frontend")
        platform = spack.platforms.host()
        default_os = platform.default_operating_system()
        default_target = platform.default_target()
        arch_tuple = str(platform), str(default_os), str(default_target)
        return ArchSpec(arch_tuple)

    __slots__ = "_platform", "_os", "_target"

@@ -467,10 +458,10 @@ def _target_constrain(self, other: "ArchSpec") -> bool:
        if not other._target_satisfies(self, strict=False):
            raise UnsatisfiableArchitectureSpecError(self, other)

        if self._target_concrete:
        if self.target_concrete:
            return False

        elif other._target_concrete:
        elif other.target_concrete:
            self.target = other.target
            return True

@@ -485,8 +476,8 @@ def _target_constrain(self, other: "ArchSpec") -> bool:
            self.target = intersection_target
            return True

    def _target_intersection(self, other: "ArchSpec") -> List[str]:
        results: List[str] = []
    def _target_intersection(self, other):
        results = []

        if not self.target or not other.target:
            return results
@@ -593,23 +584,23 @@ def constrain(self, other: "ArchSpec") -> bool:

        return constrained

    def copy(self) -> "ArchSpec":
    def copy(self):
        """Copy the current instance and returns the clone."""
        return ArchSpec(self)

    @property
    def concrete(self):
        """True if the spec is concrete, False otherwise"""
        return self.platform and self.os and self.target and self._target_concrete
        return self.platform and self.os and self.target and self.target_concrete

    @property
    def _target_concrete(self) -> bool:
    def target_concrete(self):
        """True if the target is not a range or list."""
        return (
            self.target is not None and ":" not in str(self.target) and "," not in str(self.target)
        )

    def to_dict(self) -> dict:
    def to_dict(self):
        # Generic targets represent either an architecture family (like x86_64)
        # or a custom micro-architecture
        if self.target.vendor == "generic":
@@ -621,7 +612,7 @@ def to_dict(self) -> dict:
        return {"arch": {"platform": self.platform, "platform_os": self.os, "target": target_data}}

    @staticmethod
    def from_dict(d: dict) -> "ArchSpec":
    def from_dict(d):
        """Import an ArchSpec from raw YAML/JSON data"""
        arch = d["arch"]
        target_name = arch["target"]
@@ -631,12 +622,13 @@ def from_dict(d: dict) -> "ArchSpec":
        return ArchSpec((arch["platform"], arch["platform_os"], target))

    def __str__(self):
        return f"{self.platform}-{self.os}-{self.target}"
        return "%s-%s-%s" % (self.platform, self.os, self.target)

    def __repr__(self):
        return f"ArchSpec(({self.platform!r}, {self.os!r}, {str(self.target)!r}))"
        fmt = "ArchSpec(({0.platform!r}, {0.os!r}, {1!r}))"
        return fmt.format(self, str(self.target))

    def __contains__(self, string) -> bool:
    def __contains__(self, string):
        return string in str(self) or string in self.target


@@ -1535,9 +1527,8 @@ def __init__(self, spec_like=None, *, external_path=None, external_modules=None)
        self._external_path = external_path
        self.external_modules = Spec._format_module_list(external_modules)

        # This attribute is used to store custom information for
        # external specs. None signal that it was not set yet.
        self.extra_attributes = None
        # This attribute is used to store custom information for external specs.
        self.extra_attributes: dict = {}

        # This attribute holds the original build copy of the spec if it is
        # deployed differently than it was built. None signals that the spec
@@ -1914,6 +1905,12 @@ def package_class(self):
        """Internal package call gets only the class object for a package.
        Use this to just get package metadata.
        """
        warnings.warn(
            "`Spec.package_class` is deprecated and will be removed in version 1.0.0. Use "
            "`spack.repo.PATH.get_pkg_class(spec.fullname)` instead.",
            category=spack.error.SpackAPIWarning,
            stacklevel=2,
        )
        return spack.repo.PATH.get_pkg_class(self.fullname)

    @property
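The deprecation above points callers at the repository path instead of the spec object. A hedged sketch of the migration it asks for (assumes a Spack checkout on `sys.path`; `"zlib"` is a placeholder package name):

```python
import spack.repo
import spack.spec

spec = spack.spec.Spec("zlib")  # placeholder spec
# old, deprecated spelling: pkg_cls = spec.package_class
pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname)  # new spelling
print(pkg_cls.__name__)
```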
@@ -2359,15 +2356,10 @@ def to_node_dict(self, hash=ht.dag_hash):
        )

        if self.external:
            if self.extra_attributes:
                extra_attributes = syaml.sorted_dict(self.extra_attributes)
            else:
                extra_attributes = None

            d["external"] = {
                "path": self.external_path,
                "module": self.external_modules,
                "extra_attributes": extra_attributes,
                "module": self.external_modules or None,
                "extra_attributes": syaml.sorted_dict(self.extra_attributes),
            }

        if not self._concrete:
@@ -2878,7 +2870,7 @@ def inject_patches_variant(root):

        # Add any patches from the package to the spec.
        patches = set()
        for cond, patch_list in s.package_class.patches.items():
        for cond, patch_list in spack.repo.PATH.get_pkg_class(s.fullname).patches.items():
            if s.satisfies(cond):
                for patch in patch_list:
                    patches.add(patch)
@@ -2891,7 +2883,7 @@ def inject_patches_variant(root):
            if dspec.spec.concrete:
                continue

            pkg_deps = dspec.parent.package_class.dependencies
            pkg_deps = spack.repo.PATH.get_pkg_class(dspec.parent.fullname).dependencies

            patches = []
            for cond, deps_by_name in pkg_deps.items():
@@ -3125,7 +3117,7 @@ def ensure_valid_variants(spec):
    if spec.concrete:
        return

    pkg_cls = spec.package_class
    pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname)
    pkg_variants = pkg_cls.variant_names()
    # reserved names are variants that may be set on any package
    # but are not necessarily recorded by the package's class
@@ -3869,6 +3861,13 @@ def _cmp_iter(self):
        for item in self._cmp_node():
            yield item

        # If there is ever a breaking change to hash computation, whether accidental or purposeful,
        # two specs can be identical modulo DAG hash, depending on what time they were concretized.
        # From the perspective of many operations in Spack (database, build cache, etc.) a different
        # DAG hash means a different spec. Here we ensure that two otherwise identical specs, one
        # serialized before the hash change and one after, are considered different.
        yield self.dag_hash() if self.concrete else None

        # This needs to be in _cmp_iter so that no specs with different process hashes
        # are considered the same by `__hash__` or `__eq__`.
        #
@@ -4712,7 +4711,7 @@ def concrete(self):
            bool: True or False
        """
        return self.spec._concrete or all(
            v in self for v in self.spec.package_class.variant_names()
            v in self for v in spack.repo.PATH.get_pkg_class(self.spec.fullname).variant_names()
        )

    def copy(self) -> "VariantMap":
@@ -4734,7 +4733,10 @@ def __str__(self):
        bool_keys = []
        kv_keys = []
        for key in sorted_keys:
            bool_keys.append(key) if isinstance(self[key].value, bool) else kv_keys.append(key)
            if isinstance(self[key].value, bool):
                bool_keys.append(key)
            else:
                kv_keys.append(key)
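The last hunk above replaces a conditional *expression* used purely for its side effects with an explicit `if`/`else`. A standalone illustration of why the statement form reads better (the list names are invented for the example):

```python
evens, odds = [], []
for n in range(4):
    # old pattern (works, but abuses a value-producing expression as a statement):
    #     evens.append(n) if n % 2 == 0 else odds.append(n)
    if n % 2 == 0:
        evens.append(n)
    else:
        odds.append(n)
assert evens == [0, 2] and odds == [1, 3]
```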
        # add spaces before and after key/value variants.
        string = io.StringIO()
@@ -4769,14 +4771,14 @@ def substitute_abstract_variants(spec: Spec):
        elif name in vt.reserved_names:
            continue

        variant_defs = spec.package_class.variant_definitions(name)
        variant_defs = spack.repo.PATH.get_pkg_class(spec.fullname).variant_definitions(name)
        valid_defs = []
        for when, vdef in variant_defs:
            if when.intersects(spec):
                valid_defs.append(vdef)

        if not valid_defs:
            if name not in spec.package_class.variant_names():
            if name not in spack.repo.PATH.get_pkg_class(spec.fullname).variant_names():
                unknown.append(name)
            else:
                whens = [str(when) for when, _ in variant_defs]
@@ -4913,7 +4915,7 @@ def from_node_dict(cls, node):
            spec.external_modules = node["external"]["module"]
            if spec.external_modules is False:
                spec.external_modules = None
            spec.extra_attributes = node["external"].get("extra_attributes", {})
            spec.extra_attributes = node["external"].get("extra_attributes") or {}

        # specs read in are concrete unless marked abstract
        if node.get("concrete", True):
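The switch from `.get("extra_attributes", {})` to `.get("extra_attributes") or {}` above is not cosmetic: `dict.get`'s default only applies when the key is *absent*, not when it is present with an explicit `null`/`None`, which serialized external specs can carry. A minimal demonstration:

```python
node = {"extra_attributes": None}  # key present, value null (as in older JSON/YAML)
assert node.get("extra_attributes", {}) is None  # default NOT used
assert (node.get("extra_attributes") or {}) == {}  # normalized to a dict

missing = {}
assert missing.get("extra_attributes", {}) == {}  # default used only when absent
```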
@@ -5190,12 +5192,10 @@ def get_host_environment_metadata() -> Dict[str, str]:


def get_host_environment() -> Dict[str, Any]:
    """Return a dictionary (lookup) with host information (not including the
    os.environ).
    """
    """Returns a dictionary with host information (not including the os.environ)."""
    host_platform = spack.platforms.host()
    host_target = host_platform.target("default_target")
    host_os = host_platform.operating_system("default_os")
    host_target = host_platform.default_target()
    host_os = host_platform.default_operating_system()
    arch_fmt = "platform={0} os={1} target={2}"
    arch_spec = Spec(arch_fmt.format(host_platform, host_os, host_target))
    return {
@@ -60,8 +60,7 @@ def test_user_input_combination(config, target_str, os_str):
    """Test for all the valid user input combinations that both the target and
    the operating system match.
    """
    spec_str = "libelf os={} target={}".format(os_str, target_str)
    spec = Spec(spec_str)
    spec = Spec(f"libelf os={os_str} target={target_str}")
    assert spec.architecture.os == str(TEST_PLATFORM.operating_system(os_str))
    assert spec.architecture.target == TEST_PLATFORM.target(target_str)

@@ -71,8 +70,8 @@ def test_default_os_and_target(default_mock_concretization):
    after concretization.
    """
    spec = default_mock_concretization("libelf")
    assert spec.architecture.os == str(TEST_PLATFORM.operating_system("default_os"))
    assert spec.architecture.target == TEST_PLATFORM.target("default_target")
    assert spec.architecture.os == str(TEST_PLATFORM.default_operating_system())
    assert spec.architecture.target == TEST_PLATFORM.default_target()


def test_operating_system_conversion_to_dict():
@@ -1,8 +1,10 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import io
import os
import subprocess
from urllib.error import HTTPError

import pytest

@@ -15,6 +17,7 @@
import spack.paths as spack_paths
import spack.repo as repo
import spack.util.git
from spack.test.conftest import MockHTTPResponse

pytestmark = [pytest.mark.usefixtures("mock_packages")]

@@ -162,38 +165,8 @@ def test_import_signing_key(mock_gnupghome):
    ci.import_signing_key(signing_key)


class FakeWebResponder:
    def __init__(self, response_code=200, content_to_read=[]):
        self._resp_code = response_code
        self._content = content_to_read
        self._read = [False for c in content_to_read]

    def open(self, request, data=None, timeout=object()):
        return self

    def getcode(self):
        return self._resp_code

    def read(self, length=None):
        if len(self._content) <= 0:
            return None

        if not self._read[-1]:
            return_content = self._content[-1]
            if length:
                self._read[-1] = True
            else:
                self._read.pop()
                self._content.pop()
            return return_content

        self._read.pop()
        self._content.pop()
        return None


def test_download_and_extract_artifacts(tmpdir, monkeypatch, working_env):
    os.environ.update({"GITLAB_PRIVATE_TOKEN": "faketoken"})
def test_download_and_extract_artifacts(tmpdir, monkeypatch):
    monkeypatch.setenv("GITLAB_PRIVATE_TOKEN", "faketoken")

    url = "https://www.nosuchurlexists.itsfake/artifacts.zip"
    working_dir = os.path.join(tmpdir.strpath, "repro")
@@ -201,10 +174,13 @@ def test_download_and_extract_artifacts(tmpdir, monkeypatch, working_env):
        spack_paths.test_path, "data", "ci", "gitlab", "artifacts.zip"
    )

    with open(test_artifacts_path, "rb") as fd:
        fake_responder = FakeWebResponder(content_to_read=[fd.read()])
    def _urlopen_OK(*args, **kwargs):
        with open(test_artifacts_path, "rb") as f:
            return MockHTTPResponse(
                "200", "OK", {"Content-Type": "application/zip"}, io.BytesIO(f.read())
            )

    monkeypatch.setattr(ci, "build_opener", lambda handler: fake_responder)
    monkeypatch.setattr(ci, "urlopen", _urlopen_OK)

    ci.download_and_extract_artifacts(url, working_dir)

@@ -214,7 +190,11 @@ def test_download_and_extract_artifacts(tmpdir, monkeypatch, working_env):
    found_install = fs.find(working_dir, "install.sh")
    assert len(found_install) == 1

    fake_responder._resp_code = 400
    def _urlopen_500(*args, **kwargs):
        raise HTTPError(url, 500, "Internal Server Error", {}, None)

    monkeypatch.setattr(ci, "urlopen", _urlopen_500)

    with pytest.raises(spack.error.SpackError):
        ci.download_and_extract_artifacts(url, working_dir)

@@ -328,16 +308,14 @@ def test_get_spec_filter_list(mutable_mock_env_path, mutable_mock_repo):
    e1.add("hypre")
    e1.concretize()

    """
    Concretizing the above environment results in the following graphs:
    # Concretizing the above environment results in the following graphs:

      mpileaks -> mpich (provides mpi virtual dep of mpileaks)
               -> callpath -> dyninst -> libelf
                           -> libdwarf -> libelf
                           -> mpich (provides mpi dep of callpath)
    # mpileaks -> mpich (provides mpi virtual dep of mpileaks)
    #          -> callpath -> dyninst -> libelf
    #                      -> libdwarf -> libelf
    #                      -> mpich (provides mpi dep of callpath)

      hypre -> openblas-with-lapack (provides lapack and blas virtual deps of hypre)
    """
    # hypre -> openblas-with-lapack (provides lapack and blas virtual deps of hypre)

    touched = ["libdwarf"]
@@ -329,14 +329,14 @@ def test_ci_generate_pkg_with_deps(ci_generate_test, tmp_path, ci_base_environme
        f"""\
spack:
  specs:
    - flatten-deps
    - dependent-install
  mirrors:
    buildcache-destination: {tmp_path / 'ci-mirror'}
  ci:
    pipeline-gen:
    - submapping:
      - match:
          - flatten-deps
          - dependent-install
        build-job:
          tags:
            - donotcare
@@ -355,12 +355,12 @@ def test_ci_generate_pkg_with_deps(ci_generate_test, tmp_path, ci_base_environme
            assert "stage" in ci_obj
            assert ci_obj["stage"] == "stage-0"
            found.append("dependency-install")
        if "flatten-deps" in ci_key:
        if "dependent-install" in ci_key:
            assert "stage" in ci_obj
            assert ci_obj["stage"] == "stage-1"
            found.append("flatten-deps")
            found.append("dependent-install")

    assert "flatten-deps" in found
    assert "dependent-install" in found
    assert "dependency-install" in found


@@ -372,14 +372,14 @@ def test_ci_generate_for_pr_pipeline(ci_generate_test, tmp_path, monkeypatch):
        f"""\
spack:
  specs:
    - flatten-deps
    - dependent-install
  mirrors:
    buildcache-destination: {tmp_path / 'ci-mirror'}
  ci:
    pipeline-gen:
    - submapping:
      - match:
          - flatten-deps
          - dependent-install
        build-job:
          tags:
            - donotcare
@@ -899,7 +899,7 @@ def test_ci_generate_override_runner_attrs(
        f"""\
spack:
  specs:
    - flatten-deps
    - dependent-install
    - pkg-a
  mirrors:
    buildcache-destination: {tmp_path / "ci-mirror"}
@@ -908,7 +908,7 @@ def test_ci_generate_override_runner_attrs(
    - match_behavior: {match_behavior}
      submapping:
      - match:
          - flatten-deps
          - dependent-install
        build-job:
          tags:
            - specific-one
@@ -1006,8 +1006,8 @@ def test_ci_generate_override_runner_attrs(
                assert the_elt["script"][0] == "main step"
                assert len(the_elt["after_script"]) == 1
                assert the_elt["after_script"][0] == "post step one"
            if "flatten-deps" in ci_key:
                # The flatten-deps match specifies that we keep the two
            if "dependent-install" in ci_key:
                # The dependent-install match specifies that we keep the two
                # top level variables, but add a third specific one. It
                # also adds a custom tag which should be combined with
                # the top-level tag.
@@ -1182,12 +1182,12 @@ def test_ci_generate_read_broken_specs_url(
    spec_a = spack.concretize.concretize_one("pkg-a")
    a_dag_hash = spec_a.dag_hash()

    spec_flattendeps = spack.concretize.concretize_one("flatten-deps")
    spec_flattendeps = spack.concretize.concretize_one("dependent-install")
    flattendeps_dag_hash = spec_flattendeps.dag_hash()

    broken_specs_url = tmp_path.as_uri()

    # Mark 'a' as broken (but not 'flatten-deps')
    # Mark 'a' as broken (but not 'dependent-install')
    broken_spec_a_url = "{0}/{1}".format(broken_specs_url, a_dag_hash)
    job_stack = "job_stack"
    a_job_url = "a_job_url"
@@ -1201,7 +1201,7 @@ def test_ci_generate_read_broken_specs_url(
        f"""\
spack:
  specs:
    - flatten-deps
    - dependent-install
    - pkg-a
  mirrors:
    buildcache-destination: {(tmp_path / "ci-mirror").as_uri()}
@@ -1211,7 +1211,7 @@ def test_ci_generate_read_broken_specs_url(
    - submapping:
      - match:
          - pkg-a
          - flatten-deps
          - dependent-install
          - pkg-b
          - dependency-install
        build-job:
@@ -1234,7 +1234,7 @@ def test_ci_generate_read_broken_specs_url(
    )
    assert expected in output

    not_expected = f"flatten-deps/{flattendeps_dag_hash[:7]} (in stack"
    not_expected = f"dependent-install/{flattendeps_dag_hash[:7]} (in stack"
    assert not_expected not in output


@@ -1447,7 +1447,7 @@ def test_gitlab_config_scopes(ci_generate_test, tmp_path):
          include: [{configs_path}]
        view: false
        specs:
          - flatten-deps
          - dependent-install
        mirrors:
          buildcache-destination: {tmp_path / "ci-mirror"}
        ci:
@@ -51,8 +51,8 @@ def test_create_db_tarball(tmpdir, database):
def test_report():
    out = debug("report")
    host_platform = spack.platforms.host()
    host_os = host_platform.operating_system("frontend")
    host_target = host_platform.target("frontend")
    host_os = host_platform.default_operating_system()
    host_target = host_platform.default_target()
    architecture = spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target)))

    assert spack.get_version() in out
@@ -24,32 +24,24 @@
mpi_deps = ["fake"]


def test_direct_dependencies(mock_packages):
    out = dependencies("mpileaks")
    actual = set(re.split(r"\s+", out.strip()))
    expected = set(["callpath"] + mpis)
    assert expected == actual


def test_transitive_dependencies(mock_packages):
    out = dependencies("--transitive", "mpileaks")
    actual = set(re.split(r"\s+", out.strip()))
    expected = set(["callpath", "dyninst", "libdwarf", "libelf"] + mpis + mpi_deps)
    assert expected == actual


def test_transitive_dependencies_with_deptypes(mock_packages):
    out = dependencies("--transitive", "--deptype=link,run", "dtbuild1")
    deps = set(re.split(r"\s+", out.strip()))
    assert set(["dtlink2", "dtrun2"]) == deps

    out = dependencies("--transitive", "--deptype=build", "dtbuild1")
    deps = set(re.split(r"\s+", out.strip()))
    assert set(["dtbuild2", "dtlink2"]) == deps

    out = dependencies("--transitive", "--deptype=link", "dtbuild1")
    deps = set(re.split(r"\s+", out.strip()))
    assert set(["dtlink2"]) == deps
@pytest.mark.parametrize(
    "cli_args,expected",
    [
        (["mpileaks"], set(["callpath"] + mpis)),
        (
            ["--transitive", "mpileaks"],
            set(["callpath", "dyninst", "libdwarf", "libelf"] + mpis + mpi_deps),
        ),
        (["--transitive", "--deptype=link,run", "dtbuild1"], {"dtlink2", "dtrun2"}),
        (["--transitive", "--deptype=build", "dtbuild1"], {"dtbuild2", "dtlink2"}),
        (["--transitive", "--deptype=link", "dtbuild1"], {"dtlink2"}),
    ],
)
def test_direct_dependencies(cli_args, expected, mock_runtimes):
    out = dependencies(*cli_args)
    result = set(re.split(r"\s+", out.strip()))
    expected.update(mock_runtimes)
    assert expected == result


@pytest.mark.db
@@ -1038,6 +1038,58 @@ def test_init_from_yaml(environment_from_manifest):
    assert not e2.specs_by_hash


def test_init_from_yaml_relative_includes(tmp_path):
    files = [
        "relative_copied/packages.yaml",
        "./relative_copied/compilers.yaml",
        "repos.yaml",
        "./config.yaml",
    ]

    manifest = f"""
spack:
  specs: []
  include: {files}
"""

    e1_path = tmp_path / "e1"
    e1_manifest = e1_path / "spack.yaml"
    fs.mkdirp(e1_path)
    with open(e1_manifest, "w", encoding="utf-8") as f:
        f.write(manifest)

    for f in files:
        fs.touchp(e1_path / f)

    e2 = _env_create("test2", init_file=e1_manifest)

    for f in files:
        assert os.path.exists(os.path.join(e2.path, f))


def test_init_from_yaml_relative_includes_outside_env(tmp_path):
    files = ["../outside_env_not_copied/repos.yaml"]

    manifest = f"""
spack:
  specs: []
  include: {files}
"""

    # subdir to ensure parent of environment dir is not shared
    e1_path = tmp_path / "e1_subdir" / "e1"
    e1_manifest = e1_path / "spack.yaml"
    fs.mkdirp(e1_path)
    with open(e1_manifest, "w", encoding="utf-8") as f:
        f.write(manifest)

    for f in files:
        fs.touchp(e1_path / f)

    with pytest.raises(spack.config.ConfigFileError, match="Detected 1 missing include"):
        _ = _env_create("test2", init_file=e1_manifest)


def test_env_view_external_prefix(tmp_path, mutable_database, mock_packages):
    fake_prefix = tmp_path / "a-prefix"
    fake_bin = fake_prefix / "bin"
@@ -2614,7 +2666,7 @@ def test_stack_yaml_remove_from_matrix_no_effect(tmpdir):
          - packages:
              - matrix:
                  - [mpileaks, callpath]
                  - [target=be]
                  - [target=default_target]
        specs:
          - $packages
        """
@@ -2639,7 +2691,7 @@ def test_stack_yaml_force_remove_from_matrix(tmpdir):
          - packages:
              - matrix:
                  - [mpileaks, callpath]
                  - [target=be]
                  - [target=default_target]
        specs:
          - $packages
        """
@@ -2659,7 +2711,7 @@ def test_stack_yaml_force_remove_from_matrix(tmpdir):

    assert before_user == after_user

    mpileaks_spec = Spec("mpileaks target=be")
    mpileaks_spec = Spec("mpileaks target=default_target")
    assert mpileaks_spec in before_conc
    assert mpileaks_spec not in after_conc


@@ -139,7 +139,7 @@ def test_gc_except_specific_environments(mutable_database, mutable_mock_env_path
def test_gc_except_nonexisting_dir_env(mutable_database, mutable_mock_env_path, tmpdir):
    output = gc("-ye", tmpdir.strpath, fail_on_error=False)
    assert "No such environment" in output
    gc.returncode == 1
    assert gc.returncode == 1


@pytest.mark.db
@@ -26,9 +26,9 @@ def test_manpath_trailing_colon(
        else ("--sh", "export %s=%s", ";")
    )

    """Test that the commands generated by load add the MANPATH prefix
    inspections. Also test that Spack correctly preserves the default/existing
    manpath search path via a trailing colon"""
    # Test that the commands generated by load add the MANPATH prefix
    # inspections. Also test that Spack correctly preserves the default/existing
    # manpath search path via a trailing colon
    install("mpileaks")

    sh_out = load(shell, "mpileaks")
@@ -81,7 +81,9 @@ def extract_value(output, variable):

    # Finally, do we list them in topo order?
    for i, pkg in enumerate(pkgs):
        set(s.name for s in mpileaks_spec[pkg].traverse(direction="parents")) in set(pkgs[:i])
        assert {s.name for s in mpileaks_spec[pkg].traverse(direction="parents")}.issubset(
            pkgs[: i + 1]
        )

    # Lastly, do we keep track that mpileaks was loaded?
    assert (
@@ -304,6 +304,8 @@ def test_run_import_check(tmp_path: pathlib.Path):
    contents = '''
import spack.cmd
import spack.config  # do not drop this import because of this comment
import spack.repo
import spack.repo_utils

# this comment about spack.error should not be removed
class Example(spack.build_systems.autotools.AutotoolsPackage):
@@ -314,6 +316,7 @@ def foo(config: "spack.error.SpackError"):
    # the type hint is quoted, so it should not be removed
    spack.util.executable.Executable("example")
    print(spack.__version__)
    print(spack.repo_utils.__file__)
'''
    file.write_text(contents)
    root = str(tmp_path)
@@ -329,6 +332,7 @@ def foo(config: "spack.error.SpackError"):
    output = output_buf.getvalue()

    assert "issues.py: redundant import: spack.cmd" in output
    assert "issues.py: redundant import: spack.repo" in output
    assert "issues.py: redundant import: spack.config" not in output  # comment prevents removal
    assert "issues.py: missing import: spack" in output  # used by spack.__version__
    assert "issues.py: missing import: spack.build_systems.autotools" in output
@@ -148,7 +148,6 @@ def current_host(request, monkeypatch):
    cpu, _, is_preference = request.param.partition("-")

    monkeypatch.setattr(spack.platforms.Test, "default", cpu)
    monkeypatch.setattr(spack.platforms.Test, "front_end", cpu)
    if not is_preference:
        target = archspec.cpu.TARGETS[cpu]
        monkeypatch.setattr(archspec.cpu, "host", lambda: target)
@@ -385,10 +384,11 @@ def test_different_compilers_get_different_flags(
):
    """Tests that nodes get the flags of the associated compiler."""
    mutable_config.set("compilers", [clang12_with_flags, gcc11_with_flags])
    t = archspec.cpu.host().family
    client = spack.concretize.concretize_one(
        Spec(
            "cmake-client %gcc@11.1.0 platform=test os=fe target=fe"
            " ^cmake %clang@12.2.0 platform=test os=fe target=fe"
            f"cmake-client %gcc@11.1.0 platform=test os=redhat6 target={t}"
            f" ^cmake %clang@12.2.0 platform=test os=redhat6 target={t}"
        )
    )
    cmake = client["cmake"]
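The new `archspec.cpu.host().family` calls above replace the hard-coded `target=fe` with the host's generic architecture family. A hedged illustration of that API (the output in the comment is an example and depends on the machine running it):

```python
import archspec.cpu

host = archspec.cpu.host()  # concrete microarchitecture of this machine
family = host.family        # its generic family
print(f"{host} -> {family}")  # e.g. "zen2 -> x86_64"
assert str(family) in archspec.cpu.TARGETS  # families are themselves named targets
```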
@@ -413,7 +413,8 @@ def test_spec_flags_maintain_order(self, mutable_config, gcc11_with_flags):
    def test_compiler_flags_differ_identical_compilers(self, mutable_config, clang12_with_flags):
        mutable_config.set("compilers", [clang12_with_flags])
        # Correct arch to use test compiler that has flags
        spec = Spec("pkg-a %clang@12.2.0 platform=test os=fe target=fe")
        t = archspec.cpu.host().family
        spec = Spec(f"pkg-a %clang@12.2.0 platform=test os=redhat6 target={t}")

        # Get the compiler that matches the spec (
        compiler = spack.compilers.compiler_for_spec("clang@=12.2.0", spec.architecture)
@@ -2111,14 +2112,15 @@ def test_installed_specs_disregard_conflicts(self, mutable_database, monkeypatch
    def test_require_targets_are_allowed(self, mutable_database):
        """Test that users can set target constraints under the require attribute."""
        # Configuration to be added to packages.yaml
        external_conf = {"all": {"require": "target=%s" % spack.platforms.test.Test.front_end}}
        required_target = archspec.cpu.TARGETS[spack.platforms.test.Test.default].family
        external_conf = {"all": {"require": f"target={required_target}"}}
        spack.config.set("packages", external_conf)

        with spack.config.override("concretizer:reuse", False):
            spec = spack.concretize.concretize_one("mpich")

        for s in spec.traverse():
            assert s.satisfies("target=%s" % spack.platforms.test.Test.front_end)
            assert s.satisfies(f"target={required_target}")

    def test_external_python_extensions_have_dependency(self):
        """Test that python extensions have access to a python dependency
@@ -1,17 +1,6 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pathlib

import pytest

import spack.concretize
import spack.config
import spack.environment as ev
import spack.paths
import spack.repo
import spack.spec
import spack.util.spack_yaml as syaml

"""
These tests include the following package DAGs:
@@ -42,6 +31,18 @@
    y
"""

import pathlib

import pytest

import spack.concretize
import spack.config
import spack.environment as ev
import spack.paths
import spack.repo
import spack.spec
import spack.util.spack_yaml as syaml


@pytest.fixture
def test_repo(mutable_config, monkeypatch, mock_stage):
@@ -1,7 +1,6 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pathlib

import pytest
@@ -182,7 +181,7 @@ def test_requirement_adds_version_satisfies(

    # Sanity check: early version of T does not include U
    s0 = spack.concretize.concretize_one("t@2.0")
    assert not ("u" in s0)
    assert "u" not in s0

    conf_str = """\
packages:
@@ -200,11 +199,11 @@ def test_requirement_adds_version_satisfies(

@pytest.mark.parametrize("require_checksum", (True, False))
def test_requirement_adds_git_hash_version(
    require_checksum, concretize_scope, test_repo, mock_git_version_info, monkeypatch, working_env
    require_checksum, concretize_scope, test_repo, mock_git_version_info, monkeypatch
):
    # A full commit sha is a checksummed version, so this test should pass in both cases
    if require_checksum:
        os.environ["SPACK_CONCRETIZER_REQUIRE_CHECKSUM"] = "yes"
        monkeypatch.setenv("SPACK_CONCRETIZER_REQUIRE_CHECKSUM", "yes")

    repo_path, filename, commits = mock_git_version_info
    monkeypatch.setattr(
@@ -408,7 +408,7 @@ def test_substitute_config_variables(mock_low_high_config, monkeypatch):
        os.path.join("foo", "$platform", "bar")
    ) == os.path.abspath(os.path.join("foo", "test", "bar"))

    host_target = spack.platforms.host().target("default_target")
    host_target = spack.platforms.host().default_target()
    host_target_family = str(host_target.family)
    assert spack_path.canonicalize_path(
        os.path.join("foo", "$target_family", "bar")
@@ -621,7 +621,7 @@ def linux_os():
    platform = spack.platforms.host()
    name, version = "debian", "6"
    if platform.name == "linux":
        current_os = platform.operating_system("default_os")
        current_os = platform.default_operating_system()
        name, version = current_os.name, current_os.version
    LinuxOS = collections.namedtuple("LinuxOS", ["name", "version"])
    return LinuxOS(name=name, version=version)
@@ -680,7 +680,6 @@ def load_json():
def mock_targets(mock_uarch_configuration, monkeypatch):
    """Use this fixture to enable mock uarch targets for testing."""
    targets_json, targets = mock_uarch_configuration

    monkeypatch.setattr(archspec.cpu.schema, "TARGETS_JSON", targets_json)
    monkeypatch.setattr(archspec.cpu.microarchitecture, "TARGETS", targets)

@@ -2172,3 +2171,8 @@ def getcode(self):

    def info(self):
        return self.headers


@pytest.fixture()
def mock_runtimes(config, mock_packages):
    return mock_packages.packages_with_tags("runtime")
@@ -13,6 +13,8 @@

import pytest

import archspec.cpu

import spack
import spack.cmd
import spack.cmd.external
@@ -98,11 +100,8 @@ def spec_json(self):

@pytest.fixture
def _common_arch(test_platform):
    return JsonArchEntry(
        platform=test_platform.name,
        os=test_platform.front_os,
        target=test_platform.target("fe").name,
    )
    generic = archspec.cpu.TARGETS[test_platform.default].family
    return JsonArchEntry(platform=test_platform.name, os="redhat6", target=generic.name)


@pytest.fixture
@@ -206,7 +206,7 @@ def test_repo(_create_test_repo, monkeypatch, mock_stage):
)
def test_redistribute_directive(test_repo, spec_str, distribute_src, distribute_bin):
    spec = spack.spec.Spec(spec_str)
    assert spec.package_class.redistribute_source(spec) == distribute_src
    assert spack.repo.PATH.get_pkg_class(spec.fullname).redistribute_source(spec) == distribute_src
    concretized_spec = spack.concretize.concretize_one(spec)
    assert concretized_spec.package.redistribute_binary == distribute_bin
@@ -198,17 +198,6 @@ def test_installed_dependency_request_conflicts(install_mockery, mock_fetch, mut
        spack.concretize.concretize_one(dependent)


def test_install_dependency_symlinks_pkg(install_mockery, mock_fetch, mutable_mock_repo):
    """Test dependency flattening/symlinks mock package."""
    spec = spack.concretize.concretize_one("flatten-deps")
    pkg = spec.package
    PackageInstaller([pkg], explicit=True).install()

    # Ensure dependency directory exists after the installation.
    dependency_dir = os.path.join(pkg.prefix, "dependency-install")
    assert os.path.isdir(dependency_dir)


def test_install_times(install_mockery, mock_fetch, mutable_mock_repo):
    """Test install times added."""
    spec = spack.concretize.concretize_one("dev-build-test-install-phases")
@@ -228,26 +217,6 @@ def test_install_times(install_mockery, mock_fetch, mutable_mock_repo):
    assert all(isinstance(x["seconds"], float) for x in times["phases"])


def test_flatten_deps(install_mockery, mock_fetch, mutable_mock_repo):
    """Explicitly test the flattening code for coverage purposes."""
    # Unfortunately, executing the 'flatten-deps' spec's installation does
    # not affect code coverage results, so be explicit here.
    spec = spack.concretize.concretize_one("dependent-install")
    pkg = spec.package
    PackageInstaller([pkg], explicit=True).install()

    # Demonstrate that the directory does not appear under the spec
    # prior to the flatten operation.
    dependency_name = "dependency-install"
    assert dependency_name not in os.listdir(pkg.prefix)

    # Flatten the dependencies and ensure the dependency directory is there.
    spack.package_base.flatten_dependencies(spec, pkg.prefix)

    dependency_dir = os.path.join(pkg.prefix, dependency_name)
    assert os.path.isdir(dependency_dir)


@pytest.fixture()
def install_upstream(tmpdir_factory, gen_mock_layout, install_mockery):
    """Provides a function that installs a specified set of specs to an
@@ -489,7 +458,7 @@ def test_log_install_without_build_files(install_mockery):
    spec = spack.concretize.concretize_one("trivial-install-test-package")

    # Attempt installing log without the build log file
    with pytest.raises(IOError, match="No such file or directory"):
    with pytest.raises(OSError, match="No such file or directory"):
        spack.installer.log(spec.package)
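For context on the repeated `IOError` → `OSError` substitutions in this and the following hunks: since Python 3.3 (PEP 3151) `IOError` is a plain alias of `OSError`, so the tests' behavior is unchanged; the new spelling is simply the canonical name. A quick self-contained check:

```python
assert IOError is OSError  # alias since Python 3.3 (PEP 3151)
try:
    open("/no/such/path")
except OSError as exc:
    # FileNotFoundError is a subclass of OSError, so both spellings catch it
    assert isinstance(exc, FileNotFoundError)
```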
@@ -470,7 +470,7 @@ def _repoerr(repo, name):

    # The call to install_tree will raise the exception since not mocking
    # creation of dependency package files within *install* directories.
    with pytest.raises(IOError, match=path if sys.platform != "win32" else ""):
    with pytest.raises(OSError, match=path if sys.platform != "win32" else ""):
        inst.dump_packages(spec, path)

    # Now try the error path, which requires the mock directory structure
@@ -82,7 +82,7 @@ def test_non_existing_src(self, stage):
        """Test using a non-existing source."""

        with fs.working_dir(str(stage)):
            with pytest.raises(IOError, match="No such file or directory"):
            with pytest.raises(OSError, match="No such file or directory"):
                fs.copy("source/none", "dest")

    def test_multiple_src_file_dest(self, stage):
@@ -139,7 +139,7 @@ def test_non_existing_src(self, stage):
        """Test using a non-existing source."""

        with fs.working_dir(str(stage)):
            with pytest.raises(IOError, match="No such file or directory"):
            with pytest.raises(OSError, match="No such file or directory"):
                fs.install("source/none", "dest")

    def test_multiple_src_file_dest(self, stage):
@@ -220,7 +220,7 @@ def test_non_existing_src(self, stage):
        """Test using a non-existing source."""

        with fs.working_dir(str(stage)):
            with pytest.raises(IOError, match="No such file or directory"):
            with pytest.raises(OSError, match="No such file or directory"):
                fs.copy_tree("source/none", "dest")

    def test_parent_dir(self, stage):
@@ -301,7 +301,7 @@ def test_non_existing_src(self, stage):
        """Test using a non-existing source."""

        with fs.working_dir(str(stage)):
            with pytest.raises(IOError, match="No such file or directory"):
            with pytest.raises(OSError, match="No such file or directory"):
                fs.install_tree("source/none", "dest")

    def test_parent_dir(self, stage):
@@ -93,28 +93,26 @@
    pass


"""This is a list of filesystem locations to test locks in. Paths are
expanded so that %u is replaced with the current username. '~' is also
legal and will be expanded to the user's home directory.

Tests are skipped for directories that don't exist, so you'll need to
update this with the locations of NFS, Lustre, and other mounts on your
system.
"""
#: This is a list of filesystem locations to test locks in. Paths are
#: expanded so that %u is replaced with the current username. '~' is also
#: legal and will be expanded to the user's home directory.
#:
#: Tests are skipped for directories that don't exist, so you'll need to
#: update this with the locations of NFS, Lustre, and other mounts on your
#: system.
locations = [
    tempfile.gettempdir(),
    os.path.join("/nfs/tmp2/", getpass.getuser()),
    os.path.join("/p/lscratch*/", getpass.getuser()),
]

"""This is the longest a failed multiproc test will take.
Barriers will time out and raise an exception after this interval.
In MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.
"""
#: This is the longest a failed multiproc test will take.
#: Barriers will time out and raise an exception after this interval.
#: In MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.
barrier_timeout = 5

"""This is the lock timeout for expected failures.
This may need to be higher for some filesystems."""
#: This is the lock timeout for expected failures.
#: This may need to be higher for some filesystems.
lock_fail_timeout = 0.1
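The conversion above, from bare strings placed *after* an assignment to `#:` comments placed *before* it, follows the Sphinx autodoc convention: the docstring-after-assignment form is also recognized by Sphinx, but it is a throwaway string expression evaluated at import time, while `#:` comments are explicit and cost nothing. A minimal sketch of the two styles (variable names invented for the example):

```python
#: Longest time (in seconds) a retry loop may run before giving up.
#: Sphinx autodoc attaches this comment to the variable below.
retry_timeout = 5

legacy_timeout = 5
"""Docstring-after-assignment: also picked up by Sphinx, but it is a bare
string expression that is evaluated and discarded at import time."""
```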
@@ -286,9 +284,8 @@ def wait(self):
        comm.Barrier()  # barrier after each MPI test.


"""``multiproc_test()`` should be called by tests below.
``multiproc_test()`` will work for either MPI runs or for local runs.
"""
#: ``multiproc_test()`` should be called by tests below.
#: ``multiproc_test()`` will work for either MPI runs or for local runs.
multiproc_test = mpi_multiproc_test if mpi else local_multiproc_test
@@ -1339,7 +1336,7 @@ def test_poll_lock_exception(tmpdir, monkeypatch, err_num, err_msg):
    """Test poll lock exception handling."""

    def _lockf(fd, cmd, len, start, whence):
        raise IOError(err_num, err_msg)
        raise OSError(err_num, err_msg)

    with tmpdir.as_cwd():
        lockfile = "lockfile"
@@ -1351,7 +1348,7 @@ def _lockf(fd, cmd, len, start, whence):
        if err_num in [errno.EAGAIN, errno.EACCES]:
            assert not lock._poll_lock(fcntl.LOCK_EX)
        else:
            with pytest.raises(IOError, match=err_msg):
            with pytest.raises(OSError, match=err_msg):
                lock._poll_lock(fcntl.LOCK_EX)

        monkeypatch.undo()
@@ -238,10 +238,7 @@ def test_exclude(self, modulefile_content, module_configuration, host_architectu

    assert len([x for x in content if "module load " in x]) == 1

    # Catch "Exception" to avoid using FileNotFoundError on Python 3
    # and IOError on Python 2 or common bases like EnvironmentError
    # which are not officially documented
    with pytest.raises(Exception):
    with pytest.raises(FileNotFoundError):
        modulefile_content(f"callpath target={host_architecture_str}")

    content = modulefile_content(f"zmpi target={host_architecture_str}")
@@ -1,7 +1,6 @@
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""Test class methods on Package objects.

This doesn't include methods on package *instances* (like do_patch(),
@@ -16,6 +15,7 @@

import llnl.util.filesystem as fs

import spack.binary_distribution
import spack.compilers
import spack.concretize
import spack.deptypes as dt
@@ -23,15 +23,11 @@
import spack.install_test
import spack.package
import spack.package_base
import spack.repo
import spack.spec
import spack.store
from spack.build_systems.generic import Package
from spack.error import InstallError


@pytest.fixture(scope="module")
def mpi_names(mock_repo_path):
    return [spec.name for spec in mock_repo_path.providers_for("mpi")]
from spack.solver.input_analysis import NoStaticAnalysis, StaticAnalysis


@pytest.fixture()
@@ -53,78 +49,94 @@ def mpileaks_possible_deps(mock_packages, mpi_names):
    return possible


def test_possible_dependencies(mock_packages, mpileaks_possible_deps):
    pkg_cls = spack.repo.PATH.get_pkg_class("mpileaks")
    expanded_possible_deps = pkg_cls.possible_dependencies(expand_virtuals=True)
    assert mpileaks_possible_deps == expanded_possible_deps
    assert {
        "callpath": {"dyninst", "mpi"},
        "dyninst": {"libdwarf", "libelf"},
        "libdwarf": {"libelf"},
        "libelf": set(),
        "mpi": set(),
        "mpileaks": {"callpath", "mpi"},
    } == pkg_cls.possible_dependencies(expand_virtuals=False)


def test_possible_direct_dependencies(mock_packages, mpileaks_possible_deps):
    pkg_cls = spack.repo.PATH.get_pkg_class("mpileaks")
    deps = pkg_cls.possible_dependencies(transitive=False, expand_virtuals=False)
    assert {"callpath": set(), "mpi": set(), "mpileaks": {"callpath", "mpi"}} == deps


def test_possible_dependencies_virtual(mock_packages, mpi_names):
    expected = dict(
        (name, set(dep for dep in spack.repo.PATH.get_pkg_class(name).dependencies_by_name()))
        for name in mpi_names
    )

    # only one mock MPI has a dependency
    expected["fake"] = set()

    assert expected == spack.package_base.possible_dependencies("mpi", transitive=False)


def test_possible_dependencies_missing(mock_packages):
    pkg_cls = spack.repo.PATH.get_pkg_class("missing-dependency")
    missing = {}
    pkg_cls.possible_dependencies(transitive=True, missing=missing)
    assert {"this-is-a-missing-dependency"} == missing["missing-dependency"]


def test_possible_dependencies_with_deptypes(mock_packages):
    dtbuild1 = spack.repo.PATH.get_pkg_class("dtbuild1")

    assert {
        "dtbuild1": {"dtrun2", "dtlink2"},
        "dtlink2": set(),
        "dtrun2": set(),
    } == dtbuild1.possible_dependencies(depflag=dt.LINK | dt.RUN)

    assert {
        "dtbuild1": {"dtbuild2", "dtlink2"},
        "dtbuild2": set(),
        "dtlink2": set(),
    } == dtbuild1.possible_dependencies(depflag=dt.BUILD)

    assert {"dtbuild1": {"dtlink2"}, "dtlink2": set()} == dtbuild1.possible_dependencies(
        depflag=dt.LINK
@pytest.fixture(params=[NoStaticAnalysis, StaticAnalysis])
def mock_inspector(config, mock_packages, request):
    inspector_cls = request.param
    if inspector_cls is NoStaticAnalysis:
        return inspector_cls(configuration=config, repo=mock_packages)
    return inspector_cls(
        configuration=config,
        repo=mock_packages,
        store=spack.store.STORE,
        binary_index=spack.binary_distribution.BINARY_INDEX,
    )


def test_possible_dependencies_with_multiple_classes(mock_packages, mpileaks_possible_deps):
@pytest.fixture
def mpi_names(mock_inspector):
    return [spec.name for spec in mock_inspector.providers_for("mpi")]


@pytest.mark.parametrize(
    "pkg_name,fn_kwargs,expected",
    [
        (
            "mpileaks",
            {"expand_virtuals": True, "allowed_deps": dt.ALL},
            {
                "fake",
                "mpileaks",
                "multi-provider-mpi",
                "callpath",
                "dyninst",
                "mpich2",
                "libdwarf",
                "zmpi",
                "low-priority-provider",
                "intel-parallel-studio",
                "mpich",
                "libelf",
            },
        ),
        (
            "mpileaks",
            {"expand_virtuals": False, "allowed_deps": dt.ALL},
            {"callpath", "dyninst", "libdwarf", "libelf", "mpileaks"},
        ),
        (
            "mpileaks",
            {"expand_virtuals": False, "allowed_deps": dt.ALL, "transitive": False},
            {"callpath", "mpileaks"},
        ),
        ("dtbuild1", {"allowed_deps": dt.LINK | dt.RUN}, {"dtbuild1", "dtrun2", "dtlink2"}),
        ("dtbuild1", {"allowed_deps": dt.BUILD}, {"dtbuild1", "dtbuild2", "dtlink2"}),
        ("dtbuild1", {"allowed_deps": dt.LINK}, {"dtbuild1", "dtlink2"}),
    ],
)
def test_possible_dependencies(pkg_name, fn_kwargs, expected, mock_runtimes, mock_inspector):
    """Tests possible nodes of mpileaks, under different scenarios."""
    expected.update(mock_runtimes)
    result, *_ = mock_inspector.possible_dependencies(pkg_name, **fn_kwargs)
    assert expected == result
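The `mock_inspector` fixture above uses `params=[NoStaticAnalysis, StaticAnalysis]`, a pytest feature that runs every test requesting the fixture once per parameter. A self-contained sketch of the mechanism (fixture and test names invented for the example):

```python
import pytest


@pytest.fixture(params=["no-static", "static"])
def analysis_flavor(request):
    # request.param takes each value in params; pytest runs one test per value
    return request.param


def test_runs_once_per_flavor(analysis_flavor):
    assert analysis_flavor in ("no-static", "static")
```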
def test_possible_dependencies_virtual(mock_inspector, mock_packages, mock_runtimes, mpi_names):
    expected = set(mpi_names)
    for name in mpi_names:
        expected.update(dep for dep in mock_packages.get_pkg_class(name).dependencies_by_name())
    expected.update(mock_runtimes)

    real_pkgs, *_ = mock_inspector.possible_dependencies(
        "mpi", transitive=False, allowed_deps=dt.ALL
    )
    assert expected == real_pkgs


def test_possible_dependencies_missing(mock_inspector):
    result, *_ = mock_inspector.possible_dependencies("missing-dependency", allowed_deps=dt.ALL)
    assert "this-is-a-missing-dependency" not in result


def test_possible_dependencies_with_multiple_classes(
    mock_inspector, mock_packages, mpileaks_possible_deps
):
    pkgs = ["dt-diamond", "mpileaks"]
    expected = mpileaks_possible_deps.copy()
    expected.update(
        {
            "dt-diamond": set(["dt-diamond-left", "dt-diamond-right"]),
            "dt-diamond-left": set(["dt-diamond-bottom"]),
            "dt-diamond-right": set(["dt-diamond-bottom"]),
            "dt-diamond-bottom": set(),
        }
    )
    expected = set(mpileaks_possible_deps)
    expected.update({"dt-diamond", "dt-diamond-left", "dt-diamond-right", "dt-diamond-bottom"})
    expected.update(mock_packages.packages_with_tags("runtime"))

    assert expected == spack.package_base.possible_dependencies(*pkgs)
    real_pkgs, *_ = mock_inspector.possible_dependencies(*pkgs, allowed_deps=dt.ALL)
    assert set(expected) == real_pkgs


def setup_install_test(source_paths, test_root):
@@ -132,7 +132,8 @@ def test_reporters_extract_skipped(state):
    parts = spack.reporters.extract.extract_test_parts("fake", outputs)

    assert len(parts) == 1
    parts[0]["completed"] == expected
    assert parts[0]["completed"] == spack.reporters.extract.completed["skipped"]


def test_reporters_skip_new():
@@ -198,7 +198,7 @@ def script_dir(sbang_line):
    ],
)
def test_shebang_interpreter_regex(shebang, interpreter):
    sbang.get_interpreter(shebang) == interpreter
    assert sbang.get_interpreter(shebang) == interpreter


def test_shebang_handling(script_dir, sbang_line):
@@ -319,7 +319,6 @@ def test_dependents_and_dependencies_are_correct(self):
            ("mpich@1.0", "mpileaks ^mpich@2.0"),
            ("mpich%gcc", "mpileaks ^mpich%intel"),
            ("mpich%gcc@4.6", "mpileaks ^mpich%gcc@4.5"),
            ("mpich platform=test target=be", "mpileaks ^mpich platform=test target=fe"),
        ],
    )
    def test_unsatisfiable_cases(self, set_dependency, constraint_str, spec_str):
@@ -429,31 +428,29 @@ def test_copy_through_spec_build_interface(self):
        c2 = s["mpileaks"]["mpileaks"].copy()
        assert c0 == c1 == c2 == s

"""
Here is the graph with deptypes labeled (assume all packages have a 'dt'
prefix). Arrows are marked with the deptypes ('b' for 'build', 'l' for
'link', 'r' for 'run').
# Here is the graph with deptypes labeled (assume all packages have a 'dt'
# prefix). Arrows are marked with the deptypes ('b' for 'build', 'l' for
# 'link', 'r' for 'run').

use -bl-> top
# use -bl-> top

top -b-> build1
top -bl-> link1
top -r-> run1
# top -b-> build1
# top -bl-> link1
# top -r-> run1

build1 -b-> build2
build1 -bl-> link2
build1 -r-> run2
# build1 -b-> build2
# build1 -bl-> link2
# build1 -r-> run2

link1 -bl-> link3
# link1 -bl-> link3

run1 -bl-> link5
run1 -r-> run3
# run1 -bl-> link5
# run1 -r-> run3

link3 -b-> build2
link3 -bl-> link4
# link3 -b-> build2
# link3 -bl-> link4

run3 -b-> build3
"""
# run3 -b-> build3

@pytest.mark.parametrize(
    "spec_str,deptypes,expected",
@@ -460,7 +460,6 @@ def test_concrete_specs_which_satisfies_abstract(self, lhs, rhs, default_mock_co
|
||||
("foo platform=linux", "platform=test os=redhat6 target=x86"),
|
||||
("foo os=redhat6", "platform=test os=debian6 target=x86_64"),
|
||||
("foo target=x86_64", "platform=test os=redhat6 target=x86"),
|
||||
("foo arch=test-frontend-frontend", "platform=test os=frontend target=backend"),
|
||||
("foo%intel", "%gcc"),
|
||||
("foo%intel", "%gcc"),
|
||||
("foo%gcc@4.3", "%gcc@4.4:4.6"),
|
||||
@@ -487,7 +486,6 @@ def test_concrete_specs_which_satisfies_abstract(self, lhs, rhs, default_mock_co
|
||||
("libelf+debug", "libelf~debug"),
|
||||
("libelf+debug~foo", "libelf+debug+foo"),
|
||||
("libelf debug=True", "libelf debug=False"),
|
||||
("libelf platform=test target=be os=be", "libelf target=fe os=fe"),
|
||||
("namespace=builtin.mock", "namespace=builtin"),
|
||||
],
|
||||
)
|
||||
@@ -1991,3 +1989,26 @@ def test_equality_discriminate_on_propagation(lhs, rhs):

 def test_comparison_multivalued_variants():
     assert Spec("x=a") < Spec("x=a,b") < Spec("x==a,b") < Spec("x==a,b,c")
+
+
+def test_comparison_after_breaking_hash_change():
+    # We simulate a breaking change in DAG hash computation in Spack. We have two specs that are
+    # entirely equal modulo DAG hash. When deserializing these specs, we don't want them to compare
+    # as equal, because DAG hash is used throughout in Spack to distinguish between specs
+    # (e.g. database, build caches, install dir).
+    s = Spec("example@=1.0")
+    s._mark_concrete(True)
+
+    # compute the dag hash and a change to it
+    dag_hash = s.dag_hash()
+    new_dag_hash = f"{'b' if dag_hash[0] == 'a' else 'a'}{dag_hash[1:]}"
+
+    before_breakage = s.to_dict()
+    after_breakage = s.to_dict()
+    after_breakage["spec"]["nodes"][0]["hash"] = new_dag_hash
+    assert before_breakage != after_breakage
+
+    x = Spec.from_dict(before_breakage)
+    y = Spec.from_dict(after_breakage)
+    assert x != y
+    assert len({x, y}) == 2
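The new test pins down a property of Spec equality: the stored DAG hash participates in __eq__ and __hash__, so two nodes that are identical except for their hash must remain distinct set or dict keys. The same property in miniature, using a stand-in dataclass rather than Spack's actual Spec:

from dataclasses import dataclass

@dataclass(frozen=True)
class Node:
    name: str
    dag_hash: str  # frozen dataclasses fold every field into __eq__ and __hash__

a = Node("example", "abc123")
b = Node("example", "bbc123")  # identical except for the first hash character
assert a != b
assert len({a, b}) == 2  # they occupy separate set slots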
@@ -306,7 +306,7 @@ def _specfile_for(spec_str, filename):
         (
             r"os=fe",  # Various translations associated with the architecture
             [Token(SpecTokens.KEY_VALUE_PAIR, value="os=fe")],
-            "arch=test-redhat6-None",
+            "arch=test-debian6-None",
         ),
         (
             r"os=default_os",
@@ -999,14 +999,14 @@ def test_disambiguate_hash_by_spec(spec1, spec2, constraint, mock_packages, monk
         ("x arch=linux-rhel7-ppc64le arch=linux-rhel7-x86_64", "two architectures"),
         ("y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64", "two architectures"),
         ("y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le", "two architectures"),
-        ("x os=fe os=fe", "'os'"),
-        ("x os=fe os=be", "'os'"),
-        ("x target=fe target=fe", "'target'"),
-        ("x target=fe target=be", "'target'"),
+        ("x os=redhat6 os=debian6", "'os'"),
+        ("x os=debian6 os=redhat6", "'os'"),
+        ("x target=core2 target=x86_64", "'target'"),
+        ("x target=x86_64 target=core2", "'target'"),
         ("x platform=test platform=test", "'platform'"),
         # TODO: these two seem wrong: need to change how arch is initialized (should fail on os)
-        ("x os=fe platform=test target=fe os=fe", "'platform'"),
-        ("x target=be platform=test os=be os=fe", "'platform'"),
+        ("x os=debian6 platform=test target=default_target os=redhat6", "two architectures"),
+        ("x target=default_target platform=test os=redhat6 os=debian6", "'platform'"),
         # Dependencies
         ("^[@foo] zlib", "edge attributes"),
         ("x ^[deptypes=link]foo ^[deptypes=run]foo", "conflicting dependency types"),
@@ -125,7 +125,7 @@ def check_expand_archive(stage, stage_name, expected_file_list):

     assert os.path.isfile(fn)
     with open(fn, encoding="utf-8") as _file:
-        _file.read() == contents
+        assert _file.read() == contents


 def check_fetch(stage, stage_name):
@@ -20,12 +20,7 @@

 datadir = os.path.join(spack_root, "lib", "spack", "spack", "test", "data", "compression")

-ext_archive = {}
-[
-    ext_archive.update({ext: ".".join(["Foo", ext])})
-    for ext in llnl.url.ALLOWED_ARCHIVE_TYPES
-    if "TAR" not in ext
-]
+ext_archive = {ext: f"Foo.{ext}" for ext in llnl.url.ALLOWED_ARCHIVE_TYPES if "TAR" not in ext}
 # Spack does not use Python native handling for tarballs or zip
 # Don't test tarballs or zip in native test
 native_archive_list = [
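The replaced code used a list comprehension purely for its side effect, materializing a throwaway list of None values. The dict comprehension states the mapping directly. Both forms produce the same result, sketched here with a stand-in extension list instead of llnl.url.ALLOWED_ARCHIVE_TYPES:

exts = ["gz", "bz2", "xz", "TAR.gz"]  # stand-in for llnl.url.ALLOWED_ARCHIVE_TYPES

old = {}
[old.update({ext: ".".join(["Foo", ext])}) for ext in exts if "TAR" not in ext]  # builds [None, None, None], discarded

new = {ext: f"Foo.{ext}" for ext in exts if "TAR" not in ext}

assert old == new == {"gz": "Foo.gz", "bz2": "Foo.bz2", "xz": "Foo.xz"}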
@@ -204,13 +204,13 @@ def test_no_editor():
     def assert_exec(exe, args):
         assert False

-    with pytest.raises(EnvironmentError, match=r"No text editor found.*"):
+    with pytest.raises(OSError, match=r"No text editor found.*"):
         ed.editor("/path/to/file", exec_fn=assert_exec)

     def assert_exec(exe, args):
         return False

-    with pytest.raises(EnvironmentError, match=r"No text editor found.*"):
+    with pytest.raises(OSError, match=r"No text editor found.*"):
         ed.editor("/path/to/file", exec_fn=assert_exec)
@@ -220,5 +220,5 @@ def test_exec_fn_executable(editor_var, good_exe, bad_exe):
     assert ed.editor(exec_fn=ed.executable)

     os.environ[editor_var] = bad_exe
-    with pytest.raises(EnvironmentError, match=r"No text editor found.*"):
+    with pytest.raises(OSError, match=r"No text editor found.*"):
         ed.editor(exec_fn=ed.executable)
@@ -201,3 +201,15 @@ def test_drop_redundant_rpath(tmpdir, binary_with_rpaths):
     new_rpaths = elf.get_rpaths(binary)
     assert set(existing_dirs).issubset(new_rpaths)
     assert set(non_existing_dirs).isdisjoint(new_rpaths)
+
+
+def test_elf_invalid_e_shnum(tmp_path):
+    # from llvm/test/Object/Inputs/invalid-e_shnum.elf
+    path = tmp_path / "invalid-e_shnum.elf"
+    with open(path, "wb") as file:
+        file.write(
+            b"\x7fELF\x02\x010000000000\x03\x00>\x0000000000000000000000"
+            b"\x00\x00\x00\x00\x00\x00\x00\x000000000000@\x000000"
+        )
+    with open(path, "rb") as file, pytest.raises(elf.ElfParsingError):
+        elf.parse_elf(file)
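The byte blob is just enough of a 64-bit ELF header to reach the corrupt section-header count. Its prefix follows the standard e_ident layout, which any ELF parser checks before anything else; the annotations below use the standard ELF constants, not Spack-specific values:

blob = b"\x7fELF\x02\x01"
assert blob[:4] == b"\x7fELF"  # ELF magic number
assert blob[4] == 2            # EI_CLASS = 2: ELFCLASS64, a 64-bit object
assert blob[5] == 1            # EI_DATA = 1: ELFDATA2LSB, little-endian

# After the 16-byte e_ident block, b"\x03\x00" is e_type ET_DYN and
# b">\x00" (0x3e) is e_machine EM_X86_64.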
@@ -9,7 +9,7 @@
 defined by the EDITOR environment variable if VISUAL is not set or the
 specified editor fails (e.g. no DISPLAY for a graphical editor). If
 neither variable is set, we fall back to one of several common editors,
-raising an EnvironmentError if we are unable to find one.
+raising an OSError if we are unable to find one.
 """
 import os
 import shlex
@@ -141,7 +141,7 @@ def try_env_var(var):
         return True

     # Fail if nothing could be found
-    raise EnvironmentError(
+    raise OSError(
         "No text editor found! Please set the VISUAL and/or EDITOR "
         "environment variable(s) to your preferred text editor."
     )
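This rename, and the matching ones in the tests above, are behavior-preserving: PEP 3151 (Python 3.3) collapsed EnvironmentError and IOError into plain aliases of OSError, so raising or catching any of them is identical at runtime. OSError is simply the canonical modern spelling:

# Python 3.3+ aliases, verifiable in any interpreter:
assert EnvironmentError is OSError
assert IOError is OSError

try:
    raise EnvironmentError("legacy name")
except OSError as e:
    print(type(e).__name__)  # prints "OSError"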
@@ -195,7 +195,10 @@ def parse_program_headers(f: BinaryIO, elf: ElfFile) -> None:
     elf: ELF file parser data
     """
     # Forward to the program header
-    f.seek(elf.elf_hdr.e_phoff)
+    try:
+        f.seek(elf.elf_hdr.e_phoff)
+    except OSError:
+        raise ElfParsingError("Could not seek to program header")

     # Here we have to make a mapping from virtual address to offset in the file.
     ph_fmt = elf.byte_order + ("LLQQQQQQ" if elf.is_64_bit else "LLLLLLLL")
@@ -245,7 +248,10 @@ def parse_pt_interp(f: BinaryIO, elf: ElfFile) -> None:
     f: file handle
     elf: ELF file parser data
     """
-    f.seek(elf.pt_interp_p_offset)
+    try:
+        f.seek(elf.pt_interp_p_offset)
+    except OSError:
+        raise ElfParsingError("Could not seek to PT_INTERP entry")
     data = read_exactly(f, elf.pt_interp_p_filesz, "Malformed PT_INTERP entry")
     elf.pt_interp_str = parse_c_string(data)
@@ -264,7 +270,10 @@ def find_strtab_size_at_offset(f: BinaryIO, elf: ElfFile, offset: int) -> int:
     """
     section_hdr_fmt = elf.byte_order + ("LLQQQQLLQQ" if elf.is_64_bit else "LLLLLLLLLL")
     section_hdr_size = calcsize(section_hdr_fmt)
-    f.seek(elf.elf_hdr.e_shoff)
+    try:
+        f.seek(elf.elf_hdr.e_shoff)
+    except OSError:
+        raise ElfParsingError("Could not seek to section header table")
     for _ in range(elf.elf_hdr.e_shnum):
         data = read_exactly(f, section_hdr_size, "Malformed section header")
         sh = SectionHeader(*unpack(section_hdr_fmt, data))
@@ -286,7 +295,10 @@ def retrieve_strtab(f: BinaryIO, elf: ElfFile, offset: int) -> bytes:
     Returns: file offset
     """
     size = find_strtab_size_at_offset(f, elf, offset)
-    f.seek(offset)
+    try:
+        f.seek(offset)
+    except OSError:
+        raise ElfParsingError("Could not seek to string table")
     return read_exactly(f, size, "Could not read string table")
@@ -319,7 +331,10 @@ def parse_pt_dynamic(f: BinaryIO, elf: ElfFile) -> None:
     count_runpath = 0
     count_strtab = 0

-    f.seek(elf.pt_dynamic_p_offset)
+    try:
+        f.seek(elf.pt_dynamic_p_offset)
+    except OSError:
+        raise ElfParsingError("Could not seek to PT_DYNAMIC entry")

     # In case of broken ELF files, don't read beyond the advertized size.
     for _ in range(elf.pt_dynamic_p_filesz // dynamic_array_size):
@@ -478,7 +493,10 @@ def get_interpreter(path: str) -> Optional[str]:
 def _delete_dynamic_array_entry(
     f: BinaryIO, elf: ElfFile, should_delete: Callable[[int, int], bool]
 ) -> None:
-    f.seek(elf.pt_dynamic_p_offset)
+    try:
+        f.seek(elf.pt_dynamic_p_offset)
+    except OSError:
+        raise ElfParsingError("Could not seek to PT_DYNAMIC entry")
     dynamic_array_fmt = elf.byte_order + ("qQ" if elf.is_64_bit else "lL")
     dynamic_array_size = calcsize(dynamic_array_fmt)
     new_offset = elf.pt_dynamic_p_offset  # points to the new dynamic array
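All six hunks above wrap a raw f.seek in the same three-line guard: seek raises OSError when a corrupt header yields an invalid (for example negative) offset, and translating that into ElfParsingError leaves callers with a single parser-level exception to handle. The repetition could be factored into a helper; a sketch under that assumption (the name _seek_or_parse_error is hypothetical, not Spack's code):

import tempfile
from typing import BinaryIO

class ElfParsingError(Exception):
    pass

def _seek_or_parse_error(f: BinaryIO, offset: int, msg: str) -> None:
    """Seek to offset, converting OSError into a parser-level error."""
    try:
        f.seek(offset)
    except OSError:
        raise ElfParsingError(msg)

with tempfile.TemporaryFile() as f:
    try:
        # a negative offset, as decoded from a corrupt header, makes seek raise OSError
        _seek_or_parse_error(f, -1, "Could not seek to program header")
    except ElfParsingError as e:
        print(e)  # Could not seek to program header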
@@ -46,7 +46,7 @@ def _process_ld_so_conf_queue(queue):
         try:
             with open(p, "rb") as f:
                 lines = f.readlines()
-        except (IOError, OSError):
+        except OSError:
             continue

         for line in lines:
@@ -132,7 +132,7 @@ def host_dynamic_linker_search_paths():

             if os.path.exists(possible_conf):
                 conf_file = possible_conf
-    except (IOError, OSError, elf_utils.ElfParsingError):
+    except (OSError, elf_utils.ElfParsingError):
         pass

     # Note: ld_so_conf doesn't error if the file does not exist.
@@ -8,7 +8,6 @@
 import spack.directives_meta
 import spack.error
 import spack.fetch_strategy
-import spack.package_base
 import spack.repo
 import spack.spec
 import spack.util.hash
@@ -61,10 +60,18 @@ class RemoveDirectives(ast.NodeTransformer):
     """

     def __init__(self, spec):
-        # list of URL attributes and metadata attributes
-        # these will be removed from packages.
-        self.metadata_attrs = [s.url_attr for s in spack.fetch_strategy.all_strategies]
-        self.metadata_attrs += spack.package_base.PackageBase.metadata_attrs
+        #: List of attributes to be excluded from a package's hash.
+        self.metadata_attrs = [s.url_attr for s in spack.fetch_strategy.all_strategies] + [
+            "homepage",
+            "url",
+            "urls",
+            "list_url",
+            "extendable",
+            "parallel",
+            "make_jobs",
+            "maintainers",
+            "tags",
+        ]

         self.spec = spec
         self.in_classdef = False  # used to avoid nested classdefs
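RemoveDirectives is an ast.NodeTransformer, so excluding these attributes from the package hash amounts to deleting matching assignments while walking the class body. A simplified sketch of that mechanism (this visitor is illustrative, not Spack's actual implementation):

import ast

METADATA_ATTRS = {"homepage", "url", "urls", "maintainers", "tags"}

class StripMetadata(ast.NodeTransformer):
    def visit_Assign(self, node: ast.Assign):
        targets = [t.id for t in node.targets if isinstance(t, ast.Name)]
        if any(t in METADATA_ATTRS for t in targets):
            return None  # returning None deletes the node from the tree
        return node

source = 'class Foo:\n    homepage = "https://example.com"\n    version = "1.0"\n'
tree = StripMetadata().visit(ast.parse(source))
print(ast.unparse(tree))  # only the version assignment survives (Python 3.9+)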
@@ -29,8 +29,8 @@ def architecture():
     import spack.spec

     host_platform = spack.platforms.host()
-    host_os = host_platform.operating_system("default_os")
-    host_target = host_platform.target("default_target")
+    host_os = host_platform.default_operating_system()
+    host_target = host_platform.default_target()

     return spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target)))
@@ -436,8 +436,8 @@ def _dump_annotated(handler, data, stream=None):
     width = max(clen(a) for a in _ANNOTATIONS)
     formats = ["%%-%ds %%s\n" % (width + cextra(a)) for a in _ANNOTATIONS]

-    for f, a, l in zip(formats, _ANNOTATIONS, lines):
-        stream.write(f % (a, l))
+    for fmt, annotation, line in zip(formats, _ANNOTATIONS, lines):
+        stream.write(fmt % (annotation, line))

     if getvalue:
         return getvalue()
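The rename is purely for readability: f, a, and l are the ambiguous single-letter names that linters flag (ruff's E741 targets l in particular), and the loop's behavior is unchanged. Each precomputed format string left-pads its annotation to a common width; the alignment logic in isolation, with sample data assumed:

annotations = ["spec.yaml:3", "spec.yaml:12"]
lines = ["name: zlib", "version: '1.3'"]

width = max(len(a) for a in annotations)
formats = ["%%-%ds  %%s" % width for _ in annotations]  # e.g. "%-12s  %s"

for fmt, annotation, line in zip(formats, annotations, lines):
    print(fmt % (annotation, line))
# spec.yaml:3   name: zlib
# spec.yaml:12  version: '1.3'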
@@ -1,6 +1,7 @@
 [project]
 name="spack"
 description="The spack package manager"
+requires-python=">=3.6"
 dependencies=[
     "clingo",
     "setuptools",
@@ -67,9 +68,42 @@ features = [
     "ci",
 ]

+[tool.ruff]
+line-length = 99
+extend-include = ["bin/spack"]
+extend-exclude = ["lib/spack/external", "*.pyi"]
+
+[tool.ruff.format]
+skip-magic-trailing-comma = true
+
+[tool.ruff.lint]
+extend-select = ["I"]
+ignore = ["E731", "E203"]
+
+[tool.ruff.lint.isort]
+split-on-trailing-comma = false
+section-order = [
+    "future",
+    "standard-library",
+    "third-party",
+    "archspec",
+    "llnl",
+    "spack",
+    "first-party",
+    "local-folder",
+]
+
+[tool.ruff.lint.isort.sections]
+spack = ["spack"]
+archspec = ["archspec"]
+llnl = ["llnl"]
+
+[tool.ruff.lint.per-file-ignores]
+"var/spack/repos/*/package.py" = ["F403", "F405", "F811", "F821"]
+"*-ci-package.py" = ["F403", "F405", "F821"]
+
 [tool.black]
 line-length = 99
 target-version = ['py36', 'py37', 'py38', 'py39', 'py310']
 include = '(lib/spack|var/spack/repos)/.*\.pyi?$|bin/spack$'
 extend-exclude = 'lib/spack/external'
 skip_magic_trailing_comma = true
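The custom isort sections slot archspec, llnl, and spack imports into their own groups between third-party and first-party code. Under this configuration, ruff's import sorter would arrange a typical module roughly as follows (the specific non-spack modules are illustrative):

import os      # standard-library
import sys

import pytest  # third-party

import archspec.cpu  # "archspec" section

import llnl.util.tty  # "llnl" section

import spack.spec  # "spack" section
import spack.util.hash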
@@ -59,11 +59,6 @@ default:
     SPACK_TARGET_PLATFORM: "linux"
     SPACK_TARGET_ARCH: "aarch64"

-.linux_power:
-  variables:
-    SPACK_TARGET_PLATFORM: "linux"
-    SPACK_TARGET_ARCH: "ppc64le"
-
 .win64-msvc2019:
   variables:
     SPACK_TARGET_PLATFORM: "win64"
@@ -352,35 +347,6 @@ e4s-oneapi-build:
     - artifacts: True
       job: e4s-oneapi-generate

-########################################
-# E4S on Power
-########################################
-.e4s-power-generate-tags-and-image:
-  image: { "name": "ghcr.io/spack/ubuntu20.04-runner-ppc64-gcc-11.4:2023.08.01", "entrypoint": [""] }
-  tags: ["spack", "public", "large", "ppc64le"]
-
-.e4s-power:
-  extends: [".linux_power"]
-  variables:
-    SPACK_CI_STACK_NAME: e4s-power
-
-e4s-power-generate:
-  extends: [ ".e4s-power", ".generate-x86_64", ".e4s-power-generate-tags-and-image"]
-  variables:
-    # Override concretization pool for metal runners
-    SPACK_CONCRETIZE_JOBS: 16
-
-e4s-power-build:
-  extends: [ ".e4s-power", ".build" ]
-  trigger:
-    include:
-      - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
-        job: e4s-power-generate
-    strategy: depend
-  needs:
-    - artifacts: True
-      job: e4s-power-generate
-
 #########################################
 # Build tests for different build-systems
 #########################################
@@ -573,57 +539,6 @@ data-vis-sdk-build:
     - artifacts: True
       job: data-vis-sdk-generate

-########################################
-# AWS ISC Applications (x86_64)
-########################################
-
-# Call this AFTER .*-generate
-.aws-isc-overrides:
-  # This controls image for generate step; build step is controlled by spack.yaml
-  # Note that generator emits OS info for build so these should be the same.
-  image: { "name": "ghcr.io/spack/e4s-amazonlinux-2:v2023-03-09", "entrypoint": [""] }
-
-.aws-isc:
-  extends: [ ".linux_x86_64_v3" ]
-  variables:
-    SPACK_CI_STACK_NAME: aws-isc
-
-aws-isc-generate:
-  extends: [ ".aws-isc", ".generate-x86_64", ".aws-isc-overrides", ".tags-x86_64_v4" ]
-
-aws-isc-build:
-  extends: [ ".aws-isc", ".build" ]
-  trigger:
-    include:
-      - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
-        job: aws-isc-generate
-    strategy: depend
-  needs:
-    - artifacts: True
-      job: aws-isc-generate
-
-# Parallel Pipeline for aarch64 (reuses override image, but generates and builds on aarch64)
-
-.aws-isc-aarch64:
-  extends: [ ".linux_aarch64" ]
-  variables:
-    SPACK_CI_STACK_NAME: aws-isc-aarch64
-
-aws-isc-aarch64-generate:
-  extends: [ ".aws-isc-aarch64", ".generate-aarch64", ".aws-isc-overrides" ]
-
-aws-isc-aarch64-build:
-  extends: [ ".aws-isc-aarch64", ".build" ]
-  trigger:
-    include:
-      - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
-        job: aws-isc-aarch64-generate
-    strategy: depend
-  needs:
-    - artifacts: True
-      job: aws-isc-aarch64-generate
-
-
 ########################################
 # Spack Tutorial
 ########################################
@@ -1,22 +0,0 @@
-ci:
-  pipeline-gen:
-  - build-job:
-      tags: ["ppc64le"]
-      # Power runners overrides the default script
-      # - don't download make
-      # - no intermediate keys
-      script::
-      - uname -a || true
-      - grep -E 'vendor|model name' /proc/cpuinfo 2>/dev/null | sort -u || head -n10 /proc/cpuinfo 2>/dev/null || true
-      - nproc
-      - . "./share/spack/setup-env.sh"
-      - spack --version
-      - spack arch
-      - cd ${SPACK_CONCRETE_ENV_DIR}
-      - spack env activate --without-view .
-      - if [ -n "$SPACK_BUILD_JOBS" ]; then spack config add "config:build_jobs:$SPACK_BUILD_JOBS"; fi
-      - spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'"
-      - mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
-      - if [[ -r /mnt/key/e4s.gpg ]]; then spack gpg trust /mnt/key/e4s.gpg; fi
-      - if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
-      - spack --color=always --backtrace ci rebuild > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2)