Compare commits


1 Commit

Author SHA1 Message Date
Todd Gamblin
0150e965a0 Remove deprecated --spec-file arg from tests
The `spack ci` tests were using the deprecated `--spec-file` argument, which should be
replaced with `--spec`.
2023-12-11 12:33:22 -08:00
9122 changed files with 29725 additions and 71428 deletions


@@ -1,6 +0,0 @@
<!--
Remember that `spackbot` can help with your PR in multiple ways:
- `@spackbot help` shows all the commands that are currently available
- `@spackbot fix style` tries to push a commit to fix style issues in this PR
- `@spackbot re-run pipeline` runs the pipelines again, if you have write access to the repository
-->


@@ -43,7 +43,7 @@ jobs:
. share/spack/setup-env.sh
$(which spack) audit packages
$(which spack) audit externals
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # @v2.1.0
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,audits


@@ -57,7 +57,7 @@ jobs:
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
- uses: docker/metadata-action@31cebacef4805868f9ce9a0cb03ee36c32df2ac4
id: docker_meta
with:
images: |
@@ -96,7 +96,7 @@ jobs:
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@0d103c3126aa41d772a8362f6aa67afac040f80c
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226
- name: Log in to GitHub Container Registry
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
@@ -118,5 +118,7 @@ jobs:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}
push: ${{ github.event_name != 'pull_request' }}
cache-from: type=gha
cache-to: type=gha,mode=max
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}


@@ -40,7 +40,7 @@ jobs:
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below


@@ -1,7 +1,7 @@
black==24.2.0
clingo==5.7.1
flake8==7.0.0
isort==5.13.2
mypy==1.8.0
black==23.11.0
clingo==5.6.2
flake8==6.1.0
isort==5.12.0
mypy==1.7.1
types-six==1.16.21.9
vermin==1.6.0


@@ -91,7 +91,7 @@ jobs:
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,linux,${{ matrix.concretizer }}
# Test shell integration
@@ -122,7 +122,7 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: shelltests,linux
@@ -181,7 +181,7 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # @v2.1.0
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
with:
flags: unittests,linux,clingo
# Run unit tests on MacOS
@@ -216,6 +216,6 @@ jobs:
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,macos


@@ -33,7 +33,7 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,windows
unit-tests-cmd:
@@ -57,7 +57,7 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,windows
build-abseil:


@@ -31,17 +31,13 @@ type: software
message: "If you are referencing Spack in a publication, please cite the paper below."
title: "The Spack Package Manager: Bringing Order to HPC Software Chaos"
abstract: >-
Large HPC centers spend considerable time supporting software for thousands of users, but the
complexity of HPC software is quickly outpacing the capabilities of existing software management
tools. Scientific applications require specific versions of compilers, MPI, and other dependency
libraries, so using a single, standard software stack is infeasible. However, managing many
configurations is difficult because the configuration space is combinatorial in size. We
introduce Spack, a tool used at Lawrence Livermore National Laboratory to manage this complexity.
Spack provides a novel, recursive specification syntax to invoke parametric builds of packages
and dependencies. It allows any number of builds to coexist on the same system, and it ensures
that installed packages can find their dependencies, regardless of the environment. We show
through real-world use cases that Spack supports diverse and demanding applications, bringing
order to HPC software chaos.
Large HPC centers spend considerable time supporting software for thousands of users, but the complexity of HPC software is quickly outpacing the capabilities of existing software management tools.
Scientific applications require specific versions of compilers, MPI, and other dependency libraries, so using a single, standard software stack is infeasible.
However, managing many configurations is difficult because the configuration space is combinatorial in size.
We introduce Spack, a tool used at Lawrence Livermore National Laboratory to manage this complexity.
Spack provides a novel, recursive specification syntax to invoke parametric builds of packages and dependencies.
It allows any number of builds to coexist on the same system, and it ensures that installed packages can find their dependencies, regardless of the environment.
We show through real-world use cases that Spack supports diverse and demanding applications, bringing order to HPC software chaos.
preferred-citation:
title: "The Spack Package Manager: Bringing Order to HPC Software Chaos"
type: conference-paper
@@ -75,7 +71,7 @@ preferred-citation:
type: doi
value: 10.1145/2807591.2807623
- description: "The DOE Document Release Number of the work"
type: other
type: other
value: "LLNL-CONF-669890"
authors:
- family-names: "Gamblin"


@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2013-2024 LLNS, LLC and other Spack Project Developers.
Copyright (c) 2013-2023 LLNS, LLC and other Spack Project Developers.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@@ -1,34 +1,13 @@
<div align="left">
# <img src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo.svg" width="64" valign="middle" alt="Spack"/> Spack
<h2>
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo-white-text.svg" width="250">
<source media="(prefers-color-scheme: light)" srcset="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo-text.svg" width="250">
<img alt="Spack" src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo-text.svg" width="250">
</picture>
<br>
<br clear="all">
<a href="https://github.com/spack/spack/actions/workflows/ci.yml"><img src="https://github.com/spack/spack/workflows/ci/badge.svg" alt="CI Status"></a>
<a href="https://github.com/spack/spack/actions/workflows/bootstrapping.yml"><img src="https://github.com/spack/spack/actions/workflows/bootstrap.yml/badge.svg" alt="Bootstrap Status"></a>
<a href="https://github.com/spack/spack/actions/workflows/build-containers.yml"><img src="https://github.com/spack/spack/actions/workflows/build-containers.yml/badge.svg" alt="Containers Status"></a>
<a href="https://spack.readthedocs.io"><img src="https://readthedocs.org/projects/spack/badge/?version=latest" alt="Documentation Status"></a>
<a href="https://codecov.io/gh/spack/spack"><img src="https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg" alt="Code coverage"/></a>
<a href="https://slack.spack.io"><img src="https://slack.spack.io/badge.svg" alt="Slack"/></a>
<a href="https://matrix.to/#/#spack-space:matrix.org"><img src="https://img.shields.io/matrix/spack-space%3Amatrix.org?label=matrix" alt="Matrix"/></a>
</h2>
**[Getting Started] &nbsp;&nbsp; [Config] &nbsp;&nbsp; [Community] &nbsp;&nbsp; [Contributing] &nbsp;&nbsp; [Packaging Guide]**
[Getting Started]: https://spack.readthedocs.io/en/latest/getting_started.html
[Config]: https://spack.readthedocs.io/en/latest/configuration.html
[Community]: #community
[Contributing]: https://spack.readthedocs.io/en/latest/contribution_guide.html
[Packaging Guide]: https://spack.readthedocs.io/en/latest/packaging_guide.html
</div>
[![Unit Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Bootstrapping](https://github.com/spack/spack/actions/workflows/bootstrap.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/bootstrap.yml)
[![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
[![Containers](https://github.com/spack/spack/actions/workflows/build-containers.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/build-containers.yml)
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Slack](https://slack.spack.io/badge.svg)](https://slack.spack.io)
[![Matrix](https://img.shields.io/matrix/spack-space%3Amatrix.org?label=Matrix)](https://matrix.to/#/#spack-space:matrix.org)
Spack is a multi-platform package manager that builds and installs
multiple versions and configurations of software. It works on Linux,


@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# sbang project developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,7 +1,7 @@
#!/bin/sh
# -*- python -*-
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
:: Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
:: Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
:: Spack Project Developers. See the top-level COPYRIGHT file for details.
::
:: SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1130,10 +1130,6 @@ A version specifier can also be a list of ranges and specific versions,
separated by commas. For example, ``@1.0:1.5,=1.7.1`` matches any version
in the range ``1.0:1.5`` and the specific version ``1.7.1``.
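Such a specifier can be used anywhere a spec is accepted, e.g. on the command
line (``mypkg`` is a hypothetical package name):

.. code-block:: console

   $ spack install mypkg@1.0:1.5,=1.7.1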
^^^^^^^^^^^^
Git versions
^^^^^^^^^^^^
For packages with a ``git`` attribute, ``git`` references
(i.e., branches, tags, and commits) may be specified instead of a numerical
version. Spack will stage and build based off the ``git``


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -153,43 +153,7 @@ keyring, and trusting all downloaded keys.
List of popular build caches
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `Extreme-scale Scientific Software Stack (E4S) <https://e4s-project.github.io/>`_: `build cache <https://oaciss.uoregon.edu/e4s/inventory.html>`_'
-------------------
Build cache signing
-------------------
By default, Spack adds a cryptographic signature to each package pushed to
a build cache, and verifies the signature when installing from a build cache.
Keys for signing can be managed with the :ref:`spack gpg <cmd-spack-gpg>` command,
as well as ``spack buildcache keys`` as mentioned above.
You can disable signing when pushing with ``spack buildcache push --unsigned``,
and disable verification when installing from any build cache with
``spack install --no-check-signature``.
Alternatively, signing and verification can be enabled or disabled on a per build cache
basis:
.. code-block:: console
$ spack mirror add --signed <name> <url> # enable signing and verification
$ spack mirror add --unsigned <name> <url> # disable signing and verification
$ spack mirror set --signed <name> # enable signing and verification for an existing mirror
$ spack mirror set --unsigned <name> # disable signing and verification for an existing mirror
Or you can directly edit the ``mirrors.yaml`` configuration file:
.. code-block:: yaml
mirrors:
<name>:
url: <url>
signed: false # disable signing and verification
See also :ref:`mirrors`.
* `Extreme-scale Scientific Software Stack (E4S) <https://e4s-project.github.io/>`_: `build cache <https://oaciss.uoregon.edu/e4s/inventory.html>`_
----------
Relocation
@@ -287,13 +251,87 @@ To significantly speed up Spack in GitHub Actions, binaries can be cached in
GitHub Packages. This service is an OCI registry that can be linked to a GitHub
repository.
A typical workflow is to include a ``spack.yaml`` environment in your repository
that specifies the packages to install, the target architecture, and the build
cache to use under ``mirrors``:
.. code-block:: yaml
spack:
specs:
- python@3.11
config:
install_tree:
root: /opt/spack
padded_length: 128
packages:
all:
require: target=x86_64_v2
mirrors:
local-buildcache: oci://ghcr.io/<organization>/<repository>
A GitHub action can then be used to install the packages and push them to the
build cache:
.. code-block:: yaml
name: Install Spack packages
on: push
env:
SPACK_COLOR: always
jobs:
example:
runs-on: ubuntu-22.04
permissions:
packages: write
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Checkout Spack
uses: actions/checkout@v3
with:
repository: spack/spack
path: spack
- name: Setup Spack
run: echo "$PWD/spack/bin" >> "$GITHUB_PATH"
- name: Concretize
run: spack -e . concretize
- name: Install
run: spack -e . install --no-check-signature
- name: Run tests
run: ./my_view/bin/python3 -c 'print("hello world")'
- name: Push to buildcache
run: |
spack -e . mirror set --oci-username ${{ github.actor }} --oci-password "${{ secrets.GITHUB_TOKEN }}" local-buildcache
spack -e . buildcache push --base-image ubuntu:22.04 --unsigned --update-index local-buildcache
if: ${{ !cancelled() }}
The first time this action runs, it will build the packages from source and
push them to the build cache. Subsequent runs will pull the binaries from the
build cache. The concretizer will ensure that prebuilt binaries are favored
over source builds.
The build cache entries appear in the GitHub Packages section of your repository,
and contain instructions for pulling and running them with ``docker`` or ``podman``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using Spack's public build cache for GitHub Actions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack offers a public build cache for GitHub Actions with a set of common packages,
which lets you get started quickly. See the following resources for more information:
* `spack/setup-spack <https://github.com/spack/setup-spack>`_ for setting up Spack in GitHub
Actions
* `spack/github-actions-buildcache <https://github.com/spack/github-actions-buildcache>`_ for
more details on the public build cache
* `spack/github-actions-buildcache <https://github.com/spack/github-actions-buildcache>`_
.. _cmd-spack-buildcache:


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrappin
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
@@ -175,4 +175,4 @@ bootstrapping.
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.
commands suggested at the prompt, they can be used to bootstrap Spack.
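A hedged sketch of that workflow (``/opt/bootstrap`` is a placeholder path;
check ``spack bootstrap mirror --help`` for the exact flags in your version):

.. code-block:: console

   # On a machine with internet access:
   $ spack bootstrap mirror --binary-packages /opt/bootstrap
   # Move /opt/bootstrap to the air-gapped system, then add the local
   # sources using the commands suggested at the prompt.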


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -90,7 +90,7 @@ and optimizers do require a paid license. In Spack, they are packaged as:
TODO: Confirm and possible change(!) the scope of MPI components (runtime
vs. devel) in current (and previous?) *cluster/professional/composer*
editions, i.e., presence in downloads, possibly subject to license
coverage(!); see `discussion in PR #4300
coverage(!); see `disussion in PR #4300
<https://github.com/spack/spack/pull/4300#issuecomment-305582898>`_. [NB:
An "mpi" subdirectory is not indicative of the full MPI SDK being present
(i.e., ``mpicc``, ..., and header files). The directory may just as well

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -88,13 +88,13 @@ command-line. However, Makefiles that use ``?=`` for assignment honor
environment variables. Since Spack already sets ``CC``, ``CXX``, ``F77``,
and ``FC``, you won't need to worry about setting these variables. If
there are any other variables you need to set, you can do this in the
``setup_build_environment`` method:
``edit`` method:
.. code-block:: python
def setup_build_environment(self, env):
env.set("PREFIX", prefix)
env.set("BLASLIB", spec["blas"].libs.ld_flags)
def edit(self, spec, prefix):
env["PREFIX"] = prefix
env["BLASLIB"] = spec["blas"].libs.ld_flags
`cbench <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cbench/package.py>`_
@@ -140,7 +140,7 @@ Edit Makefile
Some Makefiles are just plain stubborn and will ignore command-line
variables. The only way to ensure that these packages build correctly
is to directly edit the Makefile. Spack provides a ``FileFilter`` class
and a ``filter`` method to help with this. For example:
and a ``filter_file`` method to help with this. For example:
.. code-block:: python
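   # The original example body is cut off in this diff; the following is a
   # representative sketch based on the surrounding text, not the original code.
   def edit(self, spec, prefix):
       makefile = FileFilter("Makefile")
       # Override the compiler variables that the Makefile hard-codes.
       makefile.filter(r"^\s*CC\s*=.*", "CC = " + spack_cc)
       makefile.filter(r"^\s*CXX\s*=.*", "CXX = " + spack_cxx)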


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -199,7 +199,6 @@ def setup(sphinx):
("py:class", "contextlib.contextmanager"),
("py:class", "module"),
("py:class", "_io.BufferedReader"),
("py:class", "_io.BytesIO"),
("py:class", "unittest.case.TestCase"),
("py:class", "_frozen_importlib_external.SourceFileLoader"),
("py:class", "clingo.Control"),
@@ -216,7 +215,6 @@ def setup(sphinx):
("py:class", "spack.spec.InstallStatus"),
("py:class", "spack.spec.SpecfileReaderBase"),
("py:class", "spack.install_test.Pb"),
("py:class", "spack.filesystem_view.SimpleFilesystemView"),
]
# The reST default role (used for this markup: `text`) to use for all documents.

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -73,12 +73,9 @@ are six configuration scopes. From lowest to highest:
Spack instance per project) or for site-wide settings on a multi-user
machine (e.g., for a common Spack instance).
#. **plugin**: Read from a Python project's entry points. Settings here affect
all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes.
#. **user**: Stored in the home directory: ``~/.spack/``. These settings
affect all instances of Spack and take higher precedence than site,
system, plugin, or defaults scopes.
system, or defaults scopes.
#. **custom**: Stored in a custom directory specified by ``--config-scope``.
If multiple scopes are listed on the command line, they are ordered
@@ -199,45 +196,6 @@ with MPICH. You can create different configuration scopes for use with
mpi: [mpich]
.. _plugin-scopes:
^^^^^^^^^^^^^
Plugin scopes
^^^^^^^^^^^^^
.. note::
Python version >= 3.8 is required to enable plugin configuration.
Spack can be made aware of configuration scopes that are installed as part of a python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   ├── __init__.py
│   │   └── spack/
│   │   │   └── config.yaml
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.config"]
my_package = "my_package:get_config_path"
The function ``my_package.get_config_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_config_path():
dirname = importlib.resources.files("my_package").joinpath("spack")
if dirname.exists():
return str(dirname)
.. _platform-scopes:
------------------------


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -9,96 +9,34 @@
Container Images
================
Spack :ref:`environments` can easily be turned into container images. This page
outlines two ways in which this can be done:
1. By installing the environment on the host system, and copying the installations
into the container image. This approach does not require any tools like Docker
or Singularity to be installed.
2. By generating a Docker or Singularity recipe that can be used to build the
container image. In this approach, Spack builds the software inside the
container runtime, not on the host system.
The first approach is easiest if you already have an installed environment;
the second gives more control over the container image.
---------------------------
From existing installations
---------------------------
If you already have a Spack environment installed on your system, you can
share the binaries as an OCI compatible container image. To get started you
just have to configure an OCI registry and run ``spack buildcache push``.
.. code-block:: console
# Create and install an environment in the current directory
spack env create -d .
spack -e . add pkg-a pkg-b
spack -e . install
# Configure the registry
spack -e . mirror add --oci-username ... --oci-password ... container-registry oci://example.com/name/image
# Push the image
spack -e . buildcache push --update-index --base-image ubuntu:22.04 --tag my_env container-registry
The resulting container image can then be run as follows:
.. code-block:: console
$ docker run -it example.com/name/image:my_env
The image generated by Spack consists of the specified base image with each package from the
environment as a separate layer on top. The image is minimal by construction: it contains only the
environment roots and their runtime dependencies.
.. note::
When using registries like GHCR and Docker Hub, the ``--oci-password`` flag is not
the password for your account, but a personal access token you need to generate separately.
The specified ``--base-image`` should have a libc that is compatible with the host system.
For example if your host system is Ubuntu 20.04, you can use ``ubuntu:20.04``, ``ubuntu:22.04``
or newer: the libc in the container image must be at least the version of the host system,
assuming ABI compatibility. It is also perfectly fine to use a completely different
Linux distribution as long as the libc is compatible.
For convenience, Spack also turns the OCI registry into a :ref:`build cache <binary_caches_oci>`,
so that future ``spack install`` of the environment will simply pull the binaries from the
registry instead of doing source builds. The flag ``--update-index`` is needed to make Spack
take the build cache into account when concretizing.
.. note::
When generating container images in CI, the approach above is recommended when CI jobs
already run in a sandboxed environment. You can simply use ``spack`` directly
in the CI job and push the resulting image to a registry. Subsequent CI jobs should
run faster because Spack can install from the same registry instead of rebuilding from
sources.
---------------------------------------------
Generating recipes for Docker and Singularity
---------------------------------------------
Apart from copying existing installations into container images, Spack can also
generate recipes for container images. This is useful if you want to run Spack
itself in a sandboxed environment instead of on the host system.
Since recipes need a little bit more boilerplate than
Spack :ref:`environments` are a great tool to create container images, but
preparing one that is suitable for production requires some more boilerplate
than just:
.. code-block:: docker
COPY spack.yaml /environment
RUN spack -e /environment install
Spack provides a command to generate customizable recipes for container images. Customizations
include minimizing the size of the image, installing packages in the base image using the system
package manager, and setting up a proper entrypoint to run the image.
Additional actions may be needed to minimize the size of the
container, or to update the system software that is installed in the base
image, or to set up a proper entrypoint to run the image. These tasks are
usually both necessary and repetitive, so Spack comes with a command
to generate recipes for container images starting from a ``spack.yaml``.
~~~~~~~~~~~~~~~~~~~~
.. seealso::
This page is a reference for generating recipes to build container images.
It means that your environment is built from scratch inside the container
runtime.
Since v0.21, Spack can also create container images from existing package installations
on your host system. See :ref:`binary_caches_oci` for more information on
that topic.
--------------------
A Quick Introduction
~~~~~~~~~~~~~~~~~~~~
--------------------
Consider having a Spack environment like the following:
@@ -109,8 +47,8 @@ Consider having a Spack environment like the following:
- gromacs+mpi
- mpich
Producing a ``Dockerfile`` from it is as simple as changing directories to
where the ``spack.yaml`` file is stored and running the following command:
Producing a ``Dockerfile`` from it is as simple as moving to the directory
where the ``spack.yaml`` file is stored and giving the following command:
.. code-block:: console
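   # (The command body is cut off in this diff; the documented command is
   # ``spack containerize``.)
   $ spack containerize > Dockerfile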
@@ -176,9 +114,9 @@ configuration are discussed in details in the sections below.
.. _container_spack_images:
~~~~~~~~~~~~~~~~~~~~~~~~~~
--------------------------
Spack Images on Docker Hub
~~~~~~~~~~~~~~~~~~~~~~~~~~
--------------------------
Docker images with Spack preinstalled and ready to be used are
built when a release is tagged, or nightly on ``develop``. The images
@@ -248,9 +186,9 @@ by Spack use them as default base images for their ``build`` stage,
even though handles to use custom base images provided by users are
available to accommodate complex use cases.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configuring the Container Recipe
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
---------------------------------
Creating Images From Environments
---------------------------------
Any Spack Environment can be used for the automatic generation of container
recipes. Sensible defaults are provided for things like the base image or the
@@ -291,18 +229,18 @@ under the ``container`` attribute of environments:
A detailed description of the options available can be found in the :ref:`container_config_options` section.
~~~~~~~~~~~~~~~~~~~
-------------------
Setting Base Images
~~~~~~~~~~~~~~~~~~~
-------------------
The ``images`` subsection is used to select both the image where
Spack builds the software and the image where the built software
is installed. This attribute can be set in different ways and
which one to use depends on the use case at hand.
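As a point of reference, a minimal sketch of that subsection might look as
follows (assuming the Docker format and an Ubuntu base image; adjust both to
your use case):

.. code-block:: yaml

   spack:
     container:
       images:
         os: ubuntu:22.04
         spack: develop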
""""""""""""""""""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use Official Spack Images From Dockerhub
""""""""""""""""""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To generate a recipe that uses an official Docker image from the
Spack organization to build the software and the corresponding official OS image
@@ -507,9 +445,9 @@ responsibility to ensure that:
Therefore we don't recommend its use in cases that can be otherwise
covered by the simplified mode shown first.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
----------------------------
Singularity Definition Files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
----------------------------
In addition to producing recipes in ``Dockerfile`` format Spack can produce
Singularity Definition Files by just changing the value of the ``format``
@@ -530,9 +468,9 @@ attribute:
The minimum version of Singularity required to build a SIF (Singularity Image Format)
image from the recipes generated by Spack is ``3.5.3``.
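A minimal sketch of that change (only the ``format`` value differs from the
Docker case):

.. code-block:: yaml

   spack:
     container:
       format: singularity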
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
------------------------------
Extending the Jinja2 Templates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
------------------------------
The Dockerfile and the Singularity definition file that Spack can generate are based on
a few Jinja2 templates that are rendered according to the environment being containerized.
@@ -653,9 +591,9 @@ The recipe that gets generated contains the two extra instruction that we added
.. _container_config_options:
~~~~~~~~~~~~~~~~~~~~~~~
-----------------------
Configuration Reference
~~~~~~~~~~~~~~~~~~~~~~~
-----------------------
The tables below describe all the configuration options that are currently supported
to customize the generation of container recipes:
@@ -752,13 +690,13 @@ to customize the generation of container recipes:
- Description string
- No
~~~~~~~~~~~~~~
--------------
Best Practices
~~~~~~~~~~~~~~
--------------
"""
^^^
MPI
"""
^^^
Due to the dependency on Fortran for OpenMPI, which is Spack's default MPI
implementation, consider adding ``gfortran`` to the ``apt-get install`` list.
@@ -769,9 +707,9 @@ For execution on HPC clusters, it can be helpful to import the docker
image into Singularity in order to start a program with an *external*
MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list.
""""
^^^^
CUDA
""""
^^^^
Starting from CUDA 9.0, Nvidia provides minimal CUDA images based on
Ubuntu. Please see `their instructions <https://hub.docker.com/r/nvidia/cuda/>`_.
Avoid double-installing CUDA by adding, e.g.
@@ -790,9 +728,9 @@ to your ``spack.yaml``.
Users will either need ``nvidia-docker`` or e.g. Singularity to *execute*
device kernels.
"""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^
Docker on Windows and OSX
"""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^
On Mac OS and Windows, docker runs on a hypervisor that is not allocated much
memory by default, and some spack packages may fail to build due to lack of


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -357,23 +357,91 @@ If there is a hook that you would like and is missing, you can propose to add a
``pre_install(spec)``
"""""""""""""""""""""
A ``pre_install`` hook is run within the install subprocess, directly before the install starts.
It expects a single argument of a spec.
A ``pre_install`` hook is run within an install subprocess, directly before
the install starts. It expects a single argument of a spec, and is run in
a multiprocessing subprocess. Note that if you see ``pre_install`` functions associated with packages, these are not hooks
as we have defined them here, but rather callback functions associated with
a package install.
"""""""""""""""""""""""""""""""""""""
``post_install(spec, explicit=None)``
"""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""
``post_install(spec)``
""""""""""""""""""""""
A ``post_install`` hook is run within the install subprocess, directly after the install finishes,
but before the build stage is removed and the spec is registered in the database. It expects two
arguments: spec and an optional boolean indicating whether this spec is being installed explicitly.
A ``post_install`` hook is run within an install subprocess, directly after
the install finishes, but before the build stage is removed. If you
write one of these hooks, you should expect it to accept a spec as the only
argument. This is run in a multiprocessing subprocess. This ``post_install`` is
also seen in packages, but in this context not related to the hooks described
here.
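A minimal sketch of such a hook, assuming it lives in a module under
``lib/spack/spack/hooks/`` (the body is hypothetical):

.. code-block:: python

   def post_install(spec):
       # Runs in the install subprocess right after the install finishes,
       # before the build stage is removed.
       print(f"post_install: {spec.name}")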
""""""""""""""""""""""""""""""""""""""""""""""""""""
``pre_uninstall(spec)`` and ``post_uninstall(spec)``
""""""""""""""""""""""""""""""""""""""""""""""""""""
These hooks are currently used for cleaning up module files after uninstall.
""""""""""""""""""""""""""
``on_install_start(spec)``
""""""""""""""""""""""""""
This hook is run at the beginning of ``lib/spack/spack/installer.py``,
in the install function of a ``PackageInstaller``,
and importantly is not part of a build process, but runs before it. This is when
we have just newly grabbed the task, and are preparing to install. If you
write a hook of this type, you should provide the spec to it.
.. code-block:: python
def on_install_start(spec):
"""On start of an install, we want to...
"""
print('on_install_start')
""""""""""""""""""""""""""""
``on_install_success(spec)``
""""""""""""""""""""""""""""
This hook is run on a successful install, and is also run inside the build
process, akin to ``post_install``. The main difference is that this hook
is run outside of the context of the stage directory, meaning after the
build stage has been removed and the user is alerted that the install was
successful. If you need to write a hook that is run on success of a particular
phase, you should use ``on_phase_success``.
""""""""""""""""""""""""""""
``on_install_failure(spec)``
""""""""""""""""""""""""""""
This hook is run given an install failure that happens outside of the build
subprocess, but somewhere in ``installer.py`` when something else goes wrong.
If you need to write a hook that is relevant to a failure within a build
process, you would want to instead use ``on_phase_failure``.
"""""""""""""""""""""""""""
``on_install_cancel(spec)``
"""""""""""""""""""""""""""
The same, but triggered if a spec install is cancelled for any reason.
"""""""""""""""""""""""""""""""""""""""""""""""
``on_phase_success(pkg, phase_name, log_file)``
"""""""""""""""""""""""""""""""""""""""""""""""
This hook is run within the install subprocess, and specifically when a phase
successfully finishes. Since we are interested in the package, the name of
the phase, and any output from it, we require:
- **pkg**: the package variable, which also has the attached spec at ``pkg.spec``
- **phase_name**: the name of the phase that was successful (e.g., configure)
- **log_file**: the path to the file with output, in case you need to inspect or otherwise interact with it.
"""""""""""""""""""""""""""""""""""""""""""""
``on_phase_error(pkg, phase_name, log_file)``
"""""""""""""""""""""""""""""""""""""""""""""
In the case of an error during a phase, we might want to trigger some event
with a hook, and this is the purpose of this particular hook. Akin to
``on_phase_success`` we require the same variables - the package that failed,
the name of the phase, and the log file where we might find errors.
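A hedged sketch using the parameters described above (the body is
hypothetical):

.. code-block:: python

   def on_phase_success(pkg, phase_name, log_file):
       # pkg.spec carries the spec being installed; log_file points at the
       # output of the phase that just succeeded.
       print(f"{pkg.spec.name}: phase {phase_name} succeeded ({log_file})")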
^^^^^^^^^^^^^^^^^^^^^^


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -142,21 +142,6 @@ user's prompt to begin with the environment name in brackets.
$ spack env activate -p myenv
[myenv] $ ...
The ``activate`` command can also be used to create a new environment, if it is
not already defined, by adding the ``--create`` flag. Managed and anonymous
environments (anonymous environments are explained in the next section)
can both be created using the same flags that ``spack env create`` accepts.
If an environment already exists, Spack will simply activate it and ignore the
create-specific flags.
.. code-block:: console
$ spack env activate --create -p myenv
# ...
# [creates if myenv does not exist yet]
# ...
[myenv] $ ...
To deactivate an environment, use the command:
.. code-block:: console
@@ -416,23 +401,6 @@ that git clone if ``foo`` is in the environment.
Further development on ``foo`` can be tested by reinstalling the environment,
and eventually committed and pushed to the upstream git repo.
If the package being developed supports out-of-source builds, then users can use the
``--build_directory`` flag to control the location and name of the build directory.
This is a shortcut to set the ``package_attributes:build_directory`` in the
``packages`` configuration (see :ref:`assigning-package-attributes`).
The supplied location will become the build directory for that package in all future
builds, as shown in the sketch after the warning below.
.. warning::
Potential pitfalls of setting the build directory
Spack does not check for out-of-source build compatibility with the packages, so
the onus of making sure the package supports out-of-source builds is on
the user.
For example, most ``autotools`` and ``makefile`` packages do not support out-of-source
builds, while all ``CMake`` packages do.
Understanding these nuances is on the software developers, and we strongly encourage
developers to only redirect the build directory if they understand their package's
build system.
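A hypothetical invocation, assuming the flag belongs to ``spack develop``
(the flag name is taken from the text above; verify it against your Spack
version):

.. code-block:: console

   $ spack -e . develop --build_directory /scratch/foo-build foo@main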
^^^^^^^
Loading
^^^^^^^
@@ -489,11 +457,11 @@ a ``packages.yaml`` file) could contain:
.. code-block:: yaml
spack:
# ...
...
packages:
all:
compiler: [intel]
# ...
...
This configuration sets the default compiler for all packages to
``intel``.
@@ -839,7 +807,7 @@ directories.
.. code-block:: yaml
spack:
# ...
...
view:
mpis:
root: /path/to/view
@@ -883,7 +851,7 @@ automatically named ``default``, so that
.. code-block:: yaml
spack:
# ...
...
view: True
is equivalent to
@@ -891,7 +859,7 @@ is equivalent to
.. code-block:: yaml
spack:
# ...
...
view:
default:
root: .spack-env/view
@@ -901,7 +869,7 @@ and
.. code-block:: yaml
spack:
# ...
...
view: /path/to/view
is equivalent to
@@ -909,7 +877,7 @@ is equivalent to
.. code-block:: yaml
spack:
# ...
...
view:
default:
root: /path/to/view
@@ -952,17 +920,6 @@ function, as shown in the example below:
^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
all: "{name}-{version}/{compiler.name}-{compiler.version}"
Projections also permit environment and spack configuration variable
expansions as shown below:
.. code-block:: yaml
projections:
all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIBLE"
where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.
The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will
be the first non-``all`` entry that the spec satisfies, or ``all`` if


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -111,39 +111,3 @@ The corresponding unit tests can be run giving the appropriate options to ``spac
(5 durations < 0.005s hidden. Use -vv to show these durations.)
=========================================== 5 passed in 5.06s ============================================
---------------------------------------
Registering Extensions via Entry Points
---------------------------------------
.. note::
Python version >= 3.8 is required to register extensions via entry points.
Spack can be made aware of extensions that are installed as part of a python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   └── __init__.py
│   └── spack-scripting/ # the spack extensions
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.extensions"]
my_package = "my_package:get_extension_path"
The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_extension_path():
dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
if dirname.exists():
return str(dirname)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -623,7 +623,7 @@ Fortran.
compilers:
- compiler:
# ...
...
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)


@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -10,7 +10,7 @@ Modules (modules.yaml)
======================
The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is sometimes embraced also by
is a common practice at HPC centers that is often embraced also by
individual programmers on their development machines. To support this
common practice Spack integrates with `Environment Modules
<http://modules.sourceforge.net/>`_ and `Lmod
@@ -21,38 +21,14 @@ Modules are one of several ways you can use Spack packages. For other
options that may fit your use case better, you should also look at
:ref:`spack load <spack-load>` and :ref:`environments <environments>`.
-----------
Quick start
-----------
----------------------------
Using module files via Spack
----------------------------
In the current version of Spack, module files are not generated by default. To get started, you
can generate module files for all currently installed packages by running either
.. code-block:: console
$ spack module tcl refresh
or
.. code-block:: console
$ spack module lmod refresh
Spack can also generate module files for all future installations automatically through the
following configuration:
.. code-block:: console
$ spack config add modules:default:enable:[tcl]
or
.. code-block:: console
$ spack config add modules:default:enable:[lmod]
Assuming you have a module system installed, you should now be able to use the ``module`` command
to interact with them:
If you have installed a supported module system you should be able to
run ``module avail`` to see what module
files have been installed. Here is sample output of those programs,
showing lots of installed packages:
.. code-block:: console
@@ -89,17 +65,33 @@ scheme used at your site.
Module file customization
-------------------------
Module files are generated by post-install hooks after the successful
installation of a package.
.. note::
Spack only generates modulefiles when a package is installed. If
you attempt to install a package and it is already installed, Spack
will not regenerate modulefiles for the package. This may lead to
inconsistent modulefiles if the Spack module configuration has
changed since the package was installed, either by editing a file
or changing scopes or environments.
Later in this section there is a subsection on :ref:`regenerating
modules <cmd-spack-module-refresh>` that will allow you to bring
your modules to a consistent state.
The table below summarizes the essential information associated with
the different file formats that can be generated by Spack:
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
| | Hierarchical | **Default root directory** | **Default template file** | **Compatible tools** |
+===========+==============+==============================+==============================================+======================+
| ``tcl`` | No | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod |
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
| ``lmod`` | Yes | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod |
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| | **Hook name** | **Default root directory** | **Default template file** | **Compatible tools** |
+=============================+====================+===============================+==============================================+======================+
| **Tcl - Non-Hierarchical** | ``tcl`` | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| **Lua - Hierarchical** | ``lmod`` | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
Spack ships with sensible defaults for the generation of module files, but
@@ -110,7 +102,7 @@ In general you can override or extend the default behavior by:
2. writing specific rules in the ``modules.yaml`` configuration file
3. writing your own templates to override or extend the defaults
The former method lets you express changes in the run-time environment
that are needed to use the installed software properly, e.g. injecting variables
from language interpreters into their extensions. The latter two instead permit to
fine tune the filesystem layout, content and creation of module files to meet
@@ -118,62 +110,79 @@ site specific conventions.
.. _overide-api-calls-in-package-py:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting environment variables dynamically in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Override API calls in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two methods that you can implement in any ``package.py`` to dynamically affect the
content of the module files generated by Spack. The most important one is
``setup_run_environment``, which can be used to set environment variables in the module file that
depend on the spec:
There are two methods that you can override in any ``package.py`` to affect the
content of the module files generated by Spack. The first one:
.. code-block:: python
def setup_run_environment(self, env):
if self.spec.satisfies("+foo"):
env.set("FOO", "bar")
pass
The second, less commonly used, is ``setup_dependent_run_environment(self, env, dependent_spec)``,
which allows a dependency to set variables in the module file of its dependents. This is typically
used in packages like ``python``, ``r``, or ``perl`` to prepend the dependent's prefix to the
search path of the interpreter (``PYTHONPATH``, ``R_LIBS``, ``PERL5LIB`` resp.), so it can locate
the packages at runtime.
For example, a simplified version of the ``python`` package could look like this:
can alter the content of the module file associated with the same package where it is overridden.
The second method:
.. code-block:: python
def setup_dependent_run_environment(self, env, dependent_spec):
if dependent_spec.package.extends(self.spec):
env.prepend_path("PYTHONPATH", dependent_spec.prefix.lib.python)
pass
and would make any package that ``extends("python")`` have its library directory added to the
``PYTHONPATH`` environment variable in the module file. It's much more convenient to set this
variable here, than to repeat it in every Python extension's ``setup_run_environment`` method.
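Putting the two callbacks together, a minimal sketch of a hypothetical extendable package
(every name below is made up purely for illustration) could look like this:

.. code-block:: python

   from spack.package import *


   class MyInterp(Package):
       """Hypothetical interpreter, used only to illustrate the two callbacks."""

       extendable = True

       def setup_run_environment(self, env):
           # Variables the package itself needs at run time
           env.set("MYINTERP_HOME", self.prefix)

       def setup_dependent_run_environment(self, env, dependent_spec):
           # Make extensions findable by the interpreter at run time
           if dependent_spec.package.extends(self.spec):
               env.prepend_path("MYINTERP_PATH", dependent_spec.prefix.lib)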
can instead inject run-time environment modifications in the module files of packages
that depend on it. In both cases you need to fill ``env`` with the desired
list of environment modifications.
.. admonition:: The ``r`` package and callback APIs
An example in which it is crucial to override both methods
is given by the ``r`` package. This package installs libraries and headers
in non-standard locations and it is possible to prepend the appropriate directory
to the corresponding environment variables:
================== =================================
LD_LIBRARY_PATH ``self.prefix/rlib/R/lib``
PKG_CONFIG_PATH ``self.prefix/rlib/pkgconfig``
================== =================================
with the following snippet:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_run_environment
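A hedged sketch of what that snippet amounts to, based on the table above (the
authoritative code lives in the ``r`` package itself):

.. code-block:: python

   def setup_run_environment(self, env):
       # Prepend the non-standard install locations listed in the table
       env.prepend_path("LD_LIBRARY_PATH", self.prefix.rlib.R.lib)
       env.prepend_path("PKG_CONFIG_PATH", self.prefix.rlib.pkgconfig)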
The ``r`` package also knows which environment variable should be modified
to make language extensions provided by other packages available, and modifies
it appropriately in the override of the second method:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_dependent_run_environment
.. _modules-yaml:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``modules.yaml`` config file and module sets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^
Write a configuration file
^^^^^^^^^^^^^^^^^^^^^^^^^^
The configuration files that control module generation behavior are named ``modules.yaml``. The
default configuration looks like this:
The configuration files that control module generation behavior
are named ``modules.yaml``. The default configuration:
.. literalinclude:: _spack_root/etc/spack/defaults/modules.yaml
:language: yaml
You can define one or more **module sets**, each of which can be configured separately with regard
to install location, naming scheme, inclusion and exclusion, autoloading, et cetera.
activates the hooks to generate ``tcl`` module files and inspects
the installation folder of each package for the presence of a set of subdirectories
(``bin``, ``man``, ``share/man``, etc.). If any is found its full path is prepended
to the environment variables listed below the folder name.
The default module set is aptly named ``default``. All
:ref:`Spack commands that operate on modules <maintaining-module-files>` apply to the ``default``
module set, unless another module set is specified explicitly (with the ``--name`` flag).
Spack modules can be configured for multiple module sets. The default
module set is named ``default``. All Spack commands which operate on
modules apply to the ``default`` module set by default, but can be
applied to any module set in the configuration.
^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""
Changing the modules root
^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""
As shown in the table above, the default module root for ``lmod`` is
``$spack/share/spack/lmod`` and the default root for ``tcl`` is
@@ -189,7 +198,7 @@ set by changing the ``roots`` key of the configuration.
my_custom_lmod_modules:
roots:
lmod: /path/to/install/custom/lmod/modules
# ...
...
This configuration will create two module sets. The default module set
will install its ``tcl`` modules to ``/path/to/install/tcl/modules``
@@ -215,32 +224,25 @@ location could be confusing to users of your modules. In the next
section, we will discuss enabling and disabling module types (module
file generators) for each module set.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically generating module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""
Activate other hooks
""""""""""""""""""""
Spack can be configured to automatically generate module files as part of package installation.
This is done by adding the desired module systems to the ``enable`` list.
Any other module file generator shipped with Spack can be activated by adding it to the
list under the ``enable`` key in the module configuration. Currently the only generator that
is not active by default is ``lmod``, which produces hierarchical Lua module files.
Each module system can then be configured separately. In fact, you should list configuration
options that affect a particular type of module files under a top level key corresponding
to the generator being customized:
.. code-block:: yaml
modules:
default:
enable:
- tcl
- lmod
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuring ``tcl`` and ``lmod`` modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can configure the behavior of either module system separately, under a key corresponding to
the generator being customized:
.. code-block:: yaml
modules:
default:
- tcl
- lmod
tcl:
# contains environment modules specific customizations
lmod:
@@ -251,82 +253,16 @@ either change the layout of the module files on the filesystem, or they will aff
their content. For the latter point it is possible to use anonymous specs
to fine tune the set of packages on which the modifications should be applied.
.. _autoloading-dependencies:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Autoloading and hiding dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A module file should set the variables that are needed for an application to work. But since an
application often has many dependencies, where should all the environment variables for those be
set? In Spack the rule is that each package sets the runtime variables that are needed by the
package itself, and no more. This way, dependencies can be loaded standalone too, and duplication
of environment variables is avoided.
That means however that if you want to use an application, you need to load the modules for all its
dependencies. Of course this is not something you would want users to do manually.
Since Spack knows the dependency graph of every package, it can easily generate module files that
automatically load the modules for its dependencies recursively. It is enabled by default for both
Lmod and Environment Modules under the ``autoload: direct`` config option. The former system has
builtin support through the ``depends_on`` function, the latter simply uses a ``module load``
statement. Both module systems (at least in newer versions) do reference counting, so that if a
module is loaded by two different modules, it will only be unloaded after the others are.
The ``autoload`` key accepts the values:
* ``none``: no autoloading
* ``run``: autoload direct *run* type dependencies
* ``direct``: autoload direct *link and run* type dependencies
* ``all``: autoload all dependencies
In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.
The ``direct`` option is most correct: there are cases where pure link dependencies need to set
variables for themselves, or need to have variables of their own dependencies set.
In practice however, ``run`` is often sufficient, and may make ``module load`` snappier.
The ``all`` option is discouraged and seldom used.
A common complaint about autoloading is the large number of modules that are visible to the user.
Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
packages you've explicitly installed are exposed by ``module avail``, but still allows for
autoloading of hidden dependencies. Lmod should support hiding implicits in general, while
Environment Modules requires version 4.7 or higher.
.. note::
If supported by your module system, we highly encourage the following configuration that enables
autoloading and hiding of implicits. It ensures all runtime variables are set correctly,
including those for dependencies, without overwhelming the user with a large number of available
modules. Further, it makes it easier to get readable module names without collisions, see the
section below on :ref:`modules-projections`.
.. code-block:: yaml
modules:
default:
tcl:
hide_implicits: true
all:
autoload: direct # or `run`
lmod:
hide_implicits: true
all:
autoload: direct # or `run`
.. _anonymous_specs:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting environment variables for selected packages in config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""
Selection by anonymous specs
""""""""""""""""""""""""""""
In the configuration file you can filter particular specs, and make further changes to the
environment variables that go into their module files. This is very powerful when you want to avoid
:ref:`modifying the package itself <overide-api-calls-in-package-py>`, or when you want to set
certain variables on multiple selected packages at once.
For instance, in the snippet below:
In the configuration file you can use *anonymous specs* (i.e. specs
that **are not required to have a root package** and are thus used just
to express constraints) to apply certain modifications on a selected set
of the installed software. For instance, in the snippet below:
.. code-block:: yaml
@@ -369,28 +305,12 @@ the variable ``FOOBAR`` will be unset.
.. note::
Order does matter
The modifications associated with the ``all`` keyword are always evaluated
first, no matter where they appear in the configuration file. All the other changes to
environment variables for matching specs are evaluated from top to bottom.
first, no matter where they appear in the configuration file. All the other
spec constraints are instead evaluated top to bottom.
.. warning::
As general advice, it's often better to set as few unnecessary variables as possible. For
example, the following seemingly innocent and potentially useful configuration
.. code-block:: yaml
all:
environment:
set:
"{name}_ROOT": "{prefix}"
sets ``BINUTILS_ROOT`` to its prefix in modules for ``binutils``, which happens to break
the ``gcc`` compiler: it uses this variable as its default search path for certain object
files and libraries, and by merely setting it, everything fails to link.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""""""""""
Exclude or include specific module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""""""""""
You can use anonymous specs also to prevent module files from being written or
to force them to be written. Consider the case where you want to hide from users
@@ -410,19 +330,14 @@ you will prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
or any ``llvm`` installation.
It is safe to combine ``exclude`` and ``autoload``
:ref:`mentioned above <autoloading-dependencies>`. When ``exclude`` prevents a module file to be
generated for a dependency, the ``autoload`` feature will simply not generate a statement to load
it.
.. _modules-projections:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""
Customize the naming of modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""
The names of environment modules generated by Spack are not always easy to
fully comprehend due to the long hash in the name. There are three module
configuration options to help with that. The first is a global setting to
adjust the hash length. It can be set anywhere from 0 to 32 and has a default
@@ -438,13 +353,6 @@ shows how to set hash length in the module file names:
tcl:
hash_length: 7
.. tip::
Using ``hide_implicits: true`` (see :ref:`autoloading-dependencies`) vastly reduces the number
modules exposed to the user. The hidden modules always contain the hash in their name, and are
not influenced by the ``hash_length`` setting. Hidden implicits thus make it easier to use a
short hash length or no hash at all, without risking name conflicts.
To help make module names more readable, and to help alleviate name conflicts
with a short hash, one can use the ``suffixes`` option in the modules
configuration file. This option will add strings to modules that match a spec.
@@ -457,12 +365,12 @@ For instance, the following config options,
tcl:
all:
suffixes:
^python@3.12: 'python-3.12'
^python@2.7.12: 'python-2.7.12'
^openblas: 'openblas'
will add a ``python-3.12`` version string to any packages compiled with
Python matching the spec, ``python@3.12``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the
will add a ``python-2.7.12`` version string to any packages compiled with
python matching the spec, ``python@2.7.12``. This is useful to know which
version of python a set of python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.
@@ -560,11 +468,41 @@ that are already in the Lmod hierarchy.
For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
See `this discussion on the Lmod project <https://github.com/TACC/Lmod/issues/114>`_.
""""""""""""""""""""""
Select default modules
""""""""""""""""""""""
By default, when multiple modules of the same name share a directory,
the highest version number will be the default module. This behavior
of the ``module`` command can be overridden with a symlink named
``default`` to the desired default module. If you wish to configure
default modules with Spack, add a ``defaults`` key to your modules
configuration:
.. code-block:: yaml
modules:
my-module-set:
tcl:
defaults:
- gcc@10.2.1
- hdf5@1.2.10+mpi+hl%gcc
These defaults may be arbitrarily specific. For any package that
satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.
.. warning::
If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
.. _customize-env-modifications:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""""""
Customize environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""""""
You can control which prefixes in a Spack package are added to
environment variables with the ``prefix_inspections`` section; this
@@ -662,9 +600,9 @@ stack to users who are likely to inspect the modules to find full
paths to software, when it is desirable to present the users with a
simpler set of paths than those generated by the Spack install tree.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""
Filter out environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""
Modifications to certain environment variables in module files are there by
default, for instance because they are generated by prefix inspections.
@@ -684,37 +622,49 @@ do so by using the ``exclude_env_vars``:
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.
^^^^^^^^^^^^^^^^^^^^^^
Select default modules
^^^^^^^^^^^^^^^^^^^^^^
By default, when multiple modules of the same name share a directory,
the highest version number will be the default module. This behavior
of the ``module`` command can be overridden with a symlink named
``default`` to the desired default module. If you wish to configure
default modules with Spack, add a ``defaults`` key to your modules
configuration:
.. _autoloading-dependencies:
"""""""""""""""""""""
Autoload dependencies
"""""""""""""""""""""
Often it is required for a module to have its (transitive) dependencies loaded as well.
One example where this is useful is when one package needs to use executables provided
by its dependency; when the dependency is autoloaded, the executable will be in the
PATH. Similarly for scripting languages such as Python, packages and their dependencies
have to be loaded together.
Autoloading is enabled by default for Lmod and Environment Modules. The former
has builtin support through the ``depends_on`` function. The latter uses a
``module load`` statement to load and track dependencies.
Autoloading can also be enabled conditionally:
.. code-block:: yaml
modules:
my-module-set:
tcl:
defaults:
- gcc@10.2.1
- hdf5@1.2.10+mpi+hl%gcc
modules:
default:
tcl:
all:
autoload: none
^python:
autoload: direct
These defaults may be arbitrarily specific. For any package that
satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.
The configuration file above will produce module files that will
load their direct dependencies if the package installed depends on ``python``.
The allowed values for the ``autoload`` statement are either ``none``,
``direct`` or ``all``.
.. warning::
If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
.. _maintaining-module-files:
.. note::
Tcl prerequisites
In the ``tcl`` section of the configuration file it is possible to use
the ``prerequisites`` directive that accepts the same values as
``autoload``. It will produce module files that have a ``prereq``
statement, which autoloads dependencies on Environment Modules when its
``auto_handling`` configuration option is enabled. If Environment Modules
is installed with Spack, ``auto_handling`` is enabled by default starting with
version 4.2. Otherwise it is enabled by default since version 5.0.
------------------------
Maintaining Module Files

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -97,35 +97,6 @@ Each package version and compiler listed in an external should
have entries in Spack's packages and compiler configuration, even
though the package and compiler may not ever be built.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Extra attributes for external packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes external packages require additional attributes to be used
effectively. This information can be defined on a per-package basis
and stored in the ``extra_attributes`` section of the external package
configuration. In addition to per-package information, this section
can be used to define environment modifications to be performed
whenever the package is used. For example, if an external package is
built without ``rpath`` support, it may require ``LD_LIBRARY_PATH``
settings to find its dependencies. This could be configured as
follows:
.. code-block:: yaml
packages:
mpich:
externals:
- spec: mpich@3.3 %clang@12.0.0 +hwloc
prefix: /path/to/mpich
extra_attributes:
environment:
prepend_path:
LD_LIBRARY_PATH: /path/to/hwloc/lib64
See :ref:`configuration_environment_variables` for more information on
how to configure environment modifications in Spack config files.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prevent packages from being built from sources
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -487,56 +458,6 @@ present. For instance with a configuration like:
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Conflicts and strong preferences
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the semantic of requirements is too strong, you can also express "strong preferences" and "conflicts"
from configuration files:
.. code-block:: yaml
packages:
all:
prefer:
- '%clang'
conflict:
- '+shared'
The ``prefer`` and ``conflict`` sections can be used whenever a ``require`` section is allowed.
The argument is always a list of constraints, and each constraint can be either a simple string,
or a more complex object:
.. code-block:: yaml
packages:
all:
conflict:
- spec: '%clang'
when: 'target=x86_64_v3'
message: 'reason why clang cannot be used'
The ``spec`` attribute is mandatory, while both ``when`` and ``message`` are optional.
.. note::
Requirements allow for expressing both "strong preferences" and "conflicts".
The syntax for doing so, though, may not be immediately clear. For
instance, if we want to prevent any package from using ``%clang``, we can set:
.. code-block:: yaml
packages:
all:
require:
- one_of: ['%clang', '@:']
Since only one of the requirements must hold, and ``@:`` is always true, the rule above is
equivalent to a conflict. For "strong preferences" we need to substitute the ``one_of`` policy
with ``any_of``.
.. _package-preferences:
-------------------
@@ -647,8 +568,6 @@ manually placed files within the install prefix are owned by the
assigned group. If no group is assigned, Spack will allow the OS
default behavior to go as expected.
.. _assigning-package-attributes:
----------------------------
Assigning Package Attributes
----------------------------
@@ -659,11 +578,10 @@ You can assign class-level attributes in the configuration:
packages:
mpileaks:
package_attributes:
# Override existing attributes
url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
# ... or add new ones
x: 1
# Override existing attributes
url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
# ... or add new ones
x: 1
Attributes set this way will be accessible to any method executed
in the package.py file (e.g. the ``install()`` method). Values for these

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -237,7 +237,7 @@ for details):
.. code-block:: python
:linenos:
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -4379,16 +4379,10 @@ implementation was selected for this build:
elif "mvapich" in spec:
configure_args.append("--with-mvapich")
It's also a bit more concise than satisfies.
.. note::
The ``satisfies()`` method tests whether this spec has, at least, all the constraints of the argument spec,
while ``in`` tests whether a spec or any of its dependencies satisfy the provided spec.
If the provided spec is anonymous (e.g., "@1.2:", "+shared") or has the
same name as the spec being checked, then ``in`` works the same as
``satisfies()``; however, use of ``satisfies()`` is more intuitive.
It's also a bit more concise than satisfies. The difference between
the two functions is that ``satisfies()`` tests whether spec
constraints overlap at all, while ``in`` tests whether a spec or any
of its dependencies satisfy the provided spec.
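As a compact sketch of the difference (the version, variant, and dependency
constraints below are made up for illustration):

.. code-block:: python

   # satisfies() checks the constraints against this spec alone
   if spec.satisfies("@2.1: +shared"):
       configure_args.append("--enable-shared")

   # ``in`` applies the same test to the spec and all of its dependencies
   if "mvapich2" in spec:
       configure_args.append("--with-mvapich2")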
^^^^^^^^^^^^^^^^^^^^^^^
Architecture specifiers
@@ -5290,7 +5284,7 @@ installed example.
example = which(self.prefix.bin.example)
example()
Output showing the identification of each test part after running the tests
is illustrated below.
.. code-block:: console
@@ -5787,7 +5781,7 @@ with those implemented in the package itself.
* - `Cxx
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cxx>`_
- Compiles and runs several ``hello`` programs
* - `Fortran
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/fortran>`_
- Compiles and runs ``hello`` programs (``F`` and ``f90``)
* - `Mpi
@@ -6979,18 +6973,3 @@ you probably care most about are:
You may also care about `license exceptions
<https://spdx.org/licenses/exceptions-index.html>`_ that use the ``WITH`` operator,
e.g. ``Apache-2.0 WITH LLVM-exception``.
Many of the licenses that are currently in the Spack repositories have been
automatically determined. While this is great for bulk adding license
information and is most likely correct, there are sometimes edge cases that
require manual intervention. To determine which licenses are validated and
which are not, there is the ``checked_by`` parameter in the license directive:
.. code-block:: python
license("<license>", when="<when>", checked_by="<github username>")
When you have validated a license, either when doing so explicitly or
as part of packaging a new package, please set the ``checked_by`` parameter
to your GitHub username to signal that the license has been manually
verified.

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -810,7 +810,7 @@ generated by ``spack ci generate``. You also want your generated rebuild jobs
.. code-block:: yaml
spack:
# ...
...
ci:
pipeline-gen:
- build-job:

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -17,7 +17,7 @@ experimental software separately from the built-in repository. Spack
allows you to configure local repositories using either the
``repos.yaml`` or the ``spack repo`` command.
A package repository is a directory structured like this::
repo/
repo.yaml

View File

@@ -2,12 +2,12 @@ sphinx==7.2.6
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.0
python-levenshtein==0.23.0
docutils==0.20.1
pygments==2.17.2
urllib3==2.2.1
pytest==8.0.2
isort==5.13.2
black==24.2.0
flake8==7.0.0
mypy==1.8.0
urllib3==2.1.0
pytest==7.4.3
isort==5.12.0
black==23.11.0
flake8==6.1.0
mypy==1.7.1

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -142,7 +142,7 @@ Reputational Key
----------------
The Reputational Key is the public facing key used to sign complete groups of
development and release packages. Only one key pair exists in this class of
keys. In contrast to the Intermediate CI Key the Reputational Key *should* be
used to verify package integrity. At the end of develop and release pipeline a
final pipeline job pulls down all signed package metadata built by the pipeline,
@@ -272,7 +272,7 @@ Internal Implementation
The technical implementation of the pipeline signing process includes components
defined in Amazon Web Services, the Kubernetes cluster, at affiliated
institutions, and the GitLab/GitLab Runner deployment. We present the technical
implementation in two interdependent sections. The first addresses how secrets
are managed through the lifecycle of a develop or release pipeline. The second
section describes how Gitlab Runner and pipelines are configured and managed to
@@ -295,7 +295,7 @@ infrastructure.
-----------------------
Multiple intermediate CI signing keys exist, one Intermediate CI Key for jobs
run in AWS, and one key for each affiliated institution (e.g. University of
Oregon). Here we describe how the Intermediate CI Key is managed in AWS:
The Intermediate CI Key (including the Signing Intermediate CI Private Key) is
@@ -305,7 +305,7 @@ contains an ASCII-armored export of just the *public* components of the
Reputational Key. This secret also contains the *public* components of each of
the affiliated institutions' Intermediate CI Key. These are potentially needed
to verify dependent packages which may have been found in the public mirror or
built by a protected job running on an affiliated institution's infrastructure
in an earlier stage of the pipeline.
Procedurally the ``spack-intermediate-ci-signing-key`` secret is used in

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

lib/spack/env/cc (vendored)
View File

@@ -1,7 +1,7 @@
#!/bin/sh -f
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -171,7 +171,7 @@ def polite_path(components: Iterable[str]):
@memoized
def _polite_antipattern():
# A regex of all the characters we don't want in a filename
return re.compile(r"[^A-Za-z0-9_+.-]")
return re.compile(r"[^A-Za-z0-9_.-]")
def polite_filename(filename: str) -> str:
@@ -920,34 +920,28 @@ def get_filetype(path_name):
return output.strip()
def has_shebang(path):
"""Returns whether a path has a shebang line. Returns False if the file cannot be opened."""
try:
with open(path, "rb") as f:
return f.read(2) == b"#!"
except OSError:
return False
@system_path_filter
def is_nonsymlink_exe_with_shebang(path):
"""Returns whether the path is an executable regular file with a shebang. Returns False too
when the path is a symlink to a script, and also when the file cannot be opened."""
"""
Returns whether the path is an executable script with a shebang.
Return False when the path is a *symlink* to an executable script.
"""
try:
st = os.lstat(path)
except OSError:
return False
# Should not be a symlink
if stat.S_ISLNK(st.st_mode):
return False
# Should not be a symlink
if stat.S_ISLNK(st.st_mode):
return False
# Should be executable
if not st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
return False
# Should be executable
if not st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
# Should start with a shebang
with open(path, "rb") as f:
return f.read(2) == b"#!"
except (IOError, OSError):
return False
return has_shebang(path)
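# Usage sketch (illustrative, not part of this diff): filter for scripts whose
# shebang line may need rewriting, skipping symlinks so each real file is
# considered only once; ``candidate_paths`` is a hypothetical iterable of paths.
#
#     scripts = [p for p in candidate_paths if is_nonsymlink_exe_with_shebang(p)]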
@system_path_filter(arg_slice=slice(1))
def chgrp_if_not_world_writable(path, group):
@@ -1240,47 +1234,6 @@ def get_single_file(directory):
return fnames[0]
@system_path_filter
def windows_sfn(path: os.PathLike):
"""Returns 8.3 Filename (SFN) representation of
path
8.3 Filenames (SFN or short filename) is a file
naming convention used prior to Win95 that Windows
still (and will continue to) support. This convention
caps filenames at 8 characters, and most importantly
does not allow for spaces in addition to other specifications.
The scheme is generally the same as a normal Windows
file scheme, but all spaces are removed and the filename
is capped at 6 characters. The remaining characters are
replaced with ~N where N is the number of the file in a directory
that a given file represents, i.e. Program Files and Program Files (x86)
would be PROGRA~1 and PROGRA~2 respectively.
Further, all file/directory names are all caps (although modern Windows
is case insensitive in practice).
Conversion is accomplished by fileapi.h GetShortPathNameW
Returns paths in 8.3 Filename form
Note: this method is a no-op on Linux
Args:
path: Path to be transformed into SFN (8.3 filename) format
"""
# This should not be run-able on linux/macos
if sys.platform != "win32":
return path
path = str(path)
import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * len(path)
ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ret_str, len(path))
return ret_str.value
@contextmanager
def temp_cwd():
tmp_dir = tempfile.mkdtemp()
@@ -1424,89 +1377,120 @@ def traverse_tree(
yield (source_path, dest_path)
def lexists_islink_isdir(path):
"""Computes the tuple (lexists(path), islink(path), isdir(path)) in a minimal
number of stat calls on unix. Use os.path and symlink.islink methods for windows."""
if sys.platform == "win32":
if not os.path.lexists(path):
return False, False, False
return os.path.lexists(path), islink(path), os.path.isdir(path)
# First try to lstat, so we know if it's a link or not.
try:
lst = os.lstat(path)
except (IOError, OSError):
return False, False, False
is_link = stat.S_ISLNK(lst.st_mode)
# Check whether file is a dir.
if not is_link:
is_dir = stat.S_ISDIR(lst.st_mode)
return True, is_link, is_dir
# Check whether symlink points to a dir.
try:
st = os.stat(path)
is_dir = stat.S_ISDIR(st.st_mode)
except (IOError, OSError):
# Dangling symlink (i.e. it lexists but not exists)
is_dir = False
return True, is_link, is_dir
class BaseDirectoryVisitor:
"""Base class and interface for :py:func:`visit_directory_tree`."""
def visit_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_file(self, root, rel_path, depth):
"""Handle the non-symlink file at ``os.path.join(root, rel_path)``
Parameters:
root: root directory
rel_path: relative path to current file from ``root``
root (str): root directory
rel_path (str): relative path to current file from ``root``
depth (int): depth of current file from the ``root`` directory"""
pass
def visit_symlinked_file(self, root: str, rel_path: str, depth) -> None:
"""Handle the symlink to a file at ``os.path.join(root, rel_path)``. Note: ``rel_path`` is
the location of the symlink, not to what it is pointing to. The symlink may be dangling.
def visit_symlinked_file(self, root, rel_path, depth):
"""Handle the symlink to a file at ``os.path.join(root, rel_path)``.
Note: ``rel_path`` is the location of the symlink, not to what it is
pointing to. The symlink may be dangling.
Parameters:
root: root directory
rel_path: relative path to current symlink from ``root``
depth: depth of current symlink from the ``root`` directory"""
root (str): root directory
rel_path (str): relative path to current symlink from ``root``
depth (int): depth of current symlink from the ``root`` directory"""
pass
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_dir(self, root, rel_path, depth):
"""Return True from this function to recurse into the directory at
os.path.join(root, rel_path). Return False in order not to recurse further.
Parameters:
root: root directory
rel_path: relative path to current directory from ``root``
depth: depth of current directory from the ``root`` directory
root (str): root directory
rel_path (str): relative path to current directory from ``root``
depth (int): depth of current directory from the ``root`` directory
Returns:
bool: ``True`` when the directory should be recursed into. ``False`` when
not"""
return False
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
"""Return ``True`` to recurse into the symlinked directory and ``False`` in order not to.
Note: ``rel_path`` is the path to the symlink itself. Following symlinked directories
blindly can cause infinite recursion due to cycles.
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""Return ``True`` to recurse into the symlinked directory and ``False`` in
order not to. Note: ``rel_path`` is the path to the symlink itself.
Following symlinked directories blindly can cause infinite recursion due to
cycles.
Parameters:
root: root directory
rel_path: relative path to current symlink from ``root``
depth: depth of current symlink from the ``root`` directory
root (str): root directory
rel_path (str): relative path to current symlink from ``root``
depth (int): depth of current symlink from the ``root`` directory
Returns:
bool: ``True`` when the directory should be recursed into. ``False`` when
not"""
return False
def after_visit_dir(self, root: str, rel_path: str, depth: int) -> None:
"""Called after recursion into ``rel_path`` finished. This function is not called when
``rel_path`` was not recursed into.
def after_visit_dir(self, root, rel_path, depth):
"""Called after recursion into ``rel_path`` finished. This function is not
called when ``rel_path`` was not recursed into.
Parameters:
root: root directory
rel_path: relative path to current directory from ``root``
depth: depth of current directory from the ``root`` directory"""
root (str): root directory
rel_path (str): relative path to current directory from ``root``
depth (int): depth of current directory from the ``root`` directory"""
pass
def after_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> None:
"""Called after recursion into ``rel_path`` finished. This function is not called when
``rel_path`` was not recursed into.
def after_visit_symlinked_dir(self, root, rel_path, depth):
"""Called after recursion into ``rel_path`` finished. This function is not
called when ``rel_path`` was not recursed into.
Parameters:
root: root directory
rel_path: relative path to current symlink from ``root``
depth: depth of current symlink from the ``root`` directory"""
root (str): root directory
rel_path (str): relative path to current symlink from ``root``
depth (int): depth of current symlink from the ``root`` directory"""
pass
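# Illustrative sketch (not part of this diff): a minimal visitor built on the
# interface above. It records every regular file under a prefix and never
# follows symlinked directories, which avoids cycles.
class FileCollector(BaseDirectoryVisitor):
    def __init__(self):
        self.files = []

    def visit_file(self, root, rel_path, depth):
        self.files.append(rel_path)

    def before_visit_dir(self, root, rel_path, depth):
        return True  # recurse into every real directory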
def visit_directory_tree(
root: str, visitor: BaseDirectoryVisitor, rel_path: str = "", depth: int = 0
):
"""Recurses the directory root depth-first through a visitor pattern using the interface from
:py:class:`BaseDirectoryVisitor`
def visit_directory_tree(root, visitor, rel_path="", depth=0):
"""Recurses the directory root depth-first through a visitor pattern using the
interface from :py:class:`BaseDirectoryVisitor`
Parameters:
root: path of directory to recurse into
visitor: what visitor to use
rel_path: current relative path from the root
depth: current depth from the root
root (str): path of directory to recurse into
visitor (BaseDirectoryVisitor): what visitor to use
rel_path (str): current relative path from the root
depth (int): current depth from the root
"""
dir = os.path.join(root, rel_path)
dir_entries = sorted(os.scandir(dir), key=lambda d: d.name)
@@ -1514,19 +1498,26 @@ def visit_directory_tree(
for f in dir_entries:
rel_child = os.path.join(rel_path, f.name)
islink = f.is_symlink()
# On Windows, symlinks to directories are distinct from symlinks to files, and it is
# possible to create a broken symlink to a directory (e.g. using os.symlink without
# `target_is_directory=True`), invoking `isdir` on a symlink on Windows that is broken in
# this manner will result in an error. In this case we can work around the issue by reading
# the target and resolving the directory ourselves
# On Windows, symlinks to directories are distinct from
# symlinks to files, and it is possible to create a
# broken symlink to a directory (e.g. using os.symlink
# without `target_is_directory=True`), invoking `isdir`
# on a symlink on Windows that is broken in this manner
# will result in an error. In this case we can work around
# the issue by reading the target and resolving the
# directory ourselves
try:
isdir = f.is_dir()
except OSError as e:
if sys.platform == "win32" and hasattr(e, "winerror") and e.winerror == 5 and islink:
# if path is a symlink, determine destination and evaluate file vs directory
# if path is a symlink, determine destination and
# evaluate file vs directory
link_target = resolve_link_target_relative_to_the_link(f)
# link_target might be relative but resolve_link_target_relative_to_the_link
# will ensure that if so, that it is relative to the CWD and therefore makes sense
# link_target might be relative but
# resolve_link_target_relative_to_the_link
# will ensure that if so, that it is relative
# to the CWD and therefore
# makes sense
isdir = os.path.isdir(link_target)
else:
raise e

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -843,30 +843,6 @@ def __repr__(self):
return repr(self.instance)
def get_entry_points(*, group: str):
"""Wrapper for ``importlib.metadata.entry_points``
Args:
group: entry points to select
Returns:
EntryPoints for ``group`` or empty list if unsupported
"""
try:
import importlib.metadata # type: ignore # novermin
except ImportError:
return []
try:
return importlib.metadata.entry_points(group=group)
except TypeError:
# Prior to Python 3.10, entry_points accepted no parameters and always
# returned a dictionary of entry points, keyed by group. See
# https://docs.python.org/3/library/importlib.metadata.html#entry-points
return importlib.metadata.entry_points().get(group, [])
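# Usage sketch (illustrative; the entry-point group name is hypothetical):
#
#     for entry_point in get_entry_points(group="spack.extensions"):
#         module = entry_point.load()
#
# On Python < 3.10, the TypeError fallback above selects the same group from
# the dict returned by the parameterless entry_points() call.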
def load_module_from_file(module_name, module_path):
"""Loads a python module from the path of the corresponding file.

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -8,7 +8,7 @@
import filecmp
import os
import shutil
from typing import Callable, Dict, List, Optional, Tuple
from collections import OrderedDict
import llnl.util.tty as tty
from llnl.util.filesystem import BaseDirectoryVisitor, mkdirp, touch, traverse_tree
@@ -51,32 +51,32 @@ class SourceMergeVisitor(BaseDirectoryVisitor):
- A list of merge conflicts in dst/
"""
def __init__(self, ignore: Optional[Callable[[str], bool]] = None):
def __init__(self, ignore=None):
self.ignore = ignore if ignore is not None else lambda f: False
# When mapping <src root> to <dst root>/<projection>, we need to prepend the <projection>
# bit to the relative path in the destination dir.
self.projection: str = ""
# When mapping <src root> to <dst root>/<projection>, we need
# to prepend the <projection> bit to the relative path in the
# destination dir.
self.projection = ""
# Two files f and g conflict if they are not os.path.samefile(f, g) and they are both
# projected to the same destination file. These conflicts are not necessarily fatal, and
# can be resolved or ignored. For example <prefix>/LICENSE or
# <site-packages>/<namespace>/__init__.py conflicts can be ignored.
self.file_conflicts: List[MergeConflict] = []
# When a file blocks another file, the conflict can sometimes
# be resolved / ignored (e.g. <prefix>/LICENSE or
# <site-packages>/<namespace>/__init__.py conflicts can be
# ignored).
self.file_conflicts = []
# When we have to create a dir where a file is, or a file where a dir is, we have fatal
# errors, listed here.
self.fatal_conflicts: List[MergeConflict] = []
# When we have to create a dir where a file is, or a file
# where a dir is, we have fatal errors, listed here.
self.fatal_conflicts = []
# What directories we have to make; this is an ordered dict, so that we have a fast lookup
# and can run mkdir in order.
self.directories: Dict[str, Tuple[str, str]] = {}
# What directories we have to make; this is an ordered set,
# so that we have a fast lookup and can run mkdir in order.
self.directories = OrderedDict()
# Files to link. Maps dst_rel to (src_root, src_rel). This is an ordered dict, where files
# are guaranteed to be grouped by src_root in the order they were visited.
self.files: Dict[str, Tuple[str, str]] = {}
# Files to link. Maps dst_rel to (src_root, src_rel)
self.files = OrderedDict()
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_dir(self, root, rel_path, depth):
"""
Register a directory if dst / rel_path is not blocked by a file or ignored.
"""
@@ -104,7 +104,7 @@ def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
self.directories[proj_rel_path] = (root, rel_path)
return True
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Replace symlinked dirs with actual directories when possible in low depths,
otherwise handle it as a file (i.e. we link to the symlink).
@@ -136,56 +136,40 @@ def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bo
self.visit_file(root, rel_path, depth)
return False
def visit_file(self, root: str, rel_path: str, depth: int, *, symlink: bool = False) -> None:
def visit_file(self, root, rel_path, depth):
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
pass
elif proj_rel_path in self.directories:
# Can't create a file where a dir is; fatal error
src_a_root, src_a_relpath = self.directories[proj_rel_path]
self.fatal_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(*self.directories[proj_rel_path]),
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
elif proj_rel_path in self.files:
# When two files project to the same path, they conflict iff they are distinct.
# If they are the same (i.e. one links to the other), register regular files rather
# than symlinks. The reason is that in copy-type views, we need a copy of the actual
# file, not the symlink.
src_a = os.path.join(*self.files[proj_rel_path])
src_b = os.path.join(root, rel_path)
try:
samefile = os.path.samefile(src_a, src_b)
except OSError:
samefile = False
if not samefile:
# Distinct files produce a conflict.
self.file_conflicts.append(
MergeConflict(dst=proj_rel_path, src_a=src_a, src_b=src_b)
# In some cases we can resolve file-file conflicts
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.file_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
return
if not symlink:
# Remove the link in favor of the actual file. The del is necessary to maintain the
# order of the files dict, which is grouped by root.
del self.files[proj_rel_path]
self.files[proj_rel_path] = (root, rel_path)
)
else:
# Otherwise register this file to be linked.
self.files[proj_rel_path] = (root, rel_path)
def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth, symlink=True)
self.visit_file(root, rel_path, depth)
def set_projection(self, projection: str) -> None:
def set_projection(self, projection):
self.projection = os.path.normpath(projection)
# Todo, is this how to check in general for empty projection?
@@ -213,19 +197,24 @@ def set_projection(self, projection: str) -> None:
class DestinationMergeVisitor(BaseDirectoryVisitor):
"""DestinatinoMergeVisitor takes a SourceMergeVisitor and:
"""DestinatinoMergeVisitor takes a SourceMergeVisitor
and:
a. registers additional conflicts when merging to the destination prefix
b. removes redundant mkdir operations when directories already exist in the destination prefix.
a. registers additional conflicts when merging
to the destination prefix
b. removes redundant mkdir operations when
directories already exist in the destination
prefix.
This also makes sure that symlinked directories in the target prefix will never be merged with
This also makes sure that symlinked directories
in the target prefix will never be merged with
directories in the sources directories.
"""
def __init__(self, source_merge_visitor: SourceMergeVisitor):
def __init__(self, source_merge_visitor):
self.src = source_merge_visitor
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_dir(self, root, rel_path, depth):
# If destination dir is a file in a src dir, add a conflict,
# and don't traverse deeper
if rel_path in self.src.files:
@@ -247,7 +236,7 @@ def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
# don't descend into it.
return False
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Symlinked directories in the destination prefix should
be seen as files; we should not accidentally merge
@@ -273,7 +262,7 @@ def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bo
# Never descend into symlinked target dirs.
return False
def visit_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_file(self, root, rel_path, depth):
# Can't merge a file if target already exists
if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path]
@@ -291,7 +280,7 @@ def visit_file(self, root: str, rel_path: str, depth: int) -> None:
)
)
def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -189,7 +189,6 @@ def _windows_can_symlink() -> bool:
import llnl.util.filesystem as fs
fs.touchp(fpath)
fs.mkdirp(dpath)
try:
os.symlink(dpath, dlink)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -204,23 +204,17 @@ def color_when(value):
class match_to_ansi:
def __init__(self, color=True, enclose=False, zsh=False):
def __init__(self, color=True, enclose=False):
self.color = _color_when_value(color)
self.enclose = enclose
self.zsh = zsh
def escape(self, s):
"""Returns a TTY escape sequence for a color"""
if self.color:
if self.zsh:
result = rf"\e[0;{s}m"
else:
result = f"\033[{s}m"
if self.enclose:
result = rf"\[{result}\]"
return result
return r"\[\033[%sm\]" % s
else:
return "\033[%sm" % s
else:
return ""
@@ -267,11 +261,9 @@ def colorize(string, **kwargs):
codes, for output to non-console devices.
enclose (bool): If True, enclose ansi color sequences with
square brackets to prevent misestimation of terminal width.
zsh (bool): If True, use zsh ansi codes instead of bash ones (for variables like PS1)
"""
color = _color_when_value(kwargs.get("color", get_color_when()))
zsh = kwargs.get("zsh", False)
string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose"), zsh), string)
string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose")), string)
string = string.replace("}}", "}")
return string
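A hedged usage sketch of the new zsh flag (assumes a Spack checkout so that llnl.util.tty.color imports; "@g{...}" is Spack's color markup for green):

from llnl.util.tty.color import colorize

# bash-style prompt coloring: \[\033[...m\] escapes
bash_prompt = colorize("@g{spack}> ", color=True, enclose=True)
# zsh-style prompt coloring: \[\e[0;...m\] escapes instead
zsh_prompt = colorize("@g{spack}> ", color=True, enclose=True, zsh=True)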

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -244,7 +244,7 @@ def _search_duplicate_specs_in_externals(error_cls):
+ lines
+ ["as they might result in non-deterministic hashes"]
)
except (TypeError, AttributeError):
except TypeError:
details = []
errors.append(error_cls(summary=error_msg, details=details))
@@ -292,6 +292,12 @@ def _avoid_mismatched_variants(error_cls):
errors = []
packages_yaml = spack.config.CONFIG.get_config("packages")
def make_error(config_data, summary):
s = io.StringIO()
s.write("Occurring in the following file:\n")
syaml.dump_config(config_data, stream=s, blame=True)
return error_cls(summary=summary, details=[s.getvalue()])
for pkg_name in packages_yaml:
# 'all:' must be more forgiving, since it is setting defaults for everything
if pkg_name == "all" or "variants" not in packages_yaml[pkg_name]:
@@ -311,7 +317,7 @@ def _avoid_mismatched_variants(error_cls):
f"Setting a preference for the '{pkg_name}' package to the "
f"non-existing variant '{variant.name}'"
)
errors.append(_make_config_error(preferences, summary, error_cls=error_cls))
errors.append(make_error(preferences, summary))
continue
# Variant cannot accept this value
@@ -323,41 +329,11 @@ def _avoid_mismatched_variants(error_cls):
f"Setting the variant '{variant.name}' of the '{pkg_name}' package "
f"to the invalid value '{str(variant)}'"
)
errors.append(_make_config_error(preferences, summary, error_cls=error_cls))
errors.append(make_error(preferences, summary))
return errors
@config_packages
def _wrongly_named_spec(error_cls):
"""Warns if the wrong name is used for an external spec"""
errors = []
packages_yaml = spack.config.CONFIG.get_config("packages")
for pkg_name in packages_yaml:
if pkg_name == "all":
continue
externals = packages_yaml[pkg_name].get("externals", [])
is_virtual = spack.repo.PATH.is_virtual(pkg_name)
for entry in externals:
spec = spack.spec.Spec(entry["spec"])
regular_pkg_is_wrong = not is_virtual and pkg_name != spec.name
virtual_pkg_is_wrong = is_virtual and not any(
p.name == spec.name for p in spack.repo.PATH.providers_for(pkg_name)
)
if regular_pkg_is_wrong or virtual_pkg_is_wrong:
summary = f"Wrong external spec detected for '{pkg_name}': {spec}"
errors.append(_make_config_error(entry, summary, error_cls=error_cls))
return errors
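A standalone sketch of the name check above (assumes a Spack checkout; the external entry is hypothetical):

import spack.repo
import spack.spec

pkg_name = "openssl"
entry = {"spec": "zlib@1.2.13", "prefix": "/usr"}  # wrong: listed under 'openssl'
spec = spack.spec.Spec(entry["spec"])
if not spack.repo.PATH.is_virtual(pkg_name) and pkg_name != spec.name:
    print(f"Wrong external spec detected for '{pkg_name}': {spec}")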
def _make_config_error(config_data, summary, error_cls):
s = io.StringIO()
s.write("Occurring in the following file:\n")
syaml.dump_config(config_data, stream=s, blame=True)
return error_cls(summary=summary, details=[s.getvalue()])
#: Sanity checks on package directives
package_directives = AuditClass(
group="packages",
@@ -691,8 +667,8 @@ def _unknown_variants_in_directives(pkgs, error_cls):
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
# Check "conflicts" directive
for trigger, conflicts in pkg_cls.conflicts.items():
for conflict, _ in conflicts:
for conflict, triggers in pkg_cls.conflicts.items():
for trigger, _ in triggers:
vrn = spack.spec.Spec(conflict)
try:
vrn.constrain(trigger)
@@ -718,21 +694,25 @@ def _unknown_variants_in_directives(pkgs, error_cls):
)
# Check "depends_on" directive
for trigger in pkg_cls.dependencies:
vrn = spack.spec.Spec(trigger)
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="depends_on", error_cls=error_cls
for _, triggers in pkg_cls.dependencies.items():
triggers = list(triggers)
for trigger in list(triggers):
vrn = spack.spec.Spec(trigger)
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="depends_on", error_cls=error_cls
)
)
)
# Check "provides" directive
for when_spec in pkg_cls.provided:
errors.extend(
_analyze_variants_in_directive(
pkg_cls, when_spec, directive="provides", error_cls=error_cls
# Check "patch" directive
for _, triggers in pkg_cls.provided.items():
triggers = [spack.spec.Spec(x) for x in triggers]
for vrn in triggers:
errors.extend(
_analyze_variants_in_directive(
pkg_cls, vrn, directive="patch", error_cls=error_cls
)
)
)
# Check "resource" directive
for vrn in pkg_cls.resources:
@@ -756,92 +736,61 @@ def _issues_in_depends_on_directive(pkgs, error_cls):
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
filename = spack.repo.PATH.filename_for_package_name(pkg_name)
for when, deps_by_name in pkg_cls.dependencies.items():
for dep_name, dep in deps_by_name.items():
# Check if there are nested dependencies declared. We don't want directives like:
#
# depends_on('foo+bar ^fee+baz')
#
# but we'd like to have two dependencies listed instead.
nested_dependencies = dep.spec.dependencies()
for dependency_name, dependency_data in pkg_cls.dependencies.items():
# Check if there are nested dependencies declared. We don't want directives like:
#
# depends_on('foo+bar ^fee+baz')
#
# but we'd like to have two dependencies listed instead.
for when, dependency_edge in dependency_data.items():
dependency_spec = dependency_edge.spec
nested_dependencies = dependency_spec.dependencies()
if nested_dependencies:
summary = f"{pkg_name}: nested dependency declaration '{dep.spec}'"
ndir = len(nested_dependencies) + 1
summary = (
f"{pkg_name}: invalid nested dependency "
f"declaration '{str(dependency_spec)}'"
)
details = [
f"split depends_on('{dep.spec}', when='{when}') into {ndir} directives",
f"split depends_on('{str(dependency_spec)}', when='{str(when)}') "
f"into {len(nested_dependencies) + 1} directives",
f"in {filename}",
]
errors.append(error_cls(summary=summary, details=details))
def check_virtual_with_variants(spec, msg):
if not spec.virtual or not spec.variants:
return
error = error_cls(
f"{pkg_name}: {msg}",
f"remove variants from '{spec}' in depends_on directive in {filename}",
)
errors.append(error)
# No need to analyze virtual packages
if spack.repo.PATH.is_virtual(dependency_name):
continue
check_virtual_with_variants(dep.spec, "virtual dependency cannot have variants")
check_virtual_with_variants(dep.spec, "virtual when= spec cannot have variants")
try:
dependency_pkg_cls = spack.repo.PATH.get_pkg_class(dependency_name)
except spack.repo.UnknownPackageError:
# This dependency is completely missing, so report
# and continue the analysis
summary = pkg_name + ": unknown package '{0}' in " "'depends_on' directive".format(
dependency_name
)
details = [" in " + filename]
errors.append(error_cls(summary=summary, details=details))
continue
# No need to analyze virtual packages
if spack.repo.PATH.is_virtual(dep_name):
continue
# check for unknown dependencies
try:
dependency_pkg_cls = spack.repo.PATH.get_pkg_class(dep_name)
except spack.repo.UnknownPackageError:
# This dependency is completely missing, so report
# and continue the analysis
summary = f"{pkg_name}: unknown package '{dep_name}' in 'depends_on' directive"
details = [f" in {filename}"]
errors.append(error_cls(summary=summary, details=details))
continue
# Check for self-referential specs similar to:
#
# depends_on("foo@X.Y", when="^foo+bar")
#
# That would allow clingo to choose whether to have foo@X.Y+bar in the graph.
problematic_edges = [
x for x in when.edges_to_dependencies(dep_name) if not x.virtuals
]
if problematic_edges and not dep.patches:
summary = (
f"{pkg_name}: dependency on '{dep.spec}' when '{when}' is self-referential"
)
details = [
(
f" please specify better using '^[virtuals=...] {dep_name}', or "
f"substitute with an equivalent condition on '{pkg_name}'"
),
f" in {filename}",
]
errors.append(error_cls(summary=summary, details=details))
continue
# check variants
dependency_variants = dep.spec.variants
for _, dependency_edge in dependency_data.items():
dependency_variants = dependency_edge.spec.variants
for name, value in dependency_variants.items():
try:
v, _ = dependency_pkg_cls.variants[name]
v.validate_or_raise(value, pkg_cls=dependency_pkg_cls)
except Exception as e:
summary = (
f"{pkg_name}: wrong variant used for dependency in 'depends_on()'"
pkg_name + ": wrong variant used for a "
"dependency in a 'depends_on' directive"
)
error_msg = str(e).strip()
if isinstance(e, KeyError):
error_msg = (
f"variant {str(e).strip()} does not exist in package {dep_name}"
)
error_msg += f" in package '{dep_name}'"
error_msg = "the variant {0} does not " "exist".format(error_msg)
error_msg += " in package '" + dependency_name + "'"
errors.append(
error_cls(summary=summary, details=[error_msg, f"in {filename}"])
error_cls(summary=summary, details=[error_msg, "in " + filename])
)
return errors
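The nested-dependency condition above can be checked in isolation (assumes a Spack checkout; the spec is hypothetical):

import spack.spec

dep_spec = spack.spec.Spec("foo+bar ^fee+baz")  # a directive argument that should be split
assert dep_spec.dependencies()  # non-empty: the audit reports this declaration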
@@ -908,17 +857,14 @@ def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
filename = spack.repo.PATH.filename_for_package_name(pkg_name)
dependencies_to_check = []
for dependency_name, dependency_data in pkg_cls.dependencies.items():
# Skip virtual dependencies for the time being; a check on
# their versions can be added later
if spack.repo.PATH.is_virtual(dependency_name):
continue
for _, deps_by_name in pkg_cls.dependencies.items():
for dep_name, dep in deps_by_name.items():
# Skip virtual dependencies for the time being; a check on
# their versions can be added later
if spack.repo.PATH.is_virtual(dep_name):
continue
dependencies_to_check.append(dep.spec)
dependencies_to_check.extend([edge.spec for edge in dependency_data.values()])
host_architecture = spack.spec.ArchSpec.default_arch()
for s in dependencies_to_check:
@@ -990,53 +936,39 @@ def _named_specs_in_when_arguments(pkgs, error_cls):
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
def _refers_to_pkg(when):
when_spec = spack.spec.Spec(when)
return when_spec.name is None or when_spec.name == pkg_name
def _error_items(when_dict):
for when, elts in when_dict.items():
if not _refers_to_pkg(when):
yield when, elts, [f"using '{when}', should be '^{when}'"]
def _extracts_errors(triggers, summary):
_errors = []
for trigger in list(triggers):
if not _refers_to_pkg(trigger):
when_spec = spack.spec.Spec(trigger)
if when_spec.name is not None and when_spec.name != pkg_name:
details = [f"using '{trigger}', should be '^{trigger}'"]
_errors.append(error_cls(summary=summary, details=details))
return _errors
for when, dnames, details in _error_items(pkg_cls.dependencies):
errors.extend(
error_cls(f"{pkg_name}: wrong 'when=' condition for '{dname}' dependency", details)
for dname in dnames
)
for dname, triggers in pkg_cls.dependencies.items():
summary = f"{pkg_name}: wrong 'when=' condition for the '{dname}' dependency"
errors.extend(_extracts_errors(triggers, summary))
for vname, (variant, triggers) in pkg_cls.variants.items():
summary = f"{pkg_name}: wrong 'when=' condition for the '{vname}' variant"
errors.extend(_extracts_errors(triggers, summary))
for when, providers, details in _error_items(pkg_cls.provided):
errors.extend(
error_cls(f"{pkg_name}: wrong 'when=' condition for '{provided}' virtual", details)
for provided in providers
)
for provided, triggers in pkg_cls.provided.items():
summary = f"{pkg_name}: wrong 'when=' condition for the '{provided}' virtual"
errors.extend(_extracts_errors(triggers, summary))
for when, requirements, details in _error_items(pkg_cls.requirements):
errors.append(
error_cls(f"{pkg_name}: wrong 'when=' condition in 'requires' directive", details)
)
for _, triggers in pkg_cls.requirements.items():
triggers = [when_spec for when_spec, _, _ in triggers]
summary = f"{pkg_name}: wrong 'when=' condition in 'requires' directive"
errors.extend(_extracts_errors(triggers, summary))
for when, _, details in _error_items(pkg_cls.patches):
errors.append(
error_cls(f"{pkg_name}: wrong 'when=' condition in 'patch' directives", details)
)
triggers = list(pkg_cls.patches)
summary = f"{pkg_name}: wrong 'when=' condition in 'patch' directives"
errors.extend(_extracts_errors(triggers, summary))
for when, _, details in _error_items(pkg_cls.resources):
errors.append(
error_cls(f"{pkg_name}: wrong 'when=' condition in 'resource' directives", details)
)
triggers = list(pkg_cls.resources)
summary = f"{pkg_name}: wrong 'when=' condition in 'resource' directives"
errors.extend(_extracts_errors(triggers, summary))
return llnl.util.lang.dedupe(errors)
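The `_refers_to_pkg` test above, standalone (assumes a Spack checkout): a when= condition should describe the package itself, and naming some other package is the mistake these audits report (it should be written as "^other"):

import spack.spec

def refers_to_pkg(when: str, pkg_name: str) -> bool:
    when_spec = spack.spec.Spec(when)
    return when_spec.name is None or when_spec.name == pkg_name

assert refers_to_pkg("+mpi", "hdf5")            # anonymous condition: fine
assert not refers_to_pkg("openmpi@4:", "hdf5")  # names another package: flagged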

View File

@@ -1,10 +1,11 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import codecs
import collections
import errno
import hashlib
import io
import itertools
@@ -22,8 +23,9 @@
import urllib.parse
import urllib.request
import warnings
from contextlib import closing
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
from contextlib import closing, contextmanager
from gzip import GzipFile
from typing import Dict, List, NamedTuple, Optional, Set, Tuple
from urllib.error import HTTPError, URLError
import llnl.util.filesystem as fsys
@@ -48,7 +50,6 @@
import spack.stage
import spack.store
import spack.traverse as traverse
import spack.util.archive
import spack.util.crypto
import spack.util.file_cache as file_cache
import spack.util.gpg
@@ -67,10 +68,7 @@
BUILD_CACHE_RELATIVE_PATH = "build_cache"
BUILD_CACHE_KEYS_RELATIVE_PATH = "_pgp"
#: The build cache layout version that this version of Spack creates.
#: Version 2: includes parent directories of the package prefix in the tarball
CURRENT_BUILD_CACHE_LAYOUT_VERSION = 2
CURRENT_BUILD_CACHE_LAYOUT_VERSION = 1
class BuildCacheDatabase(spack_db.Database):
@@ -1132,46 +1130,195 @@ def generate_key_index(key_prefix, tmpdir=None):
shutil.rmtree(tmpdir)
@contextmanager
def gzip_compressed_tarfile(path):
"""Create a reproducible, compressed tarfile"""
# Create gzip compressed tarball of the install prefix
# 1) Use explicit empty filename and mtime 0 for gzip header reproducibility.
# If the filename="" is dropped, Python will use fileobj.name instead.
# This should effectively mimic `gzip --no-name`.
# 2) On an AMD Ryzen 3700X with an SSD, compression speed is roughly:
# compresslevel=6 gzip default: llvm takes 4mins, roughly 2.1GB
# compresslevel=9 python default: llvm takes 12mins, roughly 2.1GB
# So we follow gzip's default.
with open(path, "wb") as f, ChecksumWriter(f) as inner_checksum, closing(
GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=inner_checksum)
) as gzip_file, ChecksumWriter(gzip_file) as outer_checksum, tarfile.TarFile(
name="", mode="w", fileobj=outer_checksum
) as tar:
yield tar, inner_checksum, outer_checksum
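The reproducibility notes above can be verified without Spack; a minimal sketch using only the standard library:

import gzip
import io

def gz_bytes(payload: bytes) -> bytes:
    # filename="" and mtime=0 keep the gzip header free of names and
    # timestamps, mimicking `gzip --no-name`.
    buf = io.BytesIO()
    with gzip.GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=buf) as f:
        f.write(payload)
    return buf.getvalue()

assert gz_bytes(b"spack") == gz_bytes(b"spack")  # byte-identical streams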
def _tarinfo_name(absolute_path: str, *, _path=pathlib.PurePath) -> str:
"""Compute tarfile entry name as the relative path from the (system) root."""
return _path(*_path(absolute_path).parts[1:]).as_posix()
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
"""Create a tarfile of an install prefix of a spec. Skips existing buildinfo file.
Only adds regular files, symlinks and dirs. Skips devices, fifos. Preserves hardlinks.
Normalizes permissions like git. Tar entries are added in depth-first pre-order, with
dir entries partitioned by file | dir, and sorted alphabetically, for reproducibility.
Partitioning ensures only one dir is in memory at a time, and sorting improves compression.
Args:
tar: tarfile object to add files to
prefix: absolute install prefix of spec"""
if not os.path.isabs(prefix) or not os.path.isdir(prefix):
raise ValueError(f"prefix '{prefix}' must be an absolute path to a directory")
hardlink_to_tarinfo_name: Dict[Tuple[int, int], str] = dict()
stat_key = lambda stat: (stat.st_dev, stat.st_ino)
try: # skip buildinfo file if it exists
files_to_skip = [stat_key(os.lstat(buildinfo_file_name(prefix)))]
skip = lambda entry: stat_key(entry.stat(follow_symlinks=False)) in files_to_skip
except OSError:
skip = lambda entry: False
files_to_skip = []
spack.util.archive.reproducible_tarfile_from_prefix(
tar,
prefix,
# Spack <= 0.21 did not include parent directories, leading to issues when tarballs are
# used in runtimes like AWS lambda.
include_parent_directories=True,
skip=skip,
)
dir_stack = [prefix]
while dir_stack:
dir = dir_stack.pop()
# Add the dir before its contents
dir_info = tarfile.TarInfo(_tarinfo_name(dir))
dir_info.type = tarfile.DIRTYPE
dir_info.mode = 0o755
tar.addfile(dir_info)
# Sort by name: reproducible & improves compression
with os.scandir(dir) as it:
entries = sorted(it, key=lambda entry: entry.name)
new_dirs = []
for entry in entries:
if entry.is_dir(follow_symlinks=False):
new_dirs.append(entry.path)
continue
file_info = tarfile.TarInfo(_tarinfo_name(entry.path))
s = entry.stat(follow_symlinks=False)
# Skip existing binary distribution files.
id = stat_key(s)
if id in files_to_skip:
continue
# Normalize the mode
file_info.mode = 0o644 if s.st_mode & 0o100 == 0 else 0o755
if entry.is_symlink():
file_info.type = tarfile.SYMTYPE
file_info.linkname = os.readlink(entry.path)
tar.addfile(file_info)
elif entry.is_file(follow_symlinks=False):
# Deduplicate hardlinks
if s.st_nlink > 1:
if id in hardlink_to_tarinfo_name:
file_info.type = tarfile.LNKTYPE
file_info.linkname = hardlink_to_tarinfo_name[id]
tar.addfile(file_info)
continue
hardlink_to_tarinfo_name[id] = file_info.name
# If file not yet seen, copy it.
file_info.type = tarfile.REGTYPE
file_info.size = s.st_size
with open(entry.path, "rb") as f:
tar.addfile(file_info, f)
dir_stack.extend(reversed(new_dirs)) # we pop, so reverse to stay alphabetical
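The git-style permission normalization used in both versions above, in isolation:

def normalized_mode(st_mode: int) -> int:
    # Entries with the owner-execute bit become 0o755; everything else 0o644.
    return 0o644 if st_mode & 0o100 == 0 else 0o755

assert normalized_mode(0o750) == 0o755
assert normalized_mode(0o600) == 0o644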
class ChecksumWriter(io.BufferedIOBase):
"""Checksum writer computes a checksum while writing to a file."""
myfileobj = None
def __init__(self, fileobj, algorithm=hashlib.sha256):
self.fileobj = fileobj
self.hasher = algorithm()
self.length = 0
def hexdigest(self):
return self.hasher.hexdigest()
def write(self, data):
if isinstance(data, (bytes, bytearray)):
length = len(data)
else:
data = memoryview(data)
length = data.nbytes
if length > 0:
self.fileobj.write(data)
self.hasher.update(data)
self.length += length
return length
def read(self, size=-1):
raise OSError(errno.EBADF, "read() on write-only object")
def read1(self, size=-1):
raise OSError(errno.EBADF, "read1() on write-only object")
def peek(self, n):
raise OSError(errno.EBADF, "peek() on write-only object")
@property
def closed(self):
return self.fileobj is None
def close(self):
fileobj = self.fileobj
if fileobj is None:
return
self.fileobj.close()
self.fileobj = None
def flush(self):
self.fileobj.flush()
def fileno(self):
return self.fileobj.fileno()
def rewind(self):
raise OSError("Can't rewind while computing checksum")
def readable(self):
return False
def writable(self):
return True
def seekable(self):
return True
def tell(self):
return self.fileobj.tell()
def seek(self, offset, whence=io.SEEK_SET):
# In principle forward seek is possible with b"0" padding,
# but this is not implemented.
if offset == 0 and whence == io.SEEK_CUR:
return
raise OSError("Can't seek while computing checksum")
def readline(self, size=-1):
raise OSError(errno.EBADF, "readline() on write-only object")
def _do_create_tarball(tarfile_path: str, binaries_dir: str, buildinfo: dict):
with spack.util.archive.gzip_compressed_tarfile(tarfile_path) as (
tar,
inner_checksum,
outer_checksum,
):
with gzip_compressed_tarfile(tarfile_path) as (tar, inner_checksum, outer_checksum):
# Tarball the install prefix
tarfile_of_spec_prefix(tar, binaries_dir)
# Serialize buildinfo for the tarball
bstring = syaml.dump(buildinfo, default_flow_style=True).encode("utf-8")
tarinfo = tarfile.TarInfo(
name=spack.util.archive.default_path_to_name(buildinfo_file_name(binaries_dir))
)
tarinfo = tarfile.TarInfo(name=_tarinfo_name(buildinfo_file_name(binaries_dir)))
tarinfo.type = tarfile.REGTYPE
tarinfo.size = len(bstring)
tarinfo.mode = 0o644
@@ -1458,14 +1605,14 @@ def _get_valid_spec_file(path: str, max_supported_layout: int) -> Tuple[Dict, int]:
return spec_dict, layout_version
def download_tarball(spec, unsigned: Optional[bool] = False, mirrors_for_spec=None):
def download_tarball(spec, unsigned=False, mirrors_for_spec=None):
"""
Download binary tarball for given package into stage area, returning
path to downloaded tarball if successful, None otherwise.
Args:
spec (spack.spec.Spec): Concrete spec
unsigned: if ``True`` or ``False`` override the mirror signature verification defaults
unsigned (bool): Whether or not to require signed binaries
mirrors_for_spec (list): Optional list of concrete specs and mirrors
obtained by calling binary_distribution.get_mirrors_for_spec().
These will be checked in order first before looking in other
@@ -1486,9 +1633,7 @@ def download_tarball(spec, unsigned: Optional[bool] = False, mirrors_for_spec=None):
"signature_verified": "true-if-binary-pkg-was-already-verified"
}
"""
configured_mirrors: Iterable[spack.mirror.Mirror] = spack.mirror.MirrorCollection(
binary=True
).values()
configured_mirrors = spack.mirror.MirrorCollection(binary=True).values()
if not configured_mirrors:
tty.die("Please add a spack mirror to allow download of pre-compiled packages.")
@@ -1506,16 +1651,8 @@ def download_tarball(spec, unsigned: Optional[bool] = False, mirrors_for_spec=None):
# mirror for the spec twice though.
try_first = [i["mirror_url"] for i in mirrors_for_spec] if mirrors_for_spec else []
try_next = [i.fetch_url for i in configured_mirrors if i.fetch_url not in try_first]
mirror_urls = try_first + try_next
# TODO: turn `mirrors_for_spec` into a list of Mirror instances, instead of doing that here.
def fetch_url_to_mirror(url):
for mirror in configured_mirrors:
if mirror.fetch_url == url:
return mirror
return spack.mirror.Mirror(url)
mirrors = [fetch_url_to_mirror(url) for url in mirror_urls]
mirrors = try_first + try_next
tried_to_verify_sigs = []
@@ -1524,24 +1661,21 @@ def fetch_url_to_mirror(url):
# we remove support for deprecated spec formats and buildcache layouts.
for try_signed in (True, False):
for mirror in mirrors:
# Override the mirror's signed default if "unsigned" was explicitly given
currently_unsigned = unsigned if unsigned is not None else not mirror.signed
# If it's an OCI index, do things differently, since we cannot compose URLs.
fetch_url = mirror.fetch_url
parsed = urllib.parse.urlparse(mirror)
# TODO: refactor this to some "nice" place.
if fetch_url.startswith("oci://"):
ref = spack.oci.image.ImageReference.from_string(
fetch_url[len("oci://") :]
).with_tag(spack.oci.image.default_tag(spec))
if parsed.scheme == "oci":
ref = spack.oci.image.ImageReference.from_string(mirror[len("oci://") :]).with_tag(
spack.oci.image.default_tag(spec)
)
# Fetch the manifest
try:
response = spack.oci.opener.urlopen(
urllib.request.Request(
url=ref.manifest_url(),
headers={"Accept": ", ".join(spack.oci.oci.manifest_content_type)},
headers={"Accept": "application/vnd.oci.image.manifest.v1+json"},
)
)
except Exception:
@@ -1571,7 +1705,7 @@ def fetch_url_to_mirror(url):
except InvalidMetadataFile as e:
tty.warn(
f"Ignoring binary package for {spec.name}/{spec.dag_hash()[:7]} "
f"from {fetch_url} due to invalid metadata file: {e}"
f"from {mirror} due to invalid metadata file: {e}"
)
local_specfile_stage.destroy()
continue
@@ -1593,16 +1727,13 @@ def fetch_url_to_mirror(url):
"tarball_stage": tarball_stage,
"specfile_stage": local_specfile_stage,
"signature_verified": False,
"signature_required": not currently_unsigned,
}
else:
ext = "json.sig" if try_signed else "json"
specfile_path = url_util.join(
fetch_url, BUILD_CACHE_RELATIVE_PATH, specfile_prefix
)
specfile_path = url_util.join(mirror, BUILD_CACHE_RELATIVE_PATH, specfile_prefix)
specfile_url = f"{specfile_path}.{ext}"
spackfile_url = url_util.join(fetch_url, BUILD_CACHE_RELATIVE_PATH, tarball)
spackfile_url = url_util.join(mirror, BUILD_CACHE_RELATIVE_PATH, tarball)
local_specfile_stage = try_fetch(specfile_url)
if local_specfile_stage:
local_specfile_path = local_specfile_stage.save_filename
@@ -1615,21 +1746,21 @@ def fetch_url_to_mirror(url):
except InvalidMetadataFile as e:
tty.warn(
f"Ignoring binary package for {spec.name}/{spec.dag_hash()[:7]} "
f"from {fetch_url} due to invalid metadata file: {e}"
f"from {mirror} due to invalid metadata file: {e}"
)
local_specfile_stage.destroy()
continue
if try_signed and not currently_unsigned:
if try_signed and not unsigned:
# If we found a signed specfile at the root, try to verify
# the signature immediately. We will not download the
# tarball if we could not verify the signature.
tried_to_verify_sigs.append(specfile_url)
signature_verified = try_verify(local_specfile_path)
if not signature_verified:
tty.warn(f"Failed to verify: {specfile_url}")
tty.warn("Failed to verify: {0}".format(specfile_url))
if currently_unsigned or signature_verified or not try_signed:
if unsigned or signature_verified or not try_signed:
# We will download the tarball in one of three cases:
# 1. user asked for --no-check-signature
# 2. user didn't ask for --no-check-signature, but we
@@ -1652,7 +1783,6 @@ def fetch_url_to_mirror(url):
"tarball_stage": tarball_stage,
"specfile_stage": local_specfile_stage,
"signature_verified": signature_verified,
"signature_required": not currently_unsigned,
}
local_specfile_stage.destroy()
@@ -1851,7 +1981,7 @@ def is_backup_file(file):
relocate.relocate_text(text_names, prefix_to_prefix_text)
def _extract_inner_tarball(spec, filename, extract_to, signature_required: bool, remote_checksum):
def _extract_inner_tarball(spec, filename, extract_to, unsigned, remote_checksum):
stagepath = os.path.dirname(filename)
spackfile_name = tarball_name(spec, ".spack")
spackfile_path = os.path.join(stagepath, spackfile_name)
@@ -1871,7 +2001,7 @@ def _extract_inner_tarball(spec, filename, extract_to, signature_required: bool, remote_checksum):
else:
raise ValueError("Cannot find spec file for {0}.".format(extract_to))
if signature_required:
if not unsigned:
if os.path.exists("%s.asc" % specfile_path):
suppress = config.get("config:suppress_gpg_warnings", False)
try:
@@ -1900,12 +2030,11 @@ def _extract_inner_tarball(spec, filename, extract_to, signature_required: bool, remote_checksum):
def _tar_strip_component(tar: tarfile.TarFile, prefix: str):
"""Yield all members of tarfile that start with given prefix, and strip that prefix (including
symlinks)"""
"""Strip the top-level directory `prefix` from the member names in a tarfile."""
# Including trailing /, otherwise we end up with absolute paths.
regex = re.compile(re.escape(prefix) + "/*")
# Only yield members in the package prefix.
# Remove the top-level directory from the member (link)names.
# Note: when a tarfile is created, relative in-prefix symlinks are
# expanded to matching member names of tarfile entries. So, we have
# to ensure that those are updated too.
@@ -1913,17 +2042,15 @@ def _tar_strip_component(tar: tarfile.TarFile, prefix: str):
# them.
for m in tar.getmembers():
result = regex.match(m.name)
if not result:
continue
assert result is not None
m.name = m.name[result.end() :]
if m.linkname:
result = regex.match(m.linkname)
if result:
m.linkname = m.linkname[result.end() :]
yield m
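A sketch of how the new generator form above is consumed (paths are hypothetical; assumes _tar_strip_component is in scope): members outside the prefix are skipped, and the rest are extracted already renamed:

import tarfile

with tarfile.open("/tmp/zlib.spack", "r") as tar:
    tar.extractall(
        path="/opt/view/zlib",
        members=_tar_strip_component(tar, prefix="opt/spack/zlib"),
    )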
def extract_tarball(spec, download_result, force=False, timer=timer.NULL_TIMER):
def extract_tarball(spec, download_result, unsigned=False, force=False, timer=timer.NULL_TIMER):
"""
extract binary tarball for given package into install area
"""
@@ -1949,8 +2076,7 @@ def extract_tarball(spec, download_result, force=False, timer=timer.NULL_TIMER):
bchecksum = spec_dict["binary_cache_checksum"]
filename = download_result["tarball_stage"].save_filename
signature_verified: bool = download_result["signature_verified"]
signature_required: bool = download_result["signature_required"]
signature_verified = download_result["signature_verified"]
tmpdir = None
if layout_version == 0:
@@ -1959,14 +2085,12 @@ def extract_tarball(spec, download_result, force=False, timer=timer.NULL_TIMER):
# and another tarball containing the actual install tree.
tmpdir = tempfile.mkdtemp()
try:
tarfile_path = _extract_inner_tarball(
spec, filename, tmpdir, signature_required, bchecksum
)
tarfile_path = _extract_inner_tarball(spec, filename, tmpdir, unsigned, bchecksum)
except Exception as e:
_delete_staged_downloads(download_result)
shutil.rmtree(tmpdir)
raise e
elif 1 <= layout_version <= 2:
elif layout_version == 1:
# Newer buildcache layout: the .spack file contains just the tarball
# of the install tree; the signature, if it exists, is
# wrapped around the spec.json at the root. If sig verify
@@ -1974,10 +2098,9 @@ def extract_tarball(spec, download_result, force=False, timer=timer.NULL_TIMER):
# the tarball.
tarfile_path = filename
if signature_required and not signature_verified:
if not unsigned and not signature_verified:
raise UnsignedPackageException(
"To install unsigned packages, use the --no-check-signature option, "
"or configure the mirror with signed: false."
"To install unsigned packages, use the --no-check-signature option."
)
# compute the sha256 checksum of the tarball
@@ -1994,10 +2117,8 @@ def extract_tarball(spec, download_result, force=False, timer=timer.NULL_TIMER):
try:
with closing(tarfile.open(tarfile_path, "r")) as tar:
# Remove the install prefix from the tarfile to extract directly into spec.prefix
tar.extractall(
path=spec.prefix,
members=_tar_strip_component(tar, prefix=_ensure_common_prefix(tar)),
)
_tar_strip_component(tar, prefix=_ensure_common_prefix(tar))
tar.extractall(path=spec.prefix)
except Exception:
shutil.rmtree(spec.prefix, ignore_errors=True)
_delete_staged_downloads(download_result)
@@ -2032,47 +2153,20 @@ def extract_tarball(spec, download_result, force=False, timer=timer.NULL_TIMER):
def _ensure_common_prefix(tar: tarfile.TarFile) -> str:
# Find the lowest `binary_distribution` file (hard-coded forward slash is on purpose).
binary_distribution = min(
(
e.name
for e in tar.getmembers()
if e.isfile() and e.name.endswith(".spack/binary_distribution")
),
key=len,
default=None,
)
# Get the shortest length directory.
common_prefix = min((e.name for e in tar.getmembers() if e.isdir()), key=len, default=None)
if binary_distribution is None:
raise ValueError("Tarball is not a Spack package, missing binary_distribution file")
if common_prefix is None:
raise ValueError("Tarball does not contain a common prefix")
pkg_path = pathlib.PurePosixPath(binary_distribution).parent.parent
# Even the most ancient Spack version has required to list the dir of the package itself, so
# guard against broken tarballs where `path.parent.parent` is empty.
if pkg_path == pathlib.PurePosixPath():
raise ValueError("Invalid tarball, missing package prefix dir")
pkg_prefix = str(pkg_path)
# Ensure all tar entries are in the pkg_prefix dir, and if they're not, they should be parent
# dirs of it.
has_prefix = False
# Validate that each file starts with the prefix
for member in tar.getmembers():
stripped = member.name.rstrip("/")
if not (
stripped.startswith(pkg_prefix) or member.isdir() and pkg_prefix.startswith(stripped)
):
raise ValueError(f"Tarball contains file {stripped} outside of prefix {pkg_prefix}")
if member.isdir() and stripped == pkg_prefix:
has_prefix = True
if not member.name.startswith(common_prefix):
raise ValueError(
f"Tarball contains file {member.name} outside of prefix {common_prefix}"
)
# This is technically not required, but let's be defensive about the existence of the package
# prefix dir.
if not has_prefix:
raise ValueError(f"Tarball does not contain a common prefix {pkg_prefix}")
return pkg_prefix
return common_prefix
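The new prefix derivation above, in isolation: the package prefix sits two path components above the `.spack/binary_distribution` entry (the member name here is hypothetical):

import pathlib

name = "opt/spack/linux-x86_64/zlib-1.3-abcdef/.spack/binary_distribution"
pkg_prefix = str(pathlib.PurePosixPath(name).parent.parent)
assert pkg_prefix == "opt/spack/linux-x86_64/zlib-1.3-abcdef"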
def install_root_node(spec, unsigned=False, force=False, sha256=None):
@@ -2119,7 +2213,7 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
# don't print long padded paths while extracting/relocating binaries
with spack.util.path.filter_padding():
tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
extract_tarball(spec, download_result, force)
extract_tarball(spec, download_result, unsigned, force)
spack.hooks.post_install(spec, False)
spack.store.STORE.db.add(spec, spack.store.STORE.layout)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -542,7 +542,7 @@ def verify_patchelf(patchelf: "spack.util.executable.Executable") -> bool:
return version >= spack.version.Version("0.13.1")
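A minimal sketch of the version gate above (assumes a Spack checkout so that spack.version imports; patchelf versions are examples):

import spack.version

assert spack.version.Version("0.17.2") >= spack.version.Version("0.13.1")
assert not spack.version.Version("0.12") >= spack.version.Version("0.13.1")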
def ensure_patchelf_in_path_or_raise() -> spack.util.executable.Executable:
def ensure_patchelf_in_path_or_raise() -> None:
"""Ensure patchelf is in the PATH or raise."""
# The old concretizer is not smart and we're doing its job: if the latest patchelf
# does not concretize because the compiler doesn't support C++17, we try to

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -146,7 +146,7 @@ def mypy_root_spec() -> str:
def black_root_spec() -> str:
"""Return the root spec used to bootstrap black"""
return _root_spec("py-black@:24.1.0")
return _root_spec("py-black@:23.1.0")
def flake8_root_spec() -> str:

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

Some files were not shown because too many files have changed in this diff.