Compare commits

..

7 Commits

Author SHA1 Message Date
Cameron Smith
9dead10d70 fix style 2024-01-11 19:21:57 -05:00
Angel Castillo
63ba7db2d2 find mpi exec (#42050) 2024-01-11 14:31:06 -05:00
Cameron Smith
19c0208c1a pumi: fix style 2024-01-10 10:34:12 -05:00
Cameron Smith
9682347254 pumi: fix mpi test paths 2024-01-10 10:34:12 -05:00
Cameron Smith
f4f7309504 pumi: test dir fixes
thank you @tldahlgren
2024-01-10 10:34:11 -05:00
Cameron Smith
071a34df27 double quotes 2024-01-10 10:34:11 -05:00
Cameron Smith
8d35a8498b pumi: fix path to smoketest input data 2024-01-10 10:34:11 -05:00
1533 changed files with 17163 additions and 45233 deletions

View File

@@ -1,6 +0,0 @@
<!--
Remember that `spackbot` can help with your PR in multiple ways:
- `@spackbot help` shows all the commands that are currently available
- `@spackbot fix style` tries to push a commit to fix style issues in this PR
- `@spackbot re-run pipeline` runs the pipelines again, if you have write access to the repository
-->

View File

@@ -22,7 +22,7 @@ jobs:
matrix:
operating_system: ["ubuntu-latest", "macos-latest"]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: ${{inputs.python_version}}
@@ -43,7 +43,7 @@ jobs:
. share/spack/setup-env.sh
$(which spack) audit packages
$(which spack) audit externals
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # @v2.1.0
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,audits

View File

@@ -24,7 +24,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup repo
@@ -158,7 +158,7 @@ jobs:
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: "3.12"
@@ -182,7 +182,7 @@ jobs:
run: |
brew install tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap clingo
run: |
set -ex
@@ -207,7 +207,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup repo
@@ -250,7 +250,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -287,7 +287,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -320,7 +320,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -338,7 +338,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh

View File

@@ -55,9 +55,9 @@ jobs:
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
- uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c
id: docker_meta
with:
images: |
@@ -96,10 +96,10 @@ jobs:
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@0d103c3126aa41d772a8362f6aa67afac040f80c
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226
- name: Log in to GitHub Container Registry
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -107,16 +107,18 @@ jobs:
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@af5a7ed5ba88268d5278f7203fb52cd833f66d6e
uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}
push: ${{ github.event_name != 'pull_request' }}
cache-from: type=gha
cache-to: type=gha,mode=max
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

View File

@@ -35,12 +35,12 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below

View File

@@ -14,7 +14,7 @@ jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c

View File

@@ -1,5 +1,5 @@
black==24.3.0
clingo==5.7.1
black==23.12.1
clingo==5.6.2
flake8==7.0.0
isort==5.13.2
mypy==1.8.0

View File

@@ -51,7 +51,7 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -91,14 +91,14 @@ jobs:
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,linux,${{ matrix.concretizer }}
# Test shell integration
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -122,7 +122,7 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: shelltests,linux
@@ -137,7 +137,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -156,7 +156,7 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -181,7 +181,7 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # @v2.1.0
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
with:
flags: unittests,linux,clingo
# Run unit tests on MacOS
@@ -191,7 +191,7 @@ jobs:
matrix:
python-version: ["3.11"]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -216,6 +216,6 @@ jobs:
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,macos

View File

@@ -18,7 +18,7 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with:
python-version: '3.11'
@@ -35,7 +35,7 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
@@ -69,7 +69,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user
run: |
git --version

View File

@@ -15,7 +15,7 @@ jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
@@ -33,13 +33,13 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,windows
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
@@ -57,13 +57,13 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,windows
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c

View File

@@ -1130,10 +1130,6 @@ A version specifier can also be a list of ranges and specific versions,
separated by commas. For example, ``@1.0:1.5,=1.7.1`` matches any version
in the range ``1.0:1.5`` and the specific version ``1.7.1``.
^^^^^^^^^^^^
Git versions
^^^^^^^^^^^^
For packages with a ``git`` attribute, ``git`` references
may be specified instead of a numerical version i.e. branches, tags
and commits. Spack will stage and build based off the ``git``
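To make the notation above concrete, a minimal, hypothetical ``package.py`` combining a ranged version constraint with git-based versions might look like the sketch below (the package name, repository URL, versions, and dependency are all made up):
.. code-block:: python

   from spack.package import *

   class Example(Package):
       """Hypothetical package used only to illustrate version syntax."""

       homepage = "https://example.org"
       git = "https://github.com/example/example.git"

       # Ordinary numeric releases
       version("1.7.1")
       version("1.4.0")

       # Git references: a branch, a tag, and a specific commit
       version("develop", branch="develop")
       version("2024.01", tag="v2024.01")
       version("experimental", commit="0123456789abcdef0123456789abcdef01234567")

       # A list of ranges and exact versions: anything in 1.0:1.5 or exactly 1.7.1
       depends_on("other-pkg@1.0:1.5,=1.7.1")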

View File

@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrappin
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
@@ -175,4 +175,4 @@ bootstrapping.
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.
commands suggested at the prompt, they can be used to bootstrap Spack.

View File

@@ -173,72 +173,6 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding
]
^^^^^^^
Testing
^^^^^^^
``PerlPackage`` provides a simple stand-alone test of the successfully
installed package to confirm that installed perl module(s) can be used.
These tests can be performed any time after the installation using
``spack -v test run``. (For more information on the command, see
:ref:`cmd-spack-test-run`.)
The base class automatically detects perl modules based on the presence
of ``*.pm`` files under the package's library directory. For example,
the files under ``perl-bignum``'s perl library are:
.. code-block:: console
$ find . -name "*.pm"
./bigfloat.pm
./bigrat.pm
./Math/BigFloat/Trace.pm
./Math/BigInt/Trace.pm
./Math/BigRat/Trace.pm
./bigint.pm
./bignum.pm
which results in the package having the ``use_modules`` property containing:
.. code-block:: python
use_modules = [
"bigfloat",
"bigrat",
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
"bigint",
"bignum",
]
.. note::
This list can often be used to catch missing dependencies.
If the list is somehow wrong, you can provide the names of the modules
yourself by overriding ``use_modules`` like so:
.. code-block:: python
use_modules = ["bigfloat", "bigrat", "bigint", "bignum"]
If you only want a subset of the automatically detected modules to be
tested, you could instead define the ``skip_modules`` property on the
package. So, instead of overriding ``use_modules`` as shown above, you
could define the following:
.. code-block:: python
skip_modules = [
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
]
for the same use tests.
^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack
^^^^^^^^^^^^^^^^^^^^^

View File

@@ -199,7 +199,6 @@ def setup(sphinx):
("py:class", "contextlib.contextmanager"),
("py:class", "module"),
("py:class", "_io.BufferedReader"),
("py:class", "_io.BytesIO"),
("py:class", "unittest.case.TestCase"),
("py:class", "_frozen_importlib_external.SourceFileLoader"),
("py:class", "clingo.Control"),
@@ -216,7 +215,6 @@ def setup(sphinx):
("py:class", "spack.spec.InstallStatus"),
("py:class", "spack.spec.SpecfileReaderBase"),
("py:class", "spack.install_test.Pb"),
("py:class", "spack.filesystem_view.SimpleFilesystemView"),
]
# The reST default role (used for this markup: `text`) to use for all documents.

View File

@@ -73,12 +73,9 @@ are six configuration scopes. From lowest to highest:
Spack instance per project) or for site-wide settings on a multi-user
machine (e.g., for a common Spack instance).
#. **plugin**: Read from a Python project's entry points. Settings here affect
all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes.
#. **user**: Stored in the home directory: ``~/.spack/``. These settings
affect all instances of Spack and take higher precedence than site,
system, plugin, or defaults scopes.
system, or defaults scopes.
#. **custom**: Stored in a custom directory specified by ``--config-scope``.
If multiple scopes are listed on the command line, they are ordered
@@ -199,45 +196,6 @@ with MPICH. You can create different configuration scopes for use with
mpi: [mpich]
.. _plugin-scopes:
^^^^^^^^^^^^^
Plugin scopes
^^^^^^^^^^^^^
.. note::
Python version >= 3.8 is required to enable plugin configuration.
Spack can be made aware of configuration scopes that are installed as part of a python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   ├── __init__.py
│   │   └── spack/
│   │   │   └── config.yaml
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.config"]
my_package = "my_package:get_config_path"
The function ``my_package.get_config_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_config_path():
dirname = importlib.resources.files("my_package").joinpath("spack")
if dirname.exists():
return str(dirname)
.. _platform-scopes:
------------------------

View File

@@ -357,23 +357,91 @@ If there is a hook that you would like and is missing, you can propose to add a
``pre_install(spec)``
"""""""""""""""""""""
A ``pre_install`` hook is run within the install subprocess, directly before the install starts.
It expects a single argument of a spec.
A ``pre_install`` hook is run within an install subprocess, directly before
the install starts. It expects a single argument of a spec, and is run in
a multiprocessing subprocess. Note that if you see ``pre_install`` functions associated with packages, these are not hooks
as we have defined them here, but rather callback functions associated with
a package install.
"""""""""""""""""""""""""""""""""""""
``post_install(spec, explicit=None)``
"""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""
``post_install(spec)``
""""""""""""""""""""""
A ``post_install`` hook is run within the install subprocess, directly after the install finishes,
but before the build stage is removed and the spec is registered in the database. It expects two
arguments: spec and an optional boolean indicating whether this spec is being installed explicitly.
A ``post_install`` hook is run within an install subprocess, directly after
the install finishes, but before the build stage is removed. If you
write one of these hooks, you should expect it to accept a spec as the only
argument. This is run in a multiprocessing subprocess. This ``post_install`` is
also seen in packages, but in this context not related to the hooks described
here.
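As a concrete illustration, a hypothetical hook module might define these two functions roughly as follows. This is a sketch based on the signatures described above (the use of ``llnl.util.tty`` for output is an assumption), not a hook that ships with Spack:
.. code-block:: python

   import llnl.util.tty as tty  # Spack's terminal output helper (assumed available)

   def pre_install(spec):
       # Runs in the install subprocess just before the build starts
       tty.debug(f"pre_install: about to build {spec.name}@{spec.version}")

   def post_install(spec, explicit=None):
       # Runs in the install subprocess right after a successful build;
       # `explicit` says whether the user requested this spec directly
       if explicit:
           tty.msg(f"post_install: explicitly installed {spec.name}@{spec.version}")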
""""""""""""""""""""""""""""""""""""""""""""""""""""
``pre_uninstall(spec)`` and ``post_uninstall(spec)``
""""""""""""""""""""""""""""""""""""""""""""""""""""
These hooks are currently used for cleaning up module files after uninstall.
""""""""""""""""""""""""""
``on_install_start(spec)``
""""""""""""""""""""""""""
This hook is run at the beginning of ``lib/spack/spack/installer.py``,
in the install function of a ``PackageInstaller``,
and importantly is not part of a build process, but before it. This is when
we have just newly grabbed the task, and are preparing to install. If you
write a hook of this type, you should provide the spec to it.
.. code-block:: python
def on_install_start(spec):
"""On start of an install, we want to...
"""
print('on_install_start')
""""""""""""""""""""""""""""
``on_install_success(spec)``
""""""""""""""""""""""""""""
This hook is run on a successful install, and is also run inside the build
process, akin to ``post_install``. The main difference is that this hook
is run outside of the context of the stage directory, meaning after the
build stage has been removed and the user is alerted that the install was
successful. If you need to write a hook that is run on success of a particular
phase, you should use ``on_phase_success``.
""""""""""""""""""""""""""""
``on_install_failure(spec)``
""""""""""""""""""""""""""""
This hook is run given an install failure that happens outside of the build
subprocess, but somewhere in ``installer.py`` when something else goes wrong.
If you need to write a hook that is relevant to a failure within a build
process, you would want to instead use ``on_phase_failure``.
"""""""""""""""""""""""""""
``on_install_cancel(spec)``
"""""""""""""""""""""""""""
The same, but triggered if a spec install is cancelled for any reason.
"""""""""""""""""""""""""""""""""""""""""""""""
``on_phase_success(pkg, phase_name, log_file)``
"""""""""""""""""""""""""""""""""""""""""""""""
This hook is run within the install subprocess, and specifically when a phase
successfully finishes. Since we are interested in the package, the name of
the phase, and any output from it, we require:
- **pkg**: the package variable, which also has the attached spec at ``pkg.spec``
- **phase_name**: the name of the phase that was successful (e.g., configure)
- **log_file**: the path to the file with output, in case you need to inspect or otherwise interact with it.
"""""""""""""""""""""""""""""""""""""""""""""
``on_phase_error(pkg, phase_name, log_file)``
"""""""""""""""""""""""""""""""""""""""""""""
In the case of an error during a phase, we might want to trigger some event
with a hook, and this is the purpose of this particular hook. Akin to
``on_phase_success`` we require the same variables - the package that failed,
the name of the phase, and the log file where we might find errors.
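For example, a hypothetical pair of phase hooks that merely record outcomes could be sketched like this (the logging details are an assumption, not Spack's actual behavior):
.. code-block:: python

   import os

   def on_phase_success(pkg, phase_name, log_file):
       # pkg.spec carries the concrete spec being built
       size = os.path.getsize(log_file) if os.path.exists(log_file) else 0
       print(f"{pkg.spec.name}: phase '{phase_name}' succeeded ({size} bytes of output)")

   def on_phase_error(pkg, phase_name, log_file):
       print(f"{pkg.spec.name}: phase '{phase_name}' failed, see {log_file}")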
^^^^^^^^^^^^^^^^^^^^^^

View File

@@ -142,21 +142,6 @@ user's prompt to begin with the environment name in brackets.
$ spack env activate -p myenv
[myenv] $ ...
The ``activate`` command can also be used to create a new environment, if it is
not already defined, by adding the ``--create`` flag. Managed and anonymous
environments (anonymous environments are explained in the next section)
can both be created using the same flags that ``spack env create`` accepts.
If an environment already exists, Spack will simply activate it and ignore the
create-specific flags.
.. code-block:: console
$ spack env activate --create -p myenv
# ...
# [creates if myenv does not exist yet]
# ...
[myenv] $ ...
To deactivate an environment, use the command:
.. code-block:: console
@@ -416,23 +401,6 @@ that git clone if ``foo`` is in the environment.
Further development on ``foo`` can be tested by reinstalling the environment,
and eventually committed and pushed to the upstream git repo.
If the package being developed supports out-of-source builds then users can use the
``--build_directory`` flag to control the location and name of the build directory.
This is a shortcut to set the ``package_attributes:build_directory`` in the
``packages`` configuration (see :ref:`assigning-package-attributes`).
The supplied location will become the build-directory for that package in all future builds.
.. warning::
Potential pitfalls of setting the build directory
Spack does not check for out-of-source build compatibility with the packages, so
the onus of making sure the package supports out-of-source builds is on
the user.
For example, most ``autotools`` and ``makefile`` packages do not support out-of-source builds
while all ``CMake`` packages do.
Understanding these nuances is the responsibility of the software developers, and we
strongly encourage developers to only redirect the build directory if they understand
their package's build system.
^^^^^^^
Loading
^^^^^^^
@@ -489,11 +457,11 @@ a ``packages.yaml`` file) could contain:
.. code-block:: yaml
spack:
# ...
...
packages:
all:
compiler: [intel]
# ...
...
This configuration sets the default compiler for all packages to
``intel``.
@@ -839,7 +807,7 @@ directories.
.. code-block:: yaml
spack:
# ...
...
view:
mpis:
root: /path/to/view
@@ -883,7 +851,7 @@ automatically named ``default``, so that
.. code-block:: yaml
spack:
# ...
...
view: True
is equivalent to
@@ -891,7 +859,7 @@ is equivalent to
.. code-block:: yaml
spack:
# ...
...
view:
default:
root: .spack-env/view
@@ -901,7 +869,7 @@ and
.. code-block:: yaml
spack:
# ...
...
view: /path/to/view
is equivalent to
@@ -909,7 +877,7 @@ is equivalent to
.. code-block:: yaml
spack:
# ...
...
view:
default:
root: /path/to/view
@@ -952,17 +920,6 @@ function, as shown in the example below:
^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
all: "{name}-{version}/{compiler.name}-{compiler.version}"
Projections also permit environment and spack configuration variable
expansions as shown below:
.. code-block:: yaml
projections:
all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIBLE"
where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.
The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will
be the first non-``all`` entry that the spec satisfies, or ``all`` if

View File

@@ -111,39 +111,3 @@ The corresponding unit tests can be run giving the appropriate options to ``spac
(5 durations < 0.005s hidden. Use -vv to show these durations.)
=========================================== 5 passed in 5.06s ============================================
---------------------------------------
Registering Extensions via Entry Points
---------------------------------------
.. note::
Python version >= 3.8 is required to register extensions via entry points.
Spack can be made aware of extensions that are installed as part of a python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   └── __init__.py
│   └── spack-scripting/ # the spack extensions
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.extensions"]
my_package = "my_package:get_extension_path"
The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_extension_path():
dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
if dirname.exists():
return str(dirname)

View File

@@ -250,10 +250,9 @@ Compiler configuration
Spack has the ability to build packages with multiple compilers and
compiler versions. Compilers can be made available to Spack by
specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
or automatically by running ``spack compiler find``, but for
convenience Spack will automatically detect compilers the first time
it needs them.
specifying them manually in ``compilers.yaml``, or automatically by
running ``spack compiler find``, but for convenience Spack will
automatically detect compilers the first time it needs them.
.. _cmd-spack-compilers:
@@ -458,48 +457,6 @@ specification. The operations available to modify the environment are ``set``, `
prepend_path: # Similar for append|remove_path
LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
.. note::
Spack is in the process of moving compilers from a separate
attribute to be handled like all other packages. As part of this
process, the ``compilers.yaml`` section will eventually be replaced
by configuration in the ``packages.yaml`` section. This new
configuration is now available, although it is not yet the default
behavior.
Compilers can also be configured as external packages in the
``packages.yaml`` config file. Any external package for a compiler
(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
assuming the paths to the compiler executables are determinable from
the prefix.
If the paths to the compiler executable are not determinable from the
prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler.
.. code-block:: yaml
packages:
gcc:
external:
- spec: gcc@12.2.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
environment:
set:
GCC_ROOT: /usr
external:
- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
paths:
cc: /usr/bin/clang-with-suffix
cxx: /usr/bin/clang++-with-extra-info
fc: /usr/bin/gfortran
f77: /usr/bin/gfortran
extra_rpaths:
- /usr/lib/llvm/
^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler
@@ -666,7 +623,7 @@ Fortran.
compilers:
- compiler:
# ...
...
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++

View File

@@ -10,7 +10,7 @@ Modules (modules.yaml)
======================
The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is sometimes embraced also by
is a common practice at HPC centers that is often embraced also by
individual programmers on their development machines. To support this
common practice Spack integrates with `Environment Modules
<http://modules.sourceforge.net/>`_ and `Lmod
@@ -21,38 +21,14 @@ Modules are one of several ways you can use Spack packages. For other
options that may fit your use case better, you should also look at
:ref:`spack load <spack-load>` and :ref:`environments <environments>`.
-----------
Quick start
-----------
----------------------------
Using module files via Spack
----------------------------
In the current version of Spack, module files are not generated by default. To get started, you
can generate module files for all currently installed packages by running either
.. code-block:: console
$ spack module tcl refresh
or
.. code-block:: console
$ spack module lmod refresh
Spack can also generate module files for all future installations automatically through the
following configuration:
.. code-block:: console
$ spack config add modules:default:enable:[tcl]
or
.. code-block:: console
$ spack config add modules:default:enable:[lmod]
Assuming you have a module system installed, you should now be able to use the ``module`` command
to interact with them:
If you have installed a supported module system you should be able to
run ``module avail`` to see what module
files have been installed. Here is sample output of those programs,
showing lots of installed packages:
.. code-block:: console
@@ -89,17 +65,33 @@ scheme used at your site.
Module file customization
-------------------------
Module files are generated by post-install hooks after the successful
installation of a package.
.. note::
Spack only generates modulefiles when a package is installed. If
you attempt to install a package and it is already installed, Spack
will not regenerate modulefiles for the package. This may lead to
inconsistent modulefiles if the Spack module configuration has
changed since the package was installed, either by editing a file
or changing scopes or environments.
Later in this section there is a subsection on :ref:`regenerating
modules <cmd-spack-module-refresh>` that will allow you to bring
your modules to a consistent state.
The table below summarizes the essential information associated with
the different file formats that can be generated by Spack:
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
| | Hierarchical | **Default root directory** | **Default template file** | **Compatible tools** |
+===========+==============+==============================+==============================================+======================+
| ``tcl`` | No | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod |
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
| ``lmod`` | Yes | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod |
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| | **Hook name** | **Default root directory** | **Default template file** | **Compatible tools** |
+=============================+====================+===============================+==============================================+======================+
| **Tcl - Non-Hierarchical** | ``tcl`` | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| **Lua - Hierarchical** | ``lmod`` | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
Spack ships with sensible defaults for the generation of module files, but
@@ -110,7 +102,7 @@ In general you can override or extend the default behavior by:
2. writing specific rules in the ``modules.yaml`` configuration file
3. writing your own templates to override or extend the defaults
The former method lets you express changes in the run-time environment
The former method let you express changes in the run-time environment
that are needed to use the installed software properly, e.g. injecting variables
from language interpreters into their extensions. The latter two instead permit to
fine tune the filesystem layout, content and creation of module files to meet
@@ -118,62 +110,79 @@ site specific conventions.
.. _overide-api-calls-in-package-py:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting environment variables dynamically in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Override API calls in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two methods that you can implement in any ``package.py`` to dynamically affect the
content of the module files generated by Spack. The most important one is
``setup_run_environment``, which can be used to set environment variables in the module file that
depend on the spec:
There are two methods that you can override in any ``package.py`` to affect the
content of the module files generated by Spack. The first one:
.. code-block:: python
def setup_run_environment(self, env):
if self.spec.satisfies("+foo"):
env.set("FOO", "bar")
pass
The second, less commonly used, is ``setup_dependent_run_environment(self, env, dependent_spec)``,
which allows a dependency to set variables in the module file of its dependents. This is typically
used in packages like ``python``, ``r``, or ``perl`` to prepend the dependent's prefix to the
search path of the interpreter (``PYTHONPATH``, ``R_LIBS``, ``PERL5LIB`` resp.), so it can locate
the packages at runtime.
For example, a simplified version of the ``python`` package could look like this:
can alter the content of the module file associated with the same package where it is overridden.
The second method:
.. code-block:: python
def setup_dependent_run_environment(self, env, dependent_spec):
if dependent_spec.package.extends(self.spec):
env.prepend_path("PYTHONPATH", dependent_spec.prefix.lib.python)
pass
and would make any package that ``extends("python")`` have its library directory added to the
``PYTHONPATH`` environment variable in the module file. It's much more convenient to set this
variable here, than to repeat it in every Python extension's ``setup_run_environment`` method.
can instead inject run-time environment modifications in the module files of packages
that depend on it. In both cases you need to fill ``env`` with the desired
list of environment modifications.
.. admonition:: The ``r`` package and callback APIs
An example in which it is crucial to override both methods
is given by the ``r`` package. This package installs libraries and headers
in non-standard locations and it is possible to prepend the appropriate directory
to the corresponding environment variables:
================== =================================
LD_LIBRARY_PATH ``self.prefix/rlib/R/lib``
PKG_CONFIG_PATH ``self.prefix/rlib/pkgconfig``
================== =================================
with the following snippet:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_run_environment
The ``r`` package also knows which environment variable should be modified
to make language extensions provided by other packages available, and modifies
it appropriately in the override of the second method:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_dependent_run_environment
.. _modules-yaml:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``modules.yaml`` config file and module sets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^
Write a configuration file
^^^^^^^^^^^^^^^^^^^^^^^^^^
The configuration files that control module generation behavior are named ``modules.yaml``. The
default configuration looks like this:
The configuration files that control module generation behavior
are named ``modules.yaml``. The default configuration:
.. literalinclude:: _spack_root/etc/spack/defaults/modules.yaml
:language: yaml
You can define one or more **module sets**, each of which can be configured separately with regard
to install location, naming scheme, inclusion and exclusion, autoloading, et cetera.
activates the hooks to generate ``tcl`` module files and inspects
the installation folder of each package for the presence of a set of subdirectories
(``bin``, ``man``, ``share/man``, etc.). If any is found its full path is prepended
to the environment variables listed below the folder name.
The default module set is aptly named ``default``. All
:ref:`Spack commands that operate on modules <maintaining-module-files>` apply to the ``default``
module set, unless another module set is specified explicitly (with the ``--name`` flag).
Spack modules can be configured for multiple module sets. The default
module set is named ``default``. All Spack commands which operate on
modules default to apply the ``default`` module set, but can be
applied to any module set in the configuration.
^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""
Changing the modules root
^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""
As shown in the table above, the default module root for ``lmod`` is
``$spack/share/spack/lmod`` and the default root for ``tcl`` is
@@ -189,7 +198,7 @@ set by changing the ``roots`` key of the configuration.
my_custom_lmod_modules:
roots:
lmod: /path/to/install/custom/lmod/modules
# ...
...
This configuration will create two module sets. The default module set
will install its ``tcl`` modules to ``/path/to/install/tcl/modules``
@@ -215,32 +224,25 @@ location could be confusing to users of your modules. In the next
section, we will discuss enabling and disabling module types (module
file generators) for each module set.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically generating module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""
Activate other hooks
""""""""""""""""""""
Spack can be configured to automatically generate module files as part of package installation.
This is done by adding the desired module systems to the ``enable`` list.
Any other module file generator shipped with Spack can be activated by adding it to the
list under the ``enable`` key in the module file. Currently the only generator that
is not active by default is ``lmod``, which produces hierarchical lua module files.
Each module system can then be configured separately. In fact, you should list configuration
options that affect a particular type of module files under a top level key corresponding
to the generator being customized:
.. code-block:: yaml
modules:
default:
enable:
- tcl
- lmod
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuring ``tcl`` and ``lmod`` modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can configure the behavior of either module system separately, under a key corresponding to
the generator being customized:
.. code-block:: yaml
modules:
default:
- tcl
- lmod
tcl:
# contains environment modules specific customizations
lmod:
@@ -251,82 +253,16 @@ either change the layout of the module files on the filesystem, or they will aff
their content. For the latter point it is possible to use anonymous specs
to fine tune the set of packages on which the modifications should be applied.
.. _autoloading-dependencies:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Autoloading and hiding dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A module file should set the variables that are needed for an application to work. But since an
application often has many dependencies, where should all the environment variables for those be
set? In Spack the rule is that each package sets the runtime variables that are needed by the
package itself, and no more. This way, dependencies can be loaded standalone too, and duplication
of environment variables is avoided.
That means however that if you want to use an application, you need to load the modules for all its
dependencies. Of course this is not something you would want users to do manually.
Since Spack knows the dependency graph of every package, it can easily generate module files that
automatically load the modules for its dependencies recursively. It is enabled by default for both
Lmod and Environment Modules under the ``autoload: direct`` config option. The former system has
builtin support through the ``depends_on`` function, the latter simply uses a ``module load``
statement. Both module systems (at least in newer versions) do reference counting, so that if a
module is loaded by two different modules, it will only be unloaded after the others are.
The ``autoload`` key accepts the values:
* ``none``: no autoloading
* ``run``: autoload direct *run* type dependencies
* ``direct``: autoload direct *link and run* type dependencies
* ``all``: autoload all dependencies
In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.
The ``direct`` option is most correct: there are cases where pure link dependencies need to set
variables for themselves, or need to have variables of their own dependencies set.
In practice however, ``run`` is often sufficient, and may make ``module load`` snappier.
The ``all`` option is discouraged and seldom used.
A common complaint about autoloading is the large number of modules that are visible to the user.
Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
packages you've explicitly installed are exposed by ``module avail``, but still allows for
autoloading of hidden dependencies. Lmod should support hiding implicits in general, while
Environment Modules requires version 4.7 or higher.
.. note::
If supported by your module system, we highly encourage the following configuration that enables
autoloading and hiding of implicits. It ensures all runtime variables are set correctly,
including those for dependencies, without overwhelming the user with a large number of available
modules. Further, it makes it easier to get readable module names without collisions, see the
section below on :ref:`modules-projections`.
.. code-block:: yaml
modules:
default:
tcl:
hide_implicits: true
all:
autoload: direct # or `run`
lmod:
hide_implicits: true
all:
autoload: direct # or `run`
.. _anonymous_specs:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting environment variables for selected packages in config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""
Selection by anonymous specs
""""""""""""""""""""""""""""
In the configuration file you can filter particular specs, and make further changes to the
environment variables that go into their module files. This is very powerful when you want to avoid
:ref:`modifying the package itself <overide-api-calls-in-package-py>`, or when you want to set
certain variables on multiple selected packages at once.
For instance, in the snippet below:
In the configuration file you can use *anonymous specs* (i.e. specs
that **are not required to have a root package** and are thus used just
to express constraints) to apply certain modifications on a selected set
of the installed software. For instance, in the snippet below:
.. code-block:: yaml
@@ -369,28 +305,12 @@ the variable ``FOOBAR`` will be unset.
.. note::
Order does matter
The modifications associated with the ``all`` keyword are always evaluated
first, no matter where they appear in the configuration file. All the other changes to
environment variables for matching specs are evaluated from top to bottom.
first, no matter where they appear in the configuration file. All the other
spec constraints are instead evaluated top to bottom.
.. warning::
As general advice, it's often better to set as few unnecessary variables as possible. For
example, the following seemingly innocent and potentially useful configuration
.. code-block:: yaml
all:
environment:
set:
"{name}_ROOT": "{prefix}"
sets ``BINUTILS_ROOT`` to its prefix in modules for ``binutils``, which happens to break
the ``gcc`` compiler: it uses this variable as its default search path for certain object
files and libraries, and by merely setting it, everything fails to link.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""""""""""
Exclude or include specific module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""""""""""
You can use anonymous specs also to prevent module files from being written or
to force them to be written. Consider the case where you want to hide from users
@@ -410,19 +330,14 @@ you will prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
or any ``llvm`` installation.
It is safe to combine ``exclude`` and ``autoload``
:ref:`mentioned above <autoloading-dependencies>`. When ``exclude`` prevents a module file to be
generated for a dependency, the ``autoload`` feature will simply not generate a statement to load
it.
.. _modules-projections:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""
Customize the naming of modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""
The names of environment modules generated by Spack are not always easy to
The names of environment modules generated by spack are not always easy to
fully comprehend due to the long hash in the name. There are three module
configuration options to help with that. The first is a global setting to
adjust the hash length. It can be set anywhere from 0 to 32 and has a default
@@ -438,13 +353,6 @@ shows how to set hash length in the module file names:
tcl:
hash_length: 7
.. tip::
Using ``hide_implicits: true`` (see :ref:`autoloading-dependencies`) vastly reduces the number of
modules exposed to the user. The hidden modules always contain the hash in their name, and are
not influenced by the ``hash_length`` setting. Hidden implicits thus make it easier to use a
short hash length or no hash at all, without risking name conflicts.
To help make module names more readable, and to help alleviate name conflicts
with a short hash, one can use the ``suffixes`` option in the modules
configuration file. This option will add strings to modules that match a spec.
@@ -457,12 +365,12 @@ For instance, the following config options,
tcl:
all:
suffixes:
^python@3.12: 'python-3.12'
^python@2.7.12: 'python-2.7.12'
^openblas: 'openblas'
will add a ``python-3.12`` version string to any packages compiled with
Python matching the spec, ``python@3.12``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the
will add a ``python-2.7.12`` version string to any packages compiled with
python matching the spec, ``python@2.7.12``. This is useful to know which
version of python a set of python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.
@@ -560,11 +468,41 @@ that are already in the Lmod hierarchy.
For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
See `this discussion on the Lmod project <https://github.com/TACC/Lmod/issues/114>`_.
""""""""""""""""""""""
Select default modules
""""""""""""""""""""""
By default, when multiple modules of the same name share a directory,
the highest version number will be the default module. This behavior
of the ``module`` command can be overridden with a symlink named
``default`` to the desired default module. If you wish to configure
default modules with Spack, add a ``defaults`` key to your modules
configuration:
.. code-block:: yaml
modules:
my-module-set:
tcl:
defaults:
- gcc@10.2.1
- hdf5@1.2.10+mpi+hl%gcc
These defaults may be arbitrarily specific. For any package that
satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.
.. warning::
If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
.. _customize-env-modifications:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""""""
Customize environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""""""""""""""""""""""""""""""""""
You can control which prefixes in a Spack package are added to
environment variables with the ``prefix_inspections`` section; this
@@ -662,9 +600,9 @@ stack to users who are likely to inspect the modules to find full
paths to software, when it is desirable to present the users with a
simpler set of paths than those generated by the Spack install tree.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""
Filter out environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""""""""""
Modifications to certain environment variables in module files are there by
default, for instance because they are generated by prefix inspections.
@@ -684,37 +622,49 @@ do so by using the ``exclude_env_vars``:
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.
^^^^^^^^^^^^^^^^^^^^^^
Select default modules
^^^^^^^^^^^^^^^^^^^^^^
By default, when multiple modules of the same name share a directory,
the highest version number will be the default module. This behavior
of the ``module`` command can be overridden with a symlink named
``default`` to the desired default module. If you wish to configure
default modules with Spack, add a ``defaults`` key to your modules
configuration:
.. _autoloading-dependencies:
"""""""""""""""""""""
Autoload dependencies
"""""""""""""""""""""
Often it is required for a module to have its (transitive) dependencies loaded as well.
One example where this is useful is when one package needs to use executables provided
by its dependency; when the dependency is autoloaded, the executable will be in the
PATH. Similarly for scripting languages such as Python, packages and their dependencies
have to be loaded together.
Autoloading is enabled by default for Lmod and Environment Modules. The former
has builtin support through the ``depends_on`` function. The latter uses a
``module load`` statement to load and track dependencies.
Autoloading can also be enabled conditionally:
.. code-block:: yaml
modules:
my-module-set:
tcl:
defaults:
- gcc@10.2.1
- hdf5@1.2.10+mpi+hl%gcc
modules:
default:
tcl:
all:
autoload: none
^python:
autoload: direct
These defaults may be arbitrarily specific. For any package that
satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.
The configuration file above will produce module files that will
load their direct dependencies if the package installed depends on ``python``.
The allowed values for the ``autoload`` statement are either ``none``,
``direct`` or ``all``.
.. warning::
If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
.. _maintaining-module-files:
.. note::
Tcl prerequisites
In the ``tcl`` section of the configuration file it is possible to use
the ``prerequisites`` directive that accepts the same values as
``autoload``. It will produce module files that have a ``prereq``
statement, which autoloads dependencies on Environment Modules when its
``auto_handling`` configuration option is enabled. If Environment Modules
is installed with Spack, ``auto_handling`` is enabled by default starting with
version 4.2. Otherwise it is enabled by default since version 5.0.
------------------------
Maintaining Module Files

View File

@@ -487,56 +487,6 @@ present. For instance with a configuration like:
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Conflicts and strong preferences
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the semantic of requirements is too strong, you can also express "strong preferences" and "conflicts"
from configuration files:
.. code-block:: yaml
packages:
all:
prefer:
- '%clang'
conflict:
- '+shared'
The ``prefer`` and ``conflict`` sections can be used whenever a ``require`` section is allowed.
The argument is always a list of constraints, and each constraint can be either a simple string,
or a more complex object:
.. code-block:: yaml
packages:
all:
conflict:
- spec: '%clang'
when: 'target=x86_64_v3'
message: 'reason why clang cannot be used'
The ``spec`` attribute is mandatory, while both ``when`` and ``message`` are optional.
.. note::
Requirements allow for expressing both "strong preferences" and "conflicts".
The syntax for doing so, though, may not be immediately clear. For
instance, if we want to prevent any package from using ``%clang``, we can set:
.. code-block:: yaml
packages:
all:
require:
- one_of: ['%clang', '@:']
Since only one of the requirements must hold, and ``@:`` is always true, the rule above is
equivalent to a conflict. For "strong preferences" we need to substitute the ``one_of`` policy
with ``any_of``.
.. _package-preferences:
-------------------
@@ -647,8 +597,6 @@ manually placed files within the install prefix are owned by the
assigned group. If no group is assigned, Spack will allow the OS
default behavior to go as expected.
.. _assigning-package-attributes:
----------------------------
Assigning Package Attributes
----------------------------
@@ -659,11 +607,10 @@ You can assign class-level attributes in the configuration:
   packages:
     mpileaks:
       package_attributes:
         # Override existing attributes
         url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
         # ... or add new ones
         x: 1
Attributes set this way will be accessible to any method executed
in the package.py file (e.g. the ``install()`` method). Values for these
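A minimal sketch of how such attributes might be read inside a recipe; the
package name, ``url`` value and ``x`` attribute are just the hypothetical ones
from the configuration above, and the recipe body is illustrative only:

.. code-block:: python

   from spack.package import *


   class Mpileaks(AutotoolsPackage):
       """Hypothetical recipe used only to illustrate ``package_attributes``."""

       def install(self, spec, prefix):
           # Attributes assigned through ``package_attributes`` in packages.yaml
           # are visible on the package object inside any method.
           print(self.url)  # http://www.somewhereelse.com/mpileaks-1.0.tar.gz
           print(self.x)    # 1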
View File
@@ -6979,18 +6979,3 @@ you probably care most about are:
You may also care about `license exceptions
<https://spdx.org/licenses/exceptions-index.html>`_ that use the ``WITH`` operator,
e.g. ``Apache-2.0 WITH LLVM-exception``.
Many of the licenses that are currently in the Spack repositories have been
determined automatically. While this is great for adding license information
in bulk, and is most likely correct, there are sometimes edge cases that
require manual intervention. To determine which licenses have been validated and
which have not, there is the ``checked_by`` parameter in the license directive:
.. code-block:: python

   license("<license>", when="<when>", checked_by="<github username>")
When you have validated a package's license, either explicitly or
as part of packaging a new package, please set the ``checked_by`` parameter
to your GitHub username to signal that the license has been manually
verified.
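As a purely illustrative example of what a manually verified entry might look
like (the SPDX identifier and username below are placeholders, not taken from a
real package):

.. code-block:: python

   # Placeholder values; use the real SPDX expression and your own handle.
   license("Apache-2.0 WITH LLVM-exception", checked_by="your-github-username")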
View File
@@ -810,7 +810,7 @@ generated by ``spack ci generate``. You also want your generated rebuild jobs
.. code-block:: yaml
spack:
# ...
...
ci:
pipeline-gen:
- build-job:
View File
@@ -17,7 +17,7 @@ experimental software separately from the built-in repository. Spack
allows you to configure local repositories using either the
``repos.yaml`` or the ``spack repo`` command.
A package repository is a directory structured like this::
A package repository a directory structured like this::
repo/
repo.yaml
View File
@@ -2,12 +2,12 @@ sphinx==7.2.6
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.0
python-levenshtein==0.23.0
docutils==0.20.1
pygments==2.17.2
urllib3==2.2.1
pytest==8.1.1
urllib3==2.1.0
pytest==7.4.4
isort==5.13.2
black==24.3.0
black==23.12.1
flake8==7.0.0
mypy==1.9.0
mypy==1.8.0
View File
@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf)
* Version: 0.2.2 (commit 1dc58a5776dd77e6fc6e4ba5626af5b1fb24996e)
astunparse
----------------
View File
@@ -1,3 +1,2 @@
"""Init file to avoid namespace packages"""
__version__ = "0.2.3"
__version__ = "0.2.2"
View File
@@ -3,7 +3,6 @@
"""
import sys
from .cli import main
sys.exit(main())
View File
@@ -46,11 +46,7 @@ def _make_parser() -> argparse.ArgumentParser:
def cpu() -> int:
"""Run the `archspec cpu` subcommand."""
try:
print(archspec.cpu.host())
except FileNotFoundError as exc:
print(exc)
return 1
print(archspec.cpu.host())
return 0
View File
@@ -5,14 +5,10 @@
"""The "cpu" package permits to query and compare different
CPU microarchitectures.
"""
from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
from .microarchitecture import TARGETS, generic_microarchitecture
from .microarchitecture import version_components
from .detect import host
from .microarchitecture import (
TARGETS,
Microarchitecture,
UnsupportedMicroarchitecture,
generic_microarchitecture,
version_components,
)
__all__ = [
"Microarchitecture",
View File
@@ -4,17 +4,15 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures"""
import collections
import functools
import os
import platform
import re
import struct
import subprocess
import warnings
from typing import Dict, List, Optional, Set, Tuple, Union
from ..vendor.cpuid.cpuid import CPUID
from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture
from .schema import CPUID_JSON, TARGETS_JSON
from .microarchitecture import generic_microarchitecture, TARGETS
from .schema import TARGETS_JSON
#: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu
@@ -24,46 +22,43 @@
#: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {}
# Constants for commonly used architectures
X86_64 = "x86_64"
AARCH64 = "aarch64"
PPC64LE = "ppc64le"
PPC64 = "ppc64"
RISCV64 = "riscv64"
def detection(operating_system: str):
"""Decorator to mark functions that are meant to return partial information on the current cpu.
def info_dict(operating_system):
"""Decorator to mark functions that are meant to return raw info on
the current cpu.
Args:
operating_system: operating system where this function can be used.
operating_system (str or tuple): operating system for which the marked
function is a viable factory of raw info dictionaries.
"""
def decorator(factory):
INFO_FACTORY[operating_system].append(factory)
return factory
@functools.wraps(factory)
def _impl():
info = factory()
# Check that info contains a few mandatory fields
msg = 'field "{0}" is missing from raw info dictionary'
assert "vendor_id" in info, msg.format("vendor_id")
assert "flags" in info, msg.format("flags")
assert "model" in info, msg.format("model")
assert "model_name" in info, msg.format("model_name")
return info
return _impl
return decorator
def partial_uarch(
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
"""Construct a partial microarchitecture, from information gathered during system scan."""
return Microarchitecture(
name=name,
parents=[],
vendor=vendor,
features=features or set(),
compilers={},
generation=generation,
)
@detection(operating_system="Linux")
def proc_cpuinfo() -> Microarchitecture:
"""Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``"""
data = {}
@info_dict(operating_system="Linux")
def proc_cpuinfo():
"""Returns a raw info dictionary by parsing the first entry of
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
for line in file:
key, separator, value = line.partition(":")
@@ -75,96 +70,11 @@ def proc_cpuinfo() -> Microarchitecture:
#
# we are on a blank line separating two cpus. Exit early as
# we want to read just the first entry in /proc/cpuinfo
if separator != ":" and data:
if separator != ":" and info:
break
data[key.strip()] = value.strip()
architecture = _machine()
if architecture == X86_64:
return partial_uarch(
vendor=data.get("vendor_id", "generic"), features=_feature_set(data, key="flags")
)
if architecture == AARCH64:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
)
if architecture in (PPC64LE, PPC64):
generation_match = re.search(r"POWER(\d+)", data.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
return partial_uarch(generation=generation)
if architecture == RISCV64:
if data.get("uarch") == "sifive,u74-mc":
data["uarch"] = "u74mc"
return partial_uarch(name=data.get("uarch", RISCV64))
return generic_microarchitecture(architecture)
class CpuidInfoCollector:
"""Collects the information we need on the host CPU from cpuid"""
# pylint: disable=too-few-public-methods
def __init__(self):
self.cpuid = CPUID()
registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
self.highest_basic_support = registers.eax
self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
"utf-8"
)
registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
self.highest_extension_support = registers.eax
self.features = self._features()
def _features(self):
result = set()
def check_features(data):
registers = self.cpuid.registers_for(**data["input"])
for feature_check in data["bits"]:
current = getattr(registers, feature_check["register"])
if self._is_bit_set(current, feature_check["bit"]):
result.add(feature_check["name"])
for call_data in CPUID_JSON["flags"]:
if call_data["input"]["eax"] > self.highest_basic_support:
continue
check_features(call_data)
for call_data in CPUID_JSON["extension-flags"]:
if call_data["input"]["eax"] > self.highest_extension_support:
continue
check_features(call_data)
return result
def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit
return register & mask > 0
@detection(operating_system="Windows")
def cpuid_info():
"""Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
architecture = _machine()
if architecture == X86_64:
data = CpuidInfoCollector()
return partial_uarch(vendor=data.vendor, features=data.features)
return generic_microarchitecture(architecture)
info[key.strip()] = value.strip()
return info
def _check_output(args, env):
@@ -173,25 +83,14 @@ def _check_output(args, env):
return str(output.decode("utf-8"))
WINDOWS_MAPPING = {
"AMD64": "x86_64",
"ARM64": "aarch64",
}
def _machine():
"""Return the machine architecture we are on"""
""" "Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin or Windows, trust what Python tells us
if operating_system not in ("Darwin", "Windows"):
# If we are not on Darwin, trust what Python tells us
if operating_system != "Darwin":
return platform.machine()
# Normalize windows specific names
if operating_system == "Windows":
platform_machine = platform.machine()
return WINDOWS_MAPPING.get(platform_machine, platform_machine)
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
@@ -204,47 +103,54 @@ def _machine():
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return AARCH64
return "aarch64"
return X86_64
return "x86_64"
@detection(operating_system="Darwin")
def sysctl_info() -> Microarchitecture:
@info_dict(operating_system="Darwin")
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
def sysctl(*args: str) -> str:
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == X86_64:
features = (
f'{sysctl("-n", "machdep.cpu.features").lower()} '
f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}'
if _machine() == "x86_64":
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
)
features = set(features.split())
info = {
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
"flags": flags,
"model": sysctl("-n", "machdep.cpu.model"),
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
else:
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
# Flags detected on Darwin turned to their linux counterpart
for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items():
if darwin_flag in features:
features.update(linux_flag.split())
return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features)
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
return partial_uarch(name=model, vendor="Apple")
info = {
"vendor_id": "Apple",
"flags": [],
"model": model,
"CPU implementer": "Apple",
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
return info
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
@@ -254,10 +160,22 @@ def _ensure_bin_usrbin_in_path():
return child_environment
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
"""Adjust the vendor field to make it human-readable"""
if "CPU implementer" not in data:
return "generic"
def adjust_raw_flags(info):
"""Adjust the flags detected on the system to homogenize
slightly different representations.
"""
# Flags detected on Darwin turned to their linux counterpart
flags = info.get("flags", [])
d2l = TARGETS_JSON["conversions"]["darwin_flags"]
for darwin_flag, linux_flag in d2l.items():
if darwin_flag in flags:
info["flags"] += " " + linux_flag
def adjust_raw_vendor(info):
"""Adjust the vendor field to make it human readable"""
if "CPU implementer" not in info:
return
# Mapping numeric codes to vendor (ARM). This list is a merge from
# different sources:
@@ -267,37 +185,43 @@ def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
# https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
# https://patchwork.kernel.org/patch/10524949/
arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
arm_code = data["CPU implementer"]
return arm_vendors.get(arm_code, arm_code)
arm_code = info["CPU implementer"]
if arm_code in arm_vendors:
info["CPU implementer"] = arm_vendors[arm_code]
def _feature_set(data: Dict[str, str], key: str) -> Set[str]:
return set(data.get(key, "").split())
def raw_info_dictionary():
"""Returns a dictionary with information on the cpu of the current host.
def detected_info() -> Microarchitecture:
"""Returns a partial Microarchitecture with information on the CPU of the current host.
This function calls all the viable factories one after the other until there's one that is
able to produce the requested information. Falls-back to a generic microarchitecture, if none
of the calls succeed.
This function calls all the viable factories one after the other until
there's one that is able to produce the requested information.
"""
# pylint: disable=broad-except
info = {}
for factory in INFO_FACTORY[platform.system()]:
try:
return factory()
info = factory()
except Exception as exc:
warnings.warn(str(exc))
return generic_microarchitecture(_machine())
if info:
adjust_raw_flags(info)
adjust_raw_vendor(info)
break
return info
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]:
"""Returns an unordered list of known micro-architectures that are compatible with the
partial Microarchitecture passed as input.
def compatible_microarchitectures(info):
"""Returns an unordered list of known micro-architectures that are
compatible with the info dictionary passed as argument.
Args:
info (dict): dictionary containing information on the host cpu
"""
architecture_family = _machine()
# If a tester is not registered, assume no known target is compatible with the host
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
return [x for x in TARGETS.values() if tester(info, x)] or [
generic_microarchitecture(architecture_family)
@@ -306,8 +230,8 @@ def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitec
def host():
"""Detects the host micro-architecture and returns it."""
# Retrieve information on the host's cpu
info = detected_info()
# Retrieve a dictionary with raw information on the host's cpu
info = raw_info_dictionary()
# Get a list of possible candidates for this micro-architecture
candidates = compatible_microarchitectures(info)
@@ -334,15 +258,16 @@ def sorting_fn(item):
return max(candidates, key=sorting_fn)
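For orientation, a small usage sketch of the detection entry point discussed in
this hunk; the printed values naturally differ from machine to machine:

.. code-block:: python

   import archspec.cpu

   target = archspec.cpu.host()        # best-matching Microarchitecture object
   print(target.name, target.family)   # e.g. "skylake x86_64"
   print(sorted(target.features)[:5])  # a few of the CPU flags known for that target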
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]):
def compatibility_check(architecture_family):
"""Decorator to register a function as a proper compatibility check.
A compatibility check function takes a partial Microarchitecture object as a first argument,
and an arbitrary target Microarchitecture as the second argument. It returns True if the
target is compatible with first argument, False otherwise.
A compatibility check function takes the raw info dictionary as a first
argument and an arbitrary target as the second argument. It returns True
if the target is compatible with the info dictionary, False otherwise.
Args:
architecture_family: architecture family for which this test can be used
architecture_family (str or tuple): architecture family for which
this test can be used, e.g. x86_64 or ppc64le etc.
"""
# Turn the argument into something iterable
if isinstance(architecture_family, str):
@@ -355,57 +280,86 @@ def decorator(func):
return decorator
@compatibility_check(architecture_family=(PPC64LE, PPC64))
@compatibility_check(architecture_family=("ppc64le", "ppc64"))
def compatibility_check_for_power(info, target):
"""Compatibility check for PPC64 and PPC64LE architectures."""
basename = platform.machine()
generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
# We can use a target if it descends from our machine type and our
# generation (9 for POWER9, etc) is at least its generation.
arch_root = TARGETS[_machine()]
arch_root = TARGETS[basename]
return (
target == arch_root or arch_root in target.ancestors
) and target.generation <= info.generation
) and target.generation <= generation
@compatibility_check(architecture_family=X86_64)
@compatibility_check(architecture_family="x86_64")
def compatibility_check_for_x86_64(info, target):
"""Compatibility check for x86_64 architectures."""
basename = "x86_64"
vendor = info.get("vendor_id", "generic")
features = set(info.get("flags", "").split())
# We can use a target if it descends from our machine type, is from our
# vendor, and we have all of its features
arch_root = TARGETS[X86_64]
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (info.vendor, "generic")
and target.features.issubset(info.features)
and target.vendor in (vendor, "generic")
and target.features.issubset(features)
)
@compatibility_check(architecture_family=AARCH64)
@compatibility_check(architecture_family="aarch64")
def compatibility_check_for_aarch64(info, target):
"""Compatibility check for AARCH64 architectures."""
# At the moment, it's not clear how to detect compatibility with
basename = "aarch64"
features = set(info.get("Features", "").split())
vendor = info.get("CPU implementer", "generic")
# At the moment it's not clear how to detect compatibility with
# a specific version of the architecture
if target.vendor == "generic" and target.name != AARCH64:
if target.vendor == "generic" and target.name != "aarch64":
return False
arch_root = TARGETS[AARCH64]
arch_root = TARGETS[basename]
arch_root_and_vendor = arch_root == target.family and target.vendor in (
info.vendor,
vendor,
"generic",
)
# On macOS it seems impossible to get all the CPU features
# with syctl info, but for ARM we can get the exact model
if platform.system() == "Darwin":
model = TARGETS[info.name]
model_key = info.get("model", basename)
model = TARGETS[model_key]
return arch_root_and_vendor and (target == model or target in model.ancestors)
return arch_root_and_vendor and target.features.issubset(info.features)
return arch_root_and_vendor and target.features.issubset(features)
@compatibility_check(architecture_family=RISCV64)
@compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target):
"""Compatibility check for riscv64 architectures."""
arch_root = TARGETS[RISCV64]
basename = "riscv64"
uarch = info.get("uarch")
# sifive unmatched board
if uarch == "sifive,u74-mc":
uarch = "u74mc"
# catch-all for unknown uarchs
else:
uarch = "riscv64"
arch_root = TARGETS[basename]
return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic"
target == uarch or target.vendor == "generic"
)
View File
@@ -13,7 +13,6 @@
import archspec
import archspec.cpu.alias
import archspec.cpu.schema
from .alias import FEATURE_ALIASES
from .schema import LazyDictionary
@@ -48,7 +47,7 @@ class Microarchitecture:
which has "broadwell" as a parent, supports running binaries
optimized for "broadwell".
vendor (str): vendor of the micro-architecture
features (set of str): supported CPU flags. Note that the semantic
features (list of str): supported CPU flags. Note that the semantic
of the flags in this field might vary among architectures, if
at all present. For instance x86_64 processors will list all
the flags supported by a given CPU while Arm processors will
@@ -181,28 +180,24 @@ def generic(self):
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
return max(generics, key=lambda x: len(x.ancestors))
def to_dict(self):
"""Returns a dictionary representation of this object."""
return {
"name": str(self.name),
"vendor": str(self.vendor),
"features": sorted(str(x) for x in self.features),
"generation": self.generation,
"parents": [str(x) for x in self.parents],
"compilers": self.compilers,
}
def to_dict(self, return_list_of_items=False):
"""Returns a dictionary representation of this object.
@staticmethod
def from_dict(data) -> "Microarchitecture":
"""Construct a microarchitecture from a dictionary representation."""
return Microarchitecture(
name=data["name"],
parents=[TARGETS[x] for x in data["parents"]],
vendor=data["vendor"],
features=set(data["features"]),
compilers=data.get("compilers", {}),
generation=data.get("generation", 0),
)
Args:
return_list_of_items (bool): if True returns an ordered list of
items instead of the dictionary
"""
list_of_items = [
("name", str(self.name)),
("vendor", str(self.vendor)),
("features", sorted(str(x) for x in self.features)),
("generation", self.generation),
("parents", [str(x) for x in self.parents]),
]
if return_list_of_items:
return list_of_items
return dict(list_of_items)
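A brief sketch of how the newer ``to_dict``/``from_dict`` pair shown above
round-trips a known target (any name from the public ``TARGETS`` dictionary
works):

.. code-block:: python

   import archspec.cpu

   haswell = archspec.cpu.TARGETS["haswell"]
   data = haswell.to_dict()             # plain, JSON-serializable dictionary
   rebuilt = archspec.cpu.Microarchitecture.from_dict(data)
   print(rebuilt.name, rebuilt.vendor)  # haswell GenuineIntel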
def optimization_flags(self, compiler, version):
"""Returns a string containing the optimization flags that needs
@@ -276,7 +271,9 @@ def tuplify(ver):
flags = flags_fmt.format(**compiler_entry)
return flags
msg = "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
msg = (
"cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
)
if compiler_info:
versions = [x["versions"] for x in compiler_info]
msg += f' [supported compiler versions are {", ".join(versions)}]'
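As a usage sketch for ``optimization_flags`` (the exact flags come from the
``microarchitectures.json`` data shipped with archspec, so treat the comment as
indicative only):

.. code-block:: python

   import archspec.cpu

   skylake = archspec.cpu.TARGETS["skylake"]
   print(skylake.optimization_flags("gcc", "12.2.0"))  # e.g. -march=skylake -mtune=skylake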
@@ -292,7 +289,9 @@ def generic_microarchitecture(name):
Args:
name (str): name of the micro-architecture
"""
return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={})
return Microarchitecture(
name, parents=[], vendor="generic", features=[], compilers={}
)
def version_components(version):
@@ -346,7 +345,9 @@ def fill_target_from_dict(name, data, targets):
compilers = values.get("compilers", {})
generation = values.get("generation", 0)
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation
)
known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
View File
@@ -7,9 +7,7 @@
"""
import collections.abc
import json
import os
import pathlib
from typing import Tuple
import os.path
class LazyDictionary(collections.abc.MutableMapping):
@@ -48,65 +46,21 @@ def __len__(self):
return len(self.data)
#: Environment variable that might point to a directory with a user defined JSON file
DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR"
def _load_json_file(json_file):
json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
json_dir = os.path.abspath(json_dir)
#: Environment variable that might point to a directory with extensions to JSON files
EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR"
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r", encoding="utf-8") as file:
return json.load(file)
def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]:
"""Given a filename, returns the absolute path for the main JSON file, and an
optional absolute path for an extension JSON file.
Args:
filename: filename for the JSON file
allow_custom: if True, allows overriding the location where the file resides
"""
json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
json_dir = json_dir.absolute()
json_file = json_dir / filename
extension_file = None
if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
extension_dir.absolute()
extension_file = extension_dir / filename
return json_file, extension_file
def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
with open(json_file, "r", encoding="utf-8") as file:
data = json.load(file)
if not extension_file or not extension_file.exists():
return data
with open(extension_file, "r", encoding="utf-8") as file:
extension_data = json.load(file)
top_level_sections = list(data.keys())
for key in top_level_sections:
if key not in extension_data:
continue
data[key].update(extension_data[key])
return data
return _factory
#: In memory representation of the data in microarchitectures.json,
#: loaded on first access
TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True))
TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
#: JSON schema for microarchitectures.json, loaded on first access
TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json"))
#: Information on how to call 'cpuid' to get information on the HOST CPU
CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))
#: JSON schema for cpuid.json, loaded on first access
CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
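A hedged sketch of how the ``ARCHSPEC_CPU_DIR`` override in the newer code
might be exercised; the directory below is hypothetical, must contain a valid
``microarchitectures.json``, and has to be set before the lazy dictionaries are
first read:

.. code-block:: python

   import os

   os.environ["ARCHSPEC_CPU_DIR"] = "/opt/custom-archspec-json"  # hypothetical path

   import archspec.cpu.schema

   # First access triggers the load and honors ARCHSPEC_CPU_DIR.
   print(list(archspec.cpu.schema.TARGETS_JSON.keys()))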
View File
@@ -9,11 +9,11 @@ language specific APIs.
Currently the repository contains the following JSON files:
```console
cpu/
├── cpuid.json # Contains information on CPUID calls to retrieve vendor and features on x86_64
├── cpuid_schema.json # Schema for the file above
├── microarchitectures.json # Contains information on CPU microarchitectures
└── microarchitectures_schema.json # Schema for the file above
.
├── COPYRIGHT
└── cpu
   ├── microarchitectures.json # Contains information on CPU microarchitectures
   └── microarchitectures_schema.json # Schema for the file above
```
File diff suppressed because it is too large
View File
@@ -1,134 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for microarchitecture definitions and feature aliases",
"type": "object",
"additionalProperties": false,
"properties": {
"vendor": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"highest_extension_support": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
},
"extension-flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
}
}
}
View File
@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Anders Høst
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
View File
@@ -1,76 +0,0 @@
cpuid.py
========
Now, this is silly!
Pure Python library for accessing information about x86 processors
by querying the [CPUID](http://en.wikipedia.org/wiki/CPUID)
instruction. Well, not exactly pure Python...
It works by allocating a small piece of virtual memory, copying
a raw x86 function to that memory, giving the memory execute
permissions and then calling the memory as a function. The injected
function executes the CPUID instruction and copies the result back
to a ctypes.Structure where is can be read by Python.
It should work fine on both 32 and 64 bit versions of Windows and Linux
running x86 processors. Apple OS X and other BSD systems should also work,
not tested though...
Why?
----
For poops and giggles. Plus, having access to a low-level feature
without having to compile a C wrapper is pretty neat.
Examples
--------
Getting info with eax=0:
import cpuid
q = cpuid.CPUID()
eax, ebx, ecx, edx = q(0)
Running the files:
$ python example.py
Vendor ID : GenuineIntel
CPU name : Intel(R) Xeon(R) CPU W3550 @ 3.07GHz
Vector instructions supported:
SSE : Yes
SSE2 : Yes
SSE3 : Yes
SSSE3 : Yes
SSE4.1 : Yes
SSE4.2 : Yes
SSE4a : --
AVX : --
AVX2 : --
$ python cpuid.py
CPUID A B C D
00000000 0000000b 756e6547 6c65746e 49656e69
00000001 000106a5 00100800 009ce3bd bfebfbff
00000002 55035a01 00f0b2e4 00000000 09ca212c
00000003 00000000 00000000 00000000 00000000
00000004 00000000 00000000 00000000 00000000
00000005 00000040 00000040 00000003 00001120
00000006 00000003 00000002 00000001 00000000
00000007 00000000 00000000 00000000 00000000
00000008 00000000 00000000 00000000 00000000
00000009 00000000 00000000 00000000 00000000
0000000a 07300403 00000044 00000000 00000603
0000000b 00000000 00000000 00000095 00000000
80000000 80000008 00000000 00000000 00000000
80000001 00000000 00000000 00000001 28100800
80000002 65746e49 2952286c 6f655820 2952286e
80000003 55504320 20202020 20202020 57202020
80000004 30353533 20402020 37302e33 007a4847
80000005 00000000 00000000 00000000 00000000
80000006 00000000 00000000 01006040 00000000
80000007 00000000 00000000 00000000 00000100
80000008 00003024 00000000 00000000 00000000
View File
@@ -1,172 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import platform
import os
import ctypes
from ctypes import c_uint32, c_long, c_ulong, c_size_t, c_void_p, POINTER, CFUNCTYPE
# Posix x86_64:
# Three first call registers : RDI, RSI, RDX
# Volatile registers : RAX, RCX, RDX, RSI, RDI, R8-11
# Windows x86_64:
# Three first call registers : RCX, RDX, R8
# Volatile registers : RAX, RCX, RDX, R8-11
# cdecl 32 bit:
# Three first call registers : Stack (%esp)
# Volatile registers : EAX, ECX, EDX
_POSIX_64_OPC = [
0x53, # push %rbx
0x89, 0xf0, # mov %esi,%eax
0x89, 0xd1, # mov %edx,%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%rdi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%rdi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%rdi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%rdi)
0x5b, # pop %rbx
0xc3 # retq
]
_WINDOWS_64_OPC = [
0x53, # push %rbx
0x89, 0xd0, # mov %edx,%eax
0x49, 0x89, 0xc9, # mov %rcx,%r9
0x44, 0x89, 0xc1, # mov %r8d,%ecx
0x0f, 0xa2, # cpuid
0x41, 0x89, 0x01, # mov %eax,(%r9)
0x41, 0x89, 0x59, 0x04, # mov %ebx,0x4(%r9)
0x41, 0x89, 0x49, 0x08, # mov %ecx,0x8(%r9)
0x41, 0x89, 0x51, 0x0c, # mov %edx,0xc(%r9)
0x5b, # pop %rbx
0xc3 # retq
]
_CDECL_32_OPC = [
0x53, # push %ebx
0x57, # push %edi
0x8b, 0x7c, 0x24, 0x0c, # mov 0xc(%esp),%edi
0x8b, 0x44, 0x24, 0x10, # mov 0x10(%esp),%eax
0x8b, 0x4c, 0x24, 0x14, # mov 0x14(%esp),%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%edi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%edi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%edi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%edi)
0x5f, # pop %edi
0x5b, # pop %ebx
0xc3 # ret
]
is_windows = os.name == "nt"
is_64bit = ctypes.sizeof(ctypes.c_voidp) == 8
class CPUID_struct(ctypes.Structure):
_register_names = ("eax", "ebx", "ecx", "edx")
_fields_ = [(r, c_uint32) for r in _register_names]
def __getitem__(self, item):
if item not in self._register_names:
raise KeyError(item)
return getattr(self, item)
def __repr__(self):
return "eax=0x{:x}, ebx=0x{:x}, ecx=0x{:x}, edx=0x{:x}".format(self.eax, self.ebx, self.ecx, self.edx)
class CPUID(object):
def __init__(self):
if platform.machine() not in ("AMD64", "x86_64", "x86", "i686"):
raise SystemError("Only available for x86")
if is_windows:
if is_64bit:
# VirtualAlloc seems to fail under some weird
# circumstances when ctypes.windll.kernel32 is
# used under 64 bit Python. CDLL fixes this.
self.win = ctypes.CDLL("kernel32.dll")
opc = _WINDOWS_64_OPC
else:
# Here ctypes.windll.kernel32 is needed to get the
# right DLL. Otherwise it will fail when running
# 32 bit Python on 64 bit Windows.
self.win = ctypes.windll.kernel32
opc = _CDECL_32_OPC
else:
opc = _POSIX_64_OPC if is_64bit else _CDECL_32_OPC
size = len(opc)
code = (ctypes.c_ubyte * size)(*opc)
if is_windows:
self.win.VirtualAlloc.restype = c_void_p
self.win.VirtualAlloc.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong]
self.addr = self.win.VirtualAlloc(None, size, 0x1000, 0x40)
if not self.addr:
raise MemoryError("Could not allocate RWX memory")
ctypes.memmove(self.addr, code, size)
else:
from mmap import (
mmap,
MAP_PRIVATE,
MAP_ANONYMOUS,
PROT_WRITE,
PROT_READ,
PROT_EXEC,
)
self.mm = mmap(
-1,
size,
flags=MAP_PRIVATE | MAP_ANONYMOUS,
prot=PROT_WRITE | PROT_READ | PROT_EXEC,
)
self.mm.write(code)
self.addr = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
func_type = CFUNCTYPE(None, POINTER(CPUID_struct), c_uint32, c_uint32)
self.func_ptr = func_type(self.addr)
def __call__(self, eax, ecx=0):
struct = self.registers_for(eax=eax, ecx=ecx)
return struct.eax, struct.ebx, struct.ecx, struct.edx
def registers_for(self, eax, ecx=0):
"""Calls cpuid with eax and ecx set as the input arguments, and returns a structure
containing eax, ebx, ecx, and edx.
"""
struct = CPUID_struct()
self.func_ptr(struct, eax, ecx)
return struct
def __del__(self):
if is_windows:
self.win.VirtualFree.restype = c_long
self.win.VirtualFree.argtypes = [c_void_p, c_size_t, c_ulong]
self.win.VirtualFree(self.addr, 0, 0x8000)
else:
self.mm.close()
if __name__ == "__main__":
def valid_inputs():
cpuid = CPUID()
for eax in (0x0, 0x80000000):
highest, _, _, _ = cpuid(eax)
while eax <= highest:
regs = cpuid(eax)
yield (eax, regs)
eax += 1
print(" ".join(x.ljust(8) for x in ("CPUID", "A", "B", "C", "D")).strip())
for eax, regs in valid_inputs():
print("%08x" % eax, " ".join("%08x" % reg for reg in regs))
View File
@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import struct
import cpuid
def cpu_vendor(cpu):
_, b, c, d = cpu(0)
return struct.pack("III", b, d, c).decode("utf-8")
def cpu_name(cpu):
name = "".join((struct.pack("IIII", *cpu(0x80000000 + i)).decode("utf-8")
for i in range(2, 5)))
return name.split('\x00', 1)[0]
def is_set(cpu, leaf, subleaf, reg_idx, bit):
"""
@param {leaf} %eax
@param {sublead} %ecx, 0 in most cases
@param {reg_idx} idx of [%eax, %ebx, %ecx, %edx], 0-based
@param {bit} bit of reg selected by {reg_idx}, 0-based
"""
regs = cpu(leaf, subleaf)
if (1 << bit) & regs[reg_idx]:
return "Yes"
else:
return "--"
if __name__ == "__main__":
cpu = cpuid.CPUID()
print("Vendor ID : %s" % cpu_vendor(cpu))
print("CPU name : %s" % cpu_name(cpu))
print()
print("Vector instructions supported:")
print("SSE : %s" % is_set(cpu, 1, 0, 3, 25))
print("SSE2 : %s" % is_set(cpu, 1, 0, 3, 26))
print("SSE3 : %s" % is_set(cpu, 1, 0, 2, 0))
print("SSSE3 : %s" % is_set(cpu, 1, 0, 2, 9))
print("SSE4.1 : %s" % is_set(cpu, 1, 0, 2, 19))
print("SSE4.2 : %s" % is_set(cpu, 1, 0, 2, 20))
print("SSE4a : %s" % is_set(cpu, 0x80000001, 0, 2, 6))
print("AVX : %s" % is_set(cpu, 1, 0, 2, 28))
print("AVX2 : %s" % is_set(cpu, 7, 0, 1, 5))
print("BMI1 : %s" % is_set(cpu, 7, 0, 1, 3))
print("BMI2 : %s" % is_set(cpu, 7, 0, 1, 8))
# Intel RDT CMT/MBM
print("L3 Monitoring : %s" % is_set(cpu, 0xf, 0, 3, 1))
print("L3 Occupancy : %s" % is_set(cpu, 0xf, 1, 3, 0))
print("L3 Total BW : %s" % is_set(cpu, 0xf, 1, 3, 1))
print("L3 Local BW : %s" % is_set(cpu, 0xf, 1, 3, 2))
View File
@@ -42,6 +42,11 @@ def convert_to_posix_path(path: str) -> str:
return format_os_path(path, mode=Path.unix)
def convert_to_windows_path(path: str) -> str:
"""Converts the input path to Windows style."""
return format_os_path(path, mode=Path.windows)
def convert_to_platform_path(path: str) -> str:
"""Converts the input path to the current platform's native style."""
return format_os_path(path, mode=Path.platform_path)
View File
@@ -171,7 +171,7 @@ def polite_path(components: Iterable[str]):
@memoized
def _polite_antipattern():
# A regex of all the characters we don't want in a filename
return re.compile(r"[^A-Za-z0-9_+.-]")
return re.compile(r"[^A-Za-z0-9_.-]")
def polite_filename(filename: str) -> str:
@@ -237,6 +237,16 @@ def _get_mime_type():
return file_command("-b", "-h", "--mime-type")
@memoized
def _get_mime_type_compressed():
"""Same as _get_mime_type but attempts to check for
compression first
"""
mime_uncompressed = _get_mime_type()
mime_uncompressed.add_default_arg("-Z")
return mime_uncompressed
def mime_type(filename):
"""Returns the mime type and subtype of a file.
@@ -252,6 +262,21 @@ def mime_type(filename):
return type, subtype
def compressed_mime_type(filename):
"""Same as mime_type but checks for type that has been compressed
Args:
filename (str): file to be analyzed
Returns:
Tuple containing the MIME type and subtype
"""
output = _get_mime_type_compressed()(filename, output=str, error=str).strip()
tty.debug("==> " + output)
type, _, subtype = output.partition("/")
return type, subtype
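A small usage sketch for the mime-type helpers above (they shell out to the
``file`` command, so results depend on the file inspected):

.. code-block:: python

   from llnl.util.filesystem import mime_type

   kind, subtype = mime_type("/etc/os-release")  # e.g. ("text", "plain")
   print(kind, subtype)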
#: This generates the library filenames that may appear on any OS.
library_extensions = ["a", "la", "so", "tbd", "dylib"]
@@ -283,6 +308,13 @@ def paths_containing_libs(paths, library_names):
return rpaths_to_include
@system_path_filter
def same_path(path1, path2):
norm1 = os.path.abspath(path1).rstrip(os.path.sep)
norm2 = os.path.abspath(path2).rstrip(os.path.sep)
return norm1 == norm2
def filter_file(
regex: str,
repl: Union[str, Callable[[Match], str]],
@@ -877,34 +909,39 @@ def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def has_shebang(path):
"""Returns whether a path has a shebang line. Returns False if the file cannot be opened."""
try:
with open(path, "rb") as f:
return f.read(2) == b"#!"
except OSError:
return False
@system_path_filter
def get_filetype(path_name):
"""
Return the output of file path_name as a string to identify file type.
"""
file = Executable("file")
file.add_default_env("LC_ALL", "C")
output = file("-b", "-h", "%s" % path_name, output=str, error=str)
return output.strip()
@system_path_filter
def is_nonsymlink_exe_with_shebang(path):
"""Returns whether the path is an executable regular file with a shebang. Returns False too
when the path is a symlink to a script, and also when the file cannot be opened."""
"""
Returns whether the path is an executable script with a shebang.
Return False when the path is a *symlink* to an executable script.
"""
try:
st = os.lstat(path)
except OSError:
return False
# Should not be a symlink
if stat.S_ISLNK(st.st_mode):
return False
# Should not be a symlink
if stat.S_ISLNK(st.st_mode):
return False
# Should be executable
if not st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
return False
# Should be executable
if not st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
# Should start with a shebang
with open(path, "rb") as f:
return f.read(2) == b"#!"
except (IOError, OSError):
return False
return has_shebang(path)
@system_path_filter(arg_slice=slice(1))
def chgrp_if_not_world_writable(path, group):
@@ -1126,6 +1163,20 @@ def write_tmp_and_move(filename):
shutil.move(tmp, filename)
@contextmanager
@system_path_filter
def open_if_filename(str_or_file, mode="r"):
"""Takes either a path or a file object, and opens it if it is a path.
If it's a file object, just yields the file object.
"""
if isinstance(str_or_file, str):
with open(str_or_file, mode) as f:
yield f
else:
yield str_or_file
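A usage sketch of the ``open_if_filename`` context manager above; the path is
hypothetical:

.. code-block:: python

   from llnl.util.filesystem import open_if_filename

   # Accepts either a path or an already-open file object.
   with open_if_filename("/tmp/example.txt", "w") as f:
       f.write("hello\n")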
@system_path_filter
def touch(path):
"""Creates an empty file at the specified path."""
@@ -1183,47 +1234,6 @@ def get_single_file(directory):
return fnames[0]
@system_path_filter
def windows_sfn(path: os.PathLike):
"""Returns 8.3 Filename (SFN) representation of
path
8.3 Filenames (SFN or short filename) is a file
naming convention used prior to Win95 that Windows
still (and will continue to) support. This convention
caps filenames at 8 characters, and most importantly
does not allow for spaces in addition to other specifications.
The scheme is generally the same as a normal Windows
file scheme, but all spaces are removed and the filename
is capped at 6 characters. The remaining characters are
replaced with ~N where N is the number file in a directory
that a given file represents i.e. Program Files and Program Files (x86)
would be PROGRA~1 and PROGRA~2 respectively.
Further, all file/directory names are all caps (although modern Windows
is case insensitive in practice).
Conversion is accomplished by fileapi.h GetShortPathNameW
Returns paths in 8.3 Filename form
Note: this method is a no-op on Linux
Args:
path: Path to be transformed into SFN (8.3 filename) format
"""
# This should not be run-able on linux/macos
if sys.platform != "win32":
return path
path = str(path)
import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * len(path)
ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ret_str, len(path))
return ret_str.value
@contextmanager
def temp_cwd():
tmp_dir = tempfile.mkdtemp()
@@ -1238,6 +1248,19 @@ def temp_cwd():
shutil.rmtree(tmp_dir, **kwargs)
@contextmanager
@system_path_filter
def temp_rename(orig_path, temp_path):
same_path = os.path.realpath(orig_path) == os.path.realpath(temp_path)
if not same_path:
shutil.move(orig_path, temp_path)
try:
yield
finally:
if not same_path:
shutil.move(temp_path, orig_path)
@system_path_filter
def can_access(file_name):
"""True if we have read/write access to the file."""
@@ -1354,89 +1377,120 @@ def traverse_tree(
yield (source_path, dest_path)
def lexists_islink_isdir(path):
"""Computes the tuple (lexists(path), islink(path), isdir(path)) in a minimal
number of stat calls on unix. Use os.path and symlink.islink methods for windows."""
if sys.platform == "win32":
if not os.path.lexists(path):
return False, False, False
return os.path.lexists(path), islink(path), os.path.isdir(path)
# First try to lstat, so we know if it's a link or not.
try:
lst = os.lstat(path)
except (IOError, OSError):
return False, False, False
is_link = stat.S_ISLNK(lst.st_mode)
# Check whether file is a dir.
if not is_link:
is_dir = stat.S_ISDIR(lst.st_mode)
return True, is_link, is_dir
# Check whether symlink points to a dir.
try:
st = os.stat(path)
is_dir = stat.S_ISDIR(st.st_mode)
except (IOError, OSError):
# Dangling symlink (i.e. it lexists but not exists)
is_dir = False
return True, is_link, is_dir
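A usage sketch for ``lexists_islink_isdir``; the values printed depend on the
path queried:

.. code-block:: python

   from llnl.util.filesystem import lexists_islink_isdir

   exists, is_link, is_dir = lexists_islink_isdir("/usr/bin")
   print(exists, is_link, is_dir)  # e.g. True False True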
class BaseDirectoryVisitor:
"""Base class and interface for :py:func:`visit_directory_tree`."""
def visit_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_file(self, root, rel_path, depth):
"""Handle the non-symlink file at ``os.path.join(root, rel_path)``
Parameters:
root: root directory
rel_path: relative path to current file from ``root``
root (str): root directory
rel_path (str): relative path to current file from ``root``
depth (int): depth of current file from the ``root`` directory"""
pass
def visit_symlinked_file(self, root: str, rel_path: str, depth) -> None:
"""Handle the symlink to a file at ``os.path.join(root, rel_path)``. Note: ``rel_path`` is
the location of the symlink, not to what it is pointing to. The symlink may be dangling.
def visit_symlinked_file(self, root, rel_path, depth):
"""Handle the symlink to a file at ``os.path.join(root, rel_path)``.
Note: ``rel_path`` is the location of the symlink, not to what it is
pointing to. The symlink may be dangling.
Parameters:
root: root directory
rel_path: relative path to current symlink from ``root``
depth: depth of current symlink from the ``root`` directory"""
root (str): root directory
rel_path (str): relative path to current symlink from ``root``
depth (int): depth of current symlink from the ``root`` directory"""
pass
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_dir(self, root, rel_path, depth):
"""Return True from this function to recurse into the directory at
os.path.join(root, rel_path). Return False in order not to recurse further.
Parameters:
root: root directory
rel_path: relative path to current directory from ``root``
depth: depth of current directory from the ``root`` directory
root (str): root directory
rel_path (str): relative path to current directory from ``root``
depth (int): depth of current directory from the ``root`` directory
Returns:
bool: ``True`` when the directory should be recursed into. ``False`` when
not"""
return False
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
"""Return ``True`` to recurse into the symlinked directory and ``False`` in order not to.
Note: ``rel_path`` is the path to the symlink itself. Following symlinked directories
blindly can cause infinite recursion due to cycles.
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""Return ``True`` to recurse into the symlinked directory and ``False`` in
order not to. Note: ``rel_path`` is the path to the symlink itself.
Following symlinked directories blindly can cause infinite recursion due to
cycles.
Parameters:
root: root directory
rel_path: relative path to current symlink from ``root``
depth: depth of current symlink from the ``root`` directory
root (str): root directory
rel_path (str): relative path to current symlink from ``root``
depth (int): depth of current symlink from the ``root`` directory
Returns:
bool: ``True`` when the directory should be recursed into. ``False`` when
not"""
return False
def after_visit_dir(self, root: str, rel_path: str, depth: int) -> None:
"""Called after recursion into ``rel_path`` finished. This function is not called when
``rel_path`` was not recursed into.
def after_visit_dir(self, root, rel_path, depth):
"""Called after recursion into ``rel_path`` finished. This function is not
called when ``rel_path`` was not recursed into.
Parameters:
root: root directory
rel_path: relative path to current directory from ``root``
depth: depth of current directory from the ``root`` directory"""
root (str): root directory
rel_path (str): relative path to current directory from ``root``
depth (int): depth of current directory from the ``root`` directory"""
pass
def after_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> None:
"""Called after recursion into ``rel_path`` finished. This function is not called when
``rel_path`` was not recursed into.
def after_visit_symlinked_dir(self, root, rel_path, depth):
"""Called after recursion into ``rel_path`` finished. This function is not
called when ``rel_path`` was not recursed into.
Parameters:
root: root directory
rel_path: relative path to current symlink from ``root``
depth: depth of current symlink from the ``root`` directory"""
root (str): root directory
rel_path (str): relative path to current symlink from ``root``
depth (int): depth of current symlink from the ``root`` directory"""
pass
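A minimal sketch of the visitor interface documented above: a visitor that just
counts regular files, driven by ``visit_directory_tree``. The root directory is
a placeholder:

.. code-block:: python

   from llnl.util.filesystem import BaseDirectoryVisitor, visit_directory_tree


   class FileCounter(BaseDirectoryVisitor):
       """Count regular (non-symlink) files in a tree."""

       def __init__(self):
           self.count = 0

       def visit_file(self, root, rel_path, depth):
           self.count += 1

       def before_visit_dir(self, root, rel_path, depth):
           return True  # recurse into every real directory


   counter = FileCounter()
   visit_directory_tree("/usr/share/pkgconfig", counter)  # placeholder root
   print(counter.count)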
def visit_directory_tree(
root: str, visitor: BaseDirectoryVisitor, rel_path: str = "", depth: int = 0
):
"""Recurses the directory root depth-first through a visitor pattern using the interface from
:py:class:`BaseDirectoryVisitor`
def visit_directory_tree(root, visitor, rel_path="", depth=0):
"""Recurses the directory root depth-first through a visitor pattern using the
interface from :py:class:`BaseDirectoryVisitor`
Parameters:
root: path of directory to recurse into
visitor: what visitor to use
rel_path: current relative path from the root
depth: current depth from the root
root (str): path of directory to recurse into
visitor (BaseDirectoryVisitor): what visitor to use
rel_path (str): current relative path from the root
depth (str): current depth from the root
"""
dir = os.path.join(root, rel_path)
dir_entries = sorted(os.scandir(dir), key=lambda d: d.name)
@@ -1444,19 +1498,26 @@ def visit_directory_tree(
for f in dir_entries:
rel_child = os.path.join(rel_path, f.name)
islink = f.is_symlink()
# On Windows, symlinks to directories are distinct from symlinks to files, and it is
# possible to create a broken symlink to a directory (e.g. using os.symlink without
# `target_is_directory=True`), invoking `isdir` on a symlink on Windows that is broken in
# this manner will result in an error. In this case we can work around the issue by reading
# the target and resolving the directory ourselves
# On Windows, symlinks to directories are distinct from
# symlinks to files, and it is possible to create a
# broken symlink to a directory (e.g. using os.symlink
# without `target_is_directory=True`), invoking `isdir`
# on a symlink on Windows that is broken in this manner
# will result in an error. In this case we can work around
# the issue by reading the target and resolving the
# directory ourselves
try:
isdir = f.is_dir()
except OSError as e:
if sys.platform == "win32" and hasattr(e, "winerror") and e.winerror == 5 and islink:
# if path is a symlink, determine destination and evaluate file vs directory
# if path is a symlink, determine destination and
# evaluate file vs directory
link_target = resolve_link_target_relative_to_the_link(f)
# link_target might be relative but resolve_link_target_relative_to_the_link
# will ensure that if so, that it is relative to the CWD and therefore makes sense
# link_target might be relative but
# resolve_link_target_relative_to_the_link
# will ensure that if so, that it is relative
# to the CWD and therefore
# makes sense
isdir = os.path.isdir(link_target)
else:
raise e
View File
@@ -98,6 +98,36 @@ def caller_locals():
del stack
def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
enclosing module's name.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
try:
# Make sure locals contain __module__
caller_locals = stack[2][0].f_locals
finally:
del stack
if "__module__" not in caller_locals:
raise RuntimeError(
"Must invoke get_calling_module_name() " "from inside a class definition!"
)
module_name = caller_locals["__module__"]
base_name = module_name.split(".")[-1]
return base_name
def attr_required(obj, attr_name):
"""Ensure that a class has a required attribute."""
if not hasattr(obj, attr_name):
raise RequiredAttributeError(
"No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
)
def attr_setdefault(obj, name, value):
"""Like dict.setdefault, but for objects."""
if not hasattr(obj, name):
@@ -483,6 +513,42 @@ def copy(self):
return clone
def in_function(function_name):
"""True if the caller was called from some function with
the supplied Name, False otherwise."""
stack = inspect.stack()
try:
for elt in stack[2:]:
if elt[3] == function_name:
return True
return False
finally:
del stack
def check_kwargs(kwargs, fun):
"""Helper for making functions with kwargs. Checks whether the kwargs
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
Example::
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
# This raises a TypeError:
foo(w='bad kwarg')
"""
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for function %s()."
% (next(iter(kwargs)), fun.__name__)
)
def match_predicate(*args):
"""Utility function for making string matching predicates.
@@ -698,6 +764,11 @@ def pretty_seconds(seconds):
return pretty_seconds_formatter(seconds)(seconds)
class RequiredAttributeError(ValueError):
def __init__(self, message):
super().__init__(message)
class ObjectWrapper:
"""Base class that wraps an object. Derived classes can add new behavior
while staying undercover.
@@ -772,30 +843,6 @@ def __repr__(self):
return repr(self.instance)
def get_entry_points(*, group: str):
"""Wrapper for ``importlib.metadata.entry_points``
Args:
group: entry points to select
Returns:
EntryPoints for ``group`` or empty list if unsupported
"""
try:
import importlib.metadata # type: ignore # novermin
except ImportError:
return []
try:
return importlib.metadata.entry_points(group=group)
except TypeError:
# Prior to Python 3.10, entry_points accepted no parameters and always
# returned a dictionary of entry points, keyed by group. See
# https://docs.python.org/3/library/importlib.metadata.html#entry-points
return importlib.metadata.entry_points().get(group, [])
def load_module_from_file(module_name, module_path):
"""Loads a python module from the path of the corresponding file.
@@ -864,6 +911,25 @@ def uniq(sequence):
return uniq_list
def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args):
return func(*args)
return _wrapper
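A tiny sketch of the ``star`` helper above, shown with plain ``map`` for
brevity (its typical use is with ``multiprocessing`` map functions):

.. code-block:: python

   from llnl.util.lang import star


   def add(x, y):
       return x + y


   print(list(map(star(add), [(1, 2), (3, 4)])))  # [3, 7]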
class Devnull:
"""Null stream with less overhead than ``os.devnull``.
See https://stackoverflow.com/a/2929954.
"""
def write(self, *_):
pass
def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example::
View File
@@ -8,7 +8,7 @@
import filecmp
import os
import shutil
from typing import Callable, Dict, List, Optional, Tuple
from collections import OrderedDict
import llnl.util.tty as tty
from llnl.util.filesystem import BaseDirectoryVisitor, mkdirp, touch, traverse_tree
@@ -51,32 +51,32 @@ class SourceMergeVisitor(BaseDirectoryVisitor):
- A list of merge conflicts in dst/
"""
def __init__(self, ignore: Optional[Callable[[str], bool]] = None):
def __init__(self, ignore=None):
self.ignore = ignore if ignore is not None else lambda f: False
# When mapping <src root> to <dst root>/<projection>, we need to prepend the <projection>
# bit to the relative path in the destination dir.
self.projection: str = ""
# When mapping <src root> to <dst root>/<projection>, we need
# to prepend the <projection> bit to the relative path in the
# destination dir.
self.projection = ""
# Two files f and g conflict if they are not os.path.samefile(f, g) and they are both
# projected to the same destination file. These conflicts are not necessarily fatal, and
# can be resolved or ignored. For example <prefix>/LICENSE or
# <site-packages>/<namespace>/__init__.py conflicts can be ignored).
self.file_conflicts: List[MergeConflict] = []
# When a file blocks another file, the conflict can sometimes
# be resolved / ignored (e.g. <prefix>/LICENSE or
# or <site-packages>/<namespace>/__init__.py conflicts can be
# ignored).
self.file_conflicts = []
# When we have to create a dir where a file is, or a file where a dir is, we have fatal
# errors, listed here.
self.fatal_conflicts: List[MergeConflict] = []
# When we have to create a dir where a file is, or a file
# where a dir is, we have fatal errors, listed here.
self.fatal_conflicts = []
# What directories we have to make; this is an ordered dict, so that we have a fast lookup
# and can run mkdir in order.
self.directories: Dict[str, Tuple[str, str]] = {}
# What directories we have to make; this is an ordered set,
# so that we have a fast lookup and can run mkdir in order.
self.directories = OrderedDict()
# Files to link. Maps dst_rel to (src_root, src_rel). This is an ordered dict, where files
# are guaranteed to be grouped by src_root in the order they were visited.
self.files: Dict[str, Tuple[str, str]] = {}
# Files to link. Maps dst_rel to (src_root, src_rel)
self.files = OrderedDict()
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_dir(self, root, rel_path, depth):
"""
Register a directory if dst / rel_path is not blocked by a file or ignored.
"""
@@ -104,7 +104,7 @@ def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
self.directories[proj_rel_path] = (root, rel_path)
return True
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Replace symlinked dirs with actual directories when possible in low depths,
otherwise handle it as a file (i.e. we link to the symlink).
@@ -136,56 +136,40 @@ def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bo
self.visit_file(root, rel_path, depth)
return False
def visit_file(self, root: str, rel_path: str, depth: int, *, symlink: bool = False) -> None:
def visit_file(self, root, rel_path, depth):
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
pass
elif proj_rel_path in self.directories:
# Can't create a file where a dir is; fatal error
src_a_root, src_a_relpath = self.directories[proj_rel_path]
self.fatal_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(*self.directories[proj_rel_path]),
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
)
elif proj_rel_path in self.files:
# When two files project to the same path, they conflict iff they are distinct.
# If they are the same (i.e. one links to the other), register regular files rather
# than symlinks. The reason is that in copy-type views, we need a copy of the actual
# file, not the symlink.
src_a = os.path.join(*self.files[proj_rel_path])
src_b = os.path.join(root, rel_path)
try:
samefile = os.path.samefile(src_a, src_b)
except OSError:
samefile = False
if not samefile:
# Distinct files produce a conflict.
self.file_conflicts.append(
MergeConflict(dst=proj_rel_path, src_a=src_a, src_b=src_b)
# In some cases we can resolve file-file conflicts
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.file_conflicts.append(
MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path),
)
return
if not symlink:
# Remove the link in favor of the actual file. The del is necessary to maintain the
# order of the files dict, which is grouped by root.
del self.files[proj_rel_path]
self.files[proj_rel_path] = (root, rel_path)
)
else:
# Otherwise register this file to be linked.
self.files[proj_rel_path] = (root, rel_path)
def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth, symlink=True)
self.visit_file(root, rel_path, depth)
def set_projection(self, projection: str) -> None:
def set_projection(self, projection):
self.projection = os.path.normpath(projection)
# Todo, is this how to check in general for empty projection?
@@ -213,19 +197,24 @@ def set_projection(self, projection: str) -> None:
class DestinationMergeVisitor(BaseDirectoryVisitor):
"""DestinatinoMergeVisitor takes a SourceMergeVisitor and:
"""DestinatinoMergeVisitor takes a SourceMergeVisitor
and:
a. registers additional conflicts when merging to the destination prefix
b. removes redundant mkdir operations when directories already exist in the destination prefix.
a. registers additional conflicts when merging
to the destination prefix
b. removes redundant mkdir operations when
directories already exist in the destination
prefix.
This also makes sure that symlinked directories in the target prefix will never be merged with
This also makes sure that symlinked directories
in the target prefix will never be merged with
directories in the sources directories.
"""
def __init__(self, source_merge_visitor: SourceMergeVisitor):
def __init__(self, source_merge_visitor):
self.src = source_merge_visitor
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_dir(self, root, rel_path, depth):
# If destination dir is a file in a src dir, add a conflict,
# and don't traverse deeper
if rel_path in self.src.files:
@@ -247,7 +236,7 @@ def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
# don't descend into it.
return False
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Symlinked directories in the destination prefix should
be seen as files; we should not accidentally merge
@@ -273,7 +262,7 @@ def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bo
# Never descend into symlinked target dirs.
return False
def visit_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_file(self, root, rel_path, depth):
# Can't merge a file if target already exists
if rel_path in self.src.directories:
src_a_root, src_a_relpath = self.src.directories[rel_path]
@@ -291,7 +280,7 @@ def visit_file(self, root: str, rel_path: str, depth: int) -> None:
)
)
def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
def visit_symlinked_file(self, root, rel_path, depth):
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth)

View File

@@ -815,6 +815,10 @@ def __init__(self, path):
super().__init__(msg)
class LockLimitError(LockError):
"""Raised when exceed maximum attempts to acquire a lock."""
class LockTimeoutError(LockError):
"""Raised when an attempt to acquire a lock times out."""

View File

@@ -189,7 +189,6 @@ def _windows_can_symlink() -> bool:
import llnl.util.filesystem as fs
fs.touchp(fpath)
fs.mkdirp(dpath)
try:
os.symlink(dpath, dlink)

View File

@@ -44,6 +44,10 @@ def is_debug(level=1):
return _debug >= level
def is_stacktrace():
return _stacktrace
def set_debug(level=0):
global _debug
assert level >= 0, "Debug level must be a positive value"
@@ -248,6 +252,37 @@ def die(message, *args, **kwargs) -> NoReturn:
sys.exit(1)
def get_number(prompt, **kwargs):
default = kwargs.get("default", None)
abort = kwargs.get("abort", None)
if default is not None and abort is not None:
prompt += " (default is %s, %s to abort) " % (default, abort)
elif default is not None:
prompt += " (default is %s) " % default
elif abort is not None:
prompt += " (%s to abort) " % abort
number = None
while number is None:
msg(prompt, newline=False)
ans = input()
if ans == str(abort):
return None
if ans:
try:
number = int(ans)
if number < 1:
msg("Please enter a valid number.")
number = None
except ValueError:
msg("Please enter a valid number.")
elif default is not None:
number = default
return number
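A hedged usage sketch; the prompt text, default, and abort key below are made up:

    # Prints "How many build jobs? (default is 4, q to abort) " and returns an int,
    # the default if the user just presses enter, or None if they type "q".
    jobs = get_number("How many build jobs?", default=4, abort="q")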
def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get("default", None)

View File

@@ -244,7 +244,7 @@ def _search_duplicate_specs_in_externals(error_cls):
+ lines
+ ["as they might result in non-deterministic hashes"]
)
except (TypeError, AttributeError):
except TypeError:
details = []
errors.append(error_cls(summary=error_msg, details=details))
@@ -292,6 +292,12 @@ def _avoid_mismatched_variants(error_cls):
errors = []
packages_yaml = spack.config.CONFIG.get_config("packages")
def make_error(config_data, summary):
s = io.StringIO()
s.write("Occurring in the following file:\n")
syaml.dump_config(config_data, stream=s, blame=True)
return error_cls(summary=summary, details=[s.getvalue()])
for pkg_name in packages_yaml:
# 'all:' must be more forgiving, since it is setting defaults for everything
if pkg_name == "all" or "variants" not in packages_yaml[pkg_name]:
@@ -311,7 +317,7 @@ def _avoid_mismatched_variants(error_cls):
f"Setting a preference for the '{pkg_name}' package to the "
f"non-existing variant '{variant.name}'"
)
errors.append(_make_config_error(preferences, summary, error_cls=error_cls))
errors.append(make_error(preferences, summary))
continue
# Variant cannot accept this value
@@ -323,41 +329,11 @@ def _avoid_mismatched_variants(error_cls):
f"Setting the variant '{variant.name}' of the '{pkg_name}' package "
f"to the invalid value '{str(variant)}'"
)
errors.append(_make_config_error(preferences, summary, error_cls=error_cls))
errors.append(make_error(preferences, summary))
return errors
@config_packages
def _wrongly_named_spec(error_cls):
"""Warns if the wrong name is used for an external spec"""
errors = []
packages_yaml = spack.config.CONFIG.get_config("packages")
for pkg_name in packages_yaml:
if pkg_name == "all":
continue
externals = packages_yaml[pkg_name].get("externals", [])
is_virtual = spack.repo.PATH.is_virtual(pkg_name)
for entry in externals:
spec = spack.spec.Spec(entry["spec"])
regular_pkg_is_wrong = not is_virtual and pkg_name != spec.name
virtual_pkg_is_wrong = is_virtual and not any(
p.name == spec.name for p in spack.repo.PATH.providers_for(pkg_name)
)
if regular_pkg_is_wrong or virtual_pkg_is_wrong:
summary = f"Wrong external spec detected for '{pkg_name}': {spec}"
errors.append(_make_config_error(entry, summary, error_cls=error_cls))
return errors
def _make_config_error(config_data, summary, error_cls):
s = io.StringIO()
s.write("Occurring in the following file:\n")
syaml.dump_config(config_data, stream=s, blame=True)
return error_cls(summary=summary, details=[s.getvalue()])
#: Sanity checks on package directives
package_directives = AuditClass(
group="packages",
@@ -796,30 +772,10 @@ def check_virtual_with_variants(spec, msg):
except spack.repo.UnknownPackageError:
# This dependency is completely missing, so report
# and continue the analysis
summary = f"{pkg_name}: unknown package '{dep_name}' in 'depends_on' directive"
details = [f" in {filename}"]
errors.append(error_cls(summary=summary, details=details))
continue
# Check for self-referential specs similar to:
#
# depends_on("foo@X.Y", when="^foo+bar")
#
# That would allow clingo to choose whether to have foo@X.Y+bar in the graph.
problematic_edges = [
x for x in when.edges_to_dependencies(dep_name) if not x.virtuals
]
if problematic_edges and not dep.patches:
summary = (
f"{pkg_name}: dependency on '{dep.spec}' when '{when}' is self-referential"
f"{pkg_name}: unknown package '{dep_name}' in " "'depends_on' directive"
)
details = [
(
f" please specify better using '^[virtuals=...] {dep_name}', or "
f"substitute with an equivalent condition on '{pkg_name}'"
),
f" in {filename}",
]
details = [f" in {filename}"]
errors.append(error_cls(summary=summary, details=details))
continue

View File

@@ -5,6 +5,7 @@
import codecs
import collections
import errno
import hashlib
import io
import itertools
@@ -22,7 +23,8 @@
import urllib.parse
import urllib.request
import warnings
from contextlib import closing
from contextlib import closing, contextmanager
from gzip import GzipFile
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
from urllib.error import HTTPError, URLError
@@ -48,7 +50,6 @@
import spack.stage
import spack.store
import spack.traverse as traverse
import spack.util.archive
import spack.util.crypto
import spack.util.file_cache as file_cache
import spack.util.gpg
@@ -1132,46 +1133,205 @@ def generate_key_index(key_prefix, tmpdir=None):
shutil.rmtree(tmpdir)
@contextmanager
def gzip_compressed_tarfile(path):
"""Create a reproducible, compressed tarfile"""
# Create gzip compressed tarball of the install prefix
# 1) Use explicit empty filename and mtime 0 for gzip header reproducibility.
# If the filename="" is dropped, Python will use fileobj.name instead.
# This should effectively mimick `gzip --no-name`.
# 2) On AMD Ryzen 3700X and an SSD disk, we have the following on compression speed:
# compresslevel=6 gzip default: llvm takes 4mins, roughly 2.1GB
# compresslevel=9 python default: llvm takes 12mins, roughly 2.1GB
# So we follow gzip.
with open(path, "wb") as f, ChecksumWriter(f) as inner_checksum, closing(
GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=inner_checksum)
) as gzip_file, ChecksumWriter(gzip_file) as outer_checksum, tarfile.TarFile(
name="", mode="w", fileobj=outer_checksum
) as tar:
yield tar, inner_checksum, outer_checksum
def _tarinfo_name(absolute_path: str, *, _path=pathlib.PurePath) -> str:
"""Compute tarfile entry name as the relative path from the (system) root."""
return _path(*_path(absolute_path).parts[1:]).as_posix()
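For instance, on a POSIX path (the path itself is only an example):

    # Only the filesystem root is dropped; all other leading directories are kept.
    assert _tarinfo_name("/opt/spack/opt/zlib-1.3") == "opt/spack/opt/zlib-1.3"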
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
"""Create a tarfile of an install prefix of a spec. Skips existing buildinfo file.
Only adds regular files, symlinks and dirs. Skips devices, fifos. Preserves hardlinks.
Normalizes permissions like git. Tar entries are added in depth-first pre-order, with
dir entries partitioned by file | dir, and sorted alphabetically, for reproducibility.
Partitioning ensures only one dir is in memory at a time, and sorting improves compression.
Args:
tar: tarfile object to add files to
prefix: absolute install prefix of spec"""
if not os.path.isabs(prefix) or not os.path.isdir(prefix):
raise ValueError(f"prefix '{prefix}' must be an absolute path to a directory")
hardlink_to_tarinfo_name: Dict[Tuple[int, int], str] = dict()
stat_key = lambda stat: (stat.st_dev, stat.st_ino)
try: # skip buildinfo file if it exists
files_to_skip = [stat_key(os.lstat(buildinfo_file_name(prefix)))]
skip = lambda entry: stat_key(entry.stat(follow_symlinks=False)) in files_to_skip
except OSError:
skip = lambda entry: False
files_to_skip = []
spack.util.archive.reproducible_tarfile_from_prefix(
tar,
prefix,
# Spack <= 0.21 did not include parent directories, leading to issues when tarballs are
# used in runtimes like AWS lambda.
include_parent_directories=True,
skip=skip,
)
# First add all directories leading up to `prefix` (Spack <= 0.21 did not do this, leading to
# issues when tarballs are used in runtimes like AWS lambda). Skip the file system root.
parent_dirs = reversed(pathlib.Path(prefix).parents)
next(parent_dirs) # skip the root: slices are supported from python 3.10
for parent_dir in parent_dirs:
dir_info = tarfile.TarInfo(_tarinfo_name(str(parent_dir)))
dir_info.type = tarfile.DIRTYPE
dir_info.mode = 0o755
tar.addfile(dir_info)
dir_stack = [prefix]
while dir_stack:
dir = dir_stack.pop()
# Add the dir before its contents
dir_info = tarfile.TarInfo(_tarinfo_name(dir))
dir_info.type = tarfile.DIRTYPE
dir_info.mode = 0o755
tar.addfile(dir_info)
# Sort by name: reproducible & improves compression
with os.scandir(dir) as it:
entries = sorted(it, key=lambda entry: entry.name)
new_dirs = []
for entry in entries:
if entry.is_dir(follow_symlinks=False):
new_dirs.append(entry.path)
continue
file_info = tarfile.TarInfo(_tarinfo_name(entry.path))
s = entry.stat(follow_symlinks=False)
# Skip existing binary distribution files.
id = stat_key(s)
if id in files_to_skip:
continue
# Normalize the mode
file_info.mode = 0o644 if s.st_mode & 0o100 == 0 else 0o755
if entry.is_symlink():
file_info.type = tarfile.SYMTYPE
file_info.linkname = os.readlink(entry.path)
tar.addfile(file_info)
elif entry.is_file(follow_symlinks=False):
# Deduplicate hardlinks
if s.st_nlink > 1:
if id in hardlink_to_tarinfo_name:
file_info.type = tarfile.LNKTYPE
file_info.linkname = hardlink_to_tarinfo_name[id]
tar.addfile(file_info)
continue
hardlink_to_tarinfo_name[id] = file_info.name
# If file not yet seen, copy it.
file_info.type = tarfile.REGTYPE
file_info.size = s.st_size
with open(entry.path, "rb") as f:
tar.addfile(file_info, f)
dir_stack.extend(reversed(new_dirs)) # we pop, so reverse to stay alphabetical
class ChecksumWriter(io.BufferedIOBase):
"""Checksum writer computes a checksum while writing to a file."""
myfileobj = None
def __init__(self, fileobj, algorithm=hashlib.sha256):
self.fileobj = fileobj
self.hasher = algorithm()
self.length = 0
def hexdigest(self):
return self.hasher.hexdigest()
def write(self, data):
if isinstance(data, (bytes, bytearray)):
length = len(data)
else:
data = memoryview(data)
length = data.nbytes
if length > 0:
self.fileobj.write(data)
self.hasher.update(data)
self.length += length
return length
def read(self, size=-1):
raise OSError(errno.EBADF, "read() on write-only object")
def read1(self, size=-1):
raise OSError(errno.EBADF, "read1() on write-only object")
def peek(self, n):
raise OSError(errno.EBADF, "peek() on write-only object")
@property
def closed(self):
return self.fileobj is None
def close(self):
fileobj = self.fileobj
if fileobj is None:
return
self.fileobj.close()
self.fileobj = None
def flush(self):
self.fileobj.flush()
def fileno(self):
return self.fileobj.fileno()
def rewind(self):
raise OSError("Can't rewind while computing checksum")
def readable(self):
return False
def writable(self):
return True
def seekable(self):
return True
def tell(self):
return self.fileobj.tell()
def seek(self, offset, whence=io.SEEK_SET):
# In principle forward seek is possible with b"0" padding,
# but this is not implemented.
if offset == 0 and whence == io.SEEK_CUR:
return
raise OSError("Can't seek while computing checksum")
def readline(self, size=-1):
raise OSError(errno.EBADF, "readline() on write-only object")
def _do_create_tarball(tarfile_path: str, binaries_dir: str, buildinfo: dict):
with spack.util.archive.gzip_compressed_tarfile(tarfile_path) as (
tar,
inner_checksum,
outer_checksum,
):
with gzip_compressed_tarfile(tarfile_path) as (tar, inner_checksum, outer_checksum):
# Tarball the install prefix
tarfile_of_spec_prefix(tar, binaries_dir)
# Serialize buildinfo for the tarball
bstring = syaml.dump(buildinfo, default_flow_style=True).encode("utf-8")
tarinfo = tarfile.TarInfo(
name=spack.util.archive.default_path_to_name(buildinfo_file_name(binaries_dir))
)
tarinfo = tarfile.TarInfo(name=_tarinfo_name(buildinfo_file_name(binaries_dir)))
tarinfo.type = tarfile.REGTYPE
tarinfo.size = len(bstring)
tarinfo.mode = 0o644
@@ -1541,7 +1701,7 @@ def fetch_url_to_mirror(url):
response = spack.oci.opener.urlopen(
urllib.request.Request(
url=ref.manifest_url(),
headers={"Accept": ", ".join(spack.oci.oci.manifest_content_type)},
headers={"Accept": "application/vnd.oci.image.manifest.v1+json"},
)
)
except Exception:

View File

@@ -213,6 +213,9 @@ def _root_spec(spec_str: str) -> str:
platform = str(spack.platforms.host())
if platform == "darwin":
spec_str += " %apple-clang"
elif platform == "windows":
# TODO (johnwparent): Remove version constraint when clingo patch is up
spec_str += " %msvc@:19.37"
elif platform == "linux":
spec_str += " %gcc"
elif platform == "freebsd":

View File

@@ -147,7 +147,7 @@ def _add_compilers_if_missing() -> None:
mixed_toolchain=sys.platform == "darwin"
)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers)
spack.compilers.add_compilers_to_config(new_compilers, init_config=False)
@contextlib.contextmanager

View File

@@ -542,7 +542,7 @@ def verify_patchelf(patchelf: "spack.util.executable.Executable") -> bool:
return version >= spack.version.Version("0.13.1")
def ensure_patchelf_in_path_or_raise() -> spack.util.executable.Executable:
def ensure_patchelf_in_path_or_raise() -> None:
"""Ensure patchelf is in the PATH or raise."""
# The old concretizer is not smart and we're doing its job: if the latest patchelf
# does not concretize because the compiler doesn't support C++17, we try to

View File

@@ -146,7 +146,7 @@ def mypy_root_spec() -> str:
def black_root_spec() -> str:
"""Return the root spec used to bootstrap black"""
return _root_spec("py-black@:24.1.0")
return _root_spec("py-black@:23.1.0")
def flake8_root_spec() -> str:

View File

@@ -217,9 +217,6 @@ def clean_environment():
env.unset("R_HOME")
env.unset("R_ENVIRON")
env.unset("LUA_PATH")
env.unset("LUA_CPATH")
# Affects GNU make, can e.g. indirectly inhibit enabling parallel build
# env.unset('MAKEFLAGS')
@@ -555,55 +552,58 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
"""
module = ModuleChangePropagator(pkg)
m = module
if context == Context.BUILD:
module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)
jobs = determine_number_of_jobs(parallel=pkg.parallel)
m.make_jobs = jobs
jobs = determine_number_of_jobs(parallel=pkg.parallel)
module.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable("make", jobs)
m.gmake = MakeExecutable("gmake", jobs)
m.ninja = MakeExecutable("ninja", jobs, supports_jobserver=False)
# TODO: johnwparent: add package or builder support to define these build tools
# for now there is no entrypoint for builders to define these on their
# own
if sys.platform == "win32":
m.nmake = Executable("nmake")
m.msbuild = Executable("msbuild")
# analog to configure for win32
m.cscript = Executable("cscript")
# TODO: make these build deps that can be installed if not found.
module.make = MakeExecutable("make", jobs)
module.gmake = MakeExecutable("gmake", jobs)
module.ninja = MakeExecutable("ninja", jobs, supports_jobserver=False)
# TODO: johnwparent: add package or builder support to define these build tools
# for now there is no entrypoint for builders to define these on their
# own
if sys.platform == "win32":
module.nmake = Executable("nmake")
module.msbuild = Executable("msbuild")
# analog to configure for win32
module.cscript = Executable("cscript")
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable("./configure")
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
module.configure = Executable("./configure")
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
m.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)
# Put spack compiler paths in module scope. (Some packages use it
# in setup_run_environment etc, so don't put it context == build)
link_dir = spack.paths.build_env_path
module.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
module.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
module.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
module.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])
# Useful directories within the prefix are encapsulated in
# a Prefix object.
module.prefix = pkg.prefix
m.prefix = pkg.prefix
# Platform-specific library suffix.
module.dso_suffix = dso_suffix
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get("compiler", module.spack_cc)
compiler_path = kwargs.get("compiler", m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(
pkg.spec.architecture, compiler, static_lib, shared_lib, **kwargs
)
module.static_to_shared_library = static_to_shared_library
m.static_to_shared_library = static_to_shared_library
module.propagate_changes_to_mro()
@@ -972,8 +972,8 @@ def __init__(self, *specs: spack.spec.Spec, context: Context) -> None:
self.should_set_package_py_globals = (
self.should_setup_dependent_build_env | self.should_setup_run_env | UseMode.ROOT
)
# In a build context, the root needs build-specific globals set.
self.needs_build_context = UseMode.ROOT
# In a build context, the root and direct build deps need build-specific globals set.
self.needs_build_context = UseMode.ROOT | UseMode.BUILDTIME_DIRECT
def set_all_package_py_globals(self):
"""Set the globals in modules of package.py files."""

View File

@@ -541,7 +541,7 @@ def autoreconf(self, pkg, spec, prefix):
if os.path.exists(self.configure_abs_path):
return
# Else try to regenerate it, which requires a few build dependencies
# Else try to regenerate it, which reuquires a few build dependencies
ensure_build_dependencies_or_raise(
spec=spec,
dependencies=["autoconf", "automake", "libtool"],

View File

@@ -199,8 +199,6 @@ def initconfig_mpi_entries(self):
mpiexec = "/usr/bin/srun"
else:
mpiexec = os.path.join(spec["slurm"].prefix.bin, "srun")
elif hasattr(spec["mpi"].package, "mpiexec"):
mpiexec = spec["mpi"].package.mpiexec
else:
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpirun")
if not os.path.exists(mpiexec):

View File

@@ -15,7 +15,6 @@
import spack.build_environment
import spack.builder
import spack.deptypes as dt
import spack.package_base
from spack.directives import build_system, conflicts, depends_on, variant
from spack.multimethod import when
@@ -32,86 +31,8 @@ def _extract_primary_generator(generator):
primary generator from the generator string which may contain an
optional secondary generator.
"""
return _primary_generator_extractor.match(generator).group(1)
def _maybe_set_python_hints(pkg: spack.package_base.PackageBase, args: List[str]) -> None:
"""Set the PYTHON_EXECUTABLE, Python_EXECUTABLE, and Python3_EXECUTABLE CMake variables
if the package has Python as build or link dep and ``find_python_hints`` is set to True. See
``find_python_hints`` for context."""
if not getattr(pkg, "find_python_hints", False):
return
pythons = pkg.spec.dependencies("python", dt.BUILD | dt.LINK)
if len(pythons) != 1:
return
try:
python_executable = pythons[0].package.command.path
except RuntimeError:
return
args.extend(
[
CMakeBuilder.define("PYTHON_EXECUTABLE", python_executable),
CMakeBuilder.define("Python_EXECUTABLE", python_executable),
CMakeBuilder.define("Python3_EXECUTABLE", python_executable),
]
)
def _supports_compilation_databases(pkg: spack.package_base.PackageBase) -> bool:
"""Check if this package (and CMake) can support compilation databases."""
# CMAKE_EXPORT_COMPILE_COMMANDS only exists for CMake >= 3.5
if not pkg.spec.satisfies("^cmake@3.5:"):
return False
# CMAKE_EXPORT_COMPILE_COMMANDS is only implemented for Makefile and Ninja generators
if not (pkg.spec.satisfies("generator=make") or pkg.spec.satisfies("generator=ninja")):
return False
return True
def _conditional_cmake_defaults(pkg: spack.package_base.PackageBase, args: List[str]) -> None:
"""Set a few default defines for CMake, depending on its version."""
cmakes = pkg.spec.dependencies("cmake", dt.BUILD)
if len(cmakes) != 1:
return
cmake = cmakes[0]
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
try:
ipo = pkg.spec.variants["ipo"].value
except KeyError:
ipo = False
if cmake.satisfies("@3.9:"):
args.append(CMakeBuilder.define("CMAKE_INTERPROCEDURAL_OPTIMIZATION", ipo))
# Disable Package Registry: export(PACKAGE) may put files in the user's home directory, and
# find_package may search there. This is not what we want.
# Do not populate CMake User Package Registry
if cmake.satisfies("@3.15:"):
# see https://cmake.org/cmake/help/latest/policy/CMP0090.html
args.append(CMakeBuilder.define("CMAKE_POLICY_DEFAULT_CMP0090", "NEW"))
elif cmake.satisfies("@3.1:"):
# see https://cmake.org/cmake/help/latest/variable/CMAKE_EXPORT_NO_PACKAGE_REGISTRY.html
args.append(CMakeBuilder.define("CMAKE_EXPORT_NO_PACKAGE_REGISTRY", True))
# Do not use CMake User/System Package Registry
# https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#disabling-the-package-registry
if cmake.satisfies("@3.16:"):
args.append(CMakeBuilder.define("CMAKE_FIND_USE_PACKAGE_REGISTRY", False))
elif cmake.satisfies("@3.1:3.15"):
args.append(CMakeBuilder.define("CMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY", False))
args.append(CMakeBuilder.define("CMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY", False))
# Export a compilation database if supported.
if _supports_compilation_databases(pkg):
args.append(CMakeBuilder.define("CMAKE_EXPORT_COMPILE_COMMANDS", True))
primary_generator = _primary_generator_extractor.match(generator).group(1)
return primary_generator
def generator(*names: str, default: Optional[str] = None):
@@ -165,13 +86,6 @@ class CMakePackage(spack.package_base.PackageBase):
#: Legacy buildsystem attribute used to deserialize and install old specs
legacy_buildsystem = "cmake"
#: When this package depends on Python and ``find_python_hints`` is set to True, pass the
#: defines {Python3,Python,PYTHON}_EXECUTABLE explicitly, so that CMake locates the right
#: Python in its builtin FindPython3, FindPython, and FindPythonInterp modules. Spack does
#: CMake's job because CMake's modules by default only search for Python versions known at the
#: time of release.
find_python_hints = True
build_system("cmake")
with when("build_system=cmake"):
@@ -302,10 +216,7 @@ class CMakeBuilder(BaseBuilder):
@property
def archive_files(self):
"""Files to archive for packages based on CMake"""
files = [os.path.join(self.build_directory, "CMakeCache.txt")]
if _supports_compilation_databases(self):
files.append(os.path.join(self.build_directory, "compile_commands.json"))
return files
return [os.path.join(self.build_directory, "CMakeCache.txt")]
@property
def root_cmakelists_dir(self):
@@ -330,9 +241,9 @@ def std_cmake_args(self):
"""Standard cmake arguments provided as a property for
convenience of package writers
"""
args = CMakeBuilder.std_args(self.pkg, generator=self.generator)
args += getattr(self.pkg, "cmake_flag_args", [])
return args
std_cmake_args = CMakeBuilder.std_args(self.pkg, generator=self.generator)
std_cmake_args += getattr(self.pkg, "cmake_flag_args", [])
return std_cmake_args
@staticmethod
def std_args(pkg, generator=None):
@@ -352,6 +263,11 @@ def std_args(pkg, generator=None):
except KeyError:
build_type = "RelWithDebInfo"
try:
ipo = pkg.spec.variants["ipo"].value
except KeyError:
ipo = False
define = CMakeBuilder.define
args = [
"-G",
@@ -360,6 +276,10 @@ def std_args(pkg, generator=None):
define("CMAKE_BUILD_TYPE", build_type),
]
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
if pkg.spec.satisfies("^cmake@3.9:"):
args.append(define("CMAKE_INTERPROCEDURAL_OPTIMIZATION", ipo))
if primary_generator == "Unix Makefiles":
args.append(define("CMAKE_VERBOSE_MAKEFILE", True))
@@ -368,9 +288,6 @@ def std_args(pkg, generator=None):
[define("CMAKE_FIND_FRAMEWORK", "LAST"), define("CMAKE_FIND_APPBUNDLE", "LAST")]
)
_conditional_cmake_defaults(pkg, args)
_maybe_set_python_hints(pkg, args)
# Set up CMake rpath
args.extend(
[

View File

@@ -218,7 +218,7 @@ def pset_components(self):
"+inspector": " intel-inspector",
"+itac": " intel-itac intel-ta intel-tc" " intel-trace-analyzer intel-trace-collector",
# Trace Analyzer and Collector
"+vtune": " intel-vtune",
"+vtune": " intel-vtune"
# VTune, ..-profiler since 2020, ..-amplifier before
}.items():
if variant in self.spec:

View File

@@ -29,12 +29,15 @@ class LuaPackage(spack.package_base.PackageBase):
with when("build_system=lua"):
depends_on("lua-lang")
with when("^[virtuals=lua-lang] lua"):
extends("lua")
with when("^[virtuals=lua-lang] lua-luajit"):
extends("lua-luajit+lualinks")
with when("^[virtuals=lua-lang] lua-luajit-openresty"):
extends("lua-luajit-openresty+lualinks")
extends("lua", when="^lua")
with when("^lua-luajit"):
extends("lua-luajit")
depends_on("luajit")
depends_on("lua-luajit+lualinks")
with when("^lua-luajit-openresty"):
extends("lua-luajit-openresty")
depends_on("luajit")
depends_on("lua-luajit-openresty+lualinks")
@property
def lua(self):

View File

@@ -149,7 +149,7 @@ def std_args(pkg):
else:
default_library = "shared"
return [
args = [
"-Dprefix={0}".format(pkg.prefix),
# If we do not specify libdir explicitly, Meson chooses something
# like lib/x86_64-linux-gnu, which causes problems when trying to
@@ -163,6 +163,8 @@ def std_args(pkg):
"-Dwrap_mode=nodownload",
]
return args
@property
def build_dirname(self):
"""Returns the directory name to use when building the package."""

View File

@@ -69,7 +69,7 @@ class MSBuildBuilder(BaseBuilder):
@property
def build_directory(self):
"""Return the directory containing the MSBuild solution or vcxproj."""
return fs.windows_sfn(self.pkg.stage.source_path)
return self.pkg.stage.source_path
@property
def toolchain_version(self):

View File

@@ -77,11 +77,7 @@ def ignore_quotes(self):
@property
def build_directory(self):
"""Return the directory containing the makefile."""
return (
fs.windows_sfn(self.pkg.stage.source_path)
if not self.makefile_root
else fs.windows_sfn(self.makefile_root)
)
return self.pkg.stage.source_path if not self.makefile_root else self.makefile_root
@property
def std_nmake_args(self):

View File

@@ -9,13 +9,10 @@
import shutil
from os.path import basename, isdir
from llnl.util import tty
from llnl.util.filesystem import HeaderList, LibraryList, find_libraries, join_path, mkdirp
from llnl.util.filesystem import HeaderList, find_libraries, join_path, mkdirp
from llnl.util.link_tree import LinkTree
from spack.build_environment import dso_suffix
from spack.directives import conflicts, variant
from spack.package_base import InstallError
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable
@@ -182,72 +179,16 @@ class IntelOneApiLibraryPackage(IntelOneApiPackage):
"""
def openmp_libs(self):
"""Supply LibraryList for linking OpenMP"""
# NB: Hunting down explicit library files may be the Spack way of
# doing things, but it is better to add the compiler defined option
# e.g. -fopenmp
# If other packages use openmp, then all the packages need to
# support the same ABI. Spack usually uses the same compiler
# for all the packages, but you can force it if necessary:
#
# e.g. spack install blaspp%oneapi@2024 ^intel-oneapi-mkl%oneapi@2024
#
if self.spec.satisfies("%intel") or self.spec.satisfies("%oneapi"):
libname = "libiomp5"
elif self.spec.satisfies("%gcc"):
libname = "libgomp"
elif self.spec.satisfies("%clang"):
libname = "libomp"
else:
raise InstallError(
"OneAPI package with OpenMP threading requires one of %clang, %gcc, %oneapi, "
"or %intel"
)
# query the compiler for the library path
with self.compiler.compiler_environment():
omp_lib_path = Executable(self.compiler.cc)(
"--print-file-name", f"{libname}.{dso_suffix}", output=str
).strip()
# Newer versions of clang do not give the full path to libomp. If that's
# the case, look in a path relative to the compiler where libomp is
# typically found. If it's not found there, error out.
if not os.path.exists(omp_lib_path) and self.spec.satisfies("%clang"):
compiler_root = os.path.dirname(os.path.dirname(os.path.realpath(self.compiler.cc)))
omp_lib_path_compiler = os.path.join(compiler_root, "lib", f"{libname}.{dso_suffix}")
if os.path.exists(omp_lib_path_compiler):
omp_lib_path = omp_lib_path_compiler
# if the compiler cannot find the file, it returns the input path
if not os.path.exists(omp_lib_path):
raise InstallError(f"OneAPI package cannot locate OpenMP library: {omp_lib_path}")
omp_libs = LibraryList(omp_lib_path)
tty.info(f"OneAPI package requires OpenMP library: {omp_libs}")
return omp_libs
# find_headers uses heuristics to determine the include directory
# that does not work for oneapi packages. Use explicit directories
# instead.
def header_directories(self, dirs):
h = HeaderList([])
h.directories = dirs
# trilinos passes the directories to cmake, and cmake requires
# that the directory exists
for dir in dirs:
if not isdir(dir):
raise RuntimeError(f"{dir} does not exist")
return h
@property
def headers(self):
# This should match the directories added to CPATH by
# env/vars.sh for the component
return self.header_directories([self.component_prefix.include])
return self.header_directories(
[self.component_prefix.include, self.component_prefix.include.join(self.component_dir)]
)
@property
def libs(self):

View File

@@ -4,15 +4,12 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
from typing import Iterable
from llnl.util.filesystem import filter_file, find
from llnl.util.lang import memoized
from llnl.util.filesystem import filter_file
import spack.builder
import spack.package_base
from spack.directives import build_system, extends
from spack.install_test import SkipTest, test_part
from spack.util.executable import Executable
from ._checks import BaseBuilder, execute_build_time_tests
@@ -31,58 +28,6 @@ class PerlPackage(spack.package_base.PackageBase):
extends("perl", when="build_system=perl")
@property
@memoized
def _platform_dir(self):
"""Name of platform-specific module subdirectory."""
perl = self.spec["perl"].command
options = "-E", "use Config; say $Config{archname}"
out = perl(*options, output=str.split, error=str.split)
return out.strip()
@property
def use_modules(self) -> Iterable[str]:
"""Names of the package's perl modules."""
module_files = find(self.prefix.lib, ["*.pm"], recursive=True)
# Drop the platform directory, if present
if self._platform_dir:
platform_dir = self._platform_dir + os.sep
module_files = [m.replace(platform_dir, "") for m in module_files]
# Drop the extension and library path
prefix = self.prefix.lib + os.sep
modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files]
# Drop the perl subdirectory as well
return ["::".join(m.split(os.sep)[1:]) for m in modules]
@property
def skip_modules(self) -> Iterable[str]:
"""Names of modules that should be skipped when running tests.
These are a subset of use_modules.
Returns:
List of strings of module names.
"""
return []
def test_use(self):
"""Test 'use module'"""
if not self.use_modules:
raise SkipTest("Test requires use_modules package property.")
perl = self.spec["perl"].command
for module in self.use_modules:
if module in self.skip_modules:
continue
with test_part(self, f"test_use-{module}", purpose=f"checking use of {module}"):
options = ["-we", f'use strict; use {module}; print("OK\n")']
out = perl(*options, output=str.split, error=str.split)
assert "OK" in out
@spack.builder.builder("perl")
class PerlBuilder(BaseBuilder):
@@ -107,7 +52,7 @@ class PerlBuilder(BaseBuilder):
phases = ("configure", "build", "install")
#: Names associated with package methods in the old build-system format
legacy_methods = ("configure_args", "check", "test_use")
legacy_methods = ("configure_args", "check")
#: Names associated with package attributes in the old build-system format
legacy_attributes = ()

View File

@@ -2,15 +2,11 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import functools
import inspect
import operator
import os
import re
import shutil
import stat
from typing import Dict, Iterable, List, Mapping, Optional, Tuple
from typing import Iterable, List, Mapping, Optional
import archspec
@@ -27,7 +23,7 @@
import spack.package_base
import spack.spec
import spack.store
from spack.directives import build_system, depends_on, extends
from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import test_part
from spack.spec import Spec
@@ -56,6 +52,8 @@ def _flatten_dict(dictionary: Mapping[str, object]) -> Iterable[str]:
class PythonExtension(spack.package_base.PackageBase):
maintainers("adamjstewart")
@property
def import_modules(self) -> Iterable[str]:
"""Names of modules that the Python package provides.
@@ -138,52 +136,31 @@ def view_file_conflicts(self, view, merge_map):
return conflicts
def add_files_to_view(self, view, merge_map, skip_if_exists=True):
# Patch up shebangs to the python linked in the view only if python is built by Spack.
if not self.extendee_spec or self.extendee_spec.external:
if not self.extendee_spec:
return super().add_files_to_view(view, merge_map, skip_if_exists)
# We only patch shebangs in the bin directory.
copied_files: Dict[Tuple[int, int], str] = {} # File identifier -> source
delayed_links: List[Tuple[str, str]] = [] # List of symlinks from merge map
bin_dir = self.spec.prefix.bin
python_prefix = self.extendee_spec.prefix
python_is_external = self.extendee_spec.external
global_view = fs.same_path(python_prefix, view.get_projection_for_spec(self.spec))
for src, dst in merge_map.items():
if skip_if_exists and os.path.lexists(dst):
if os.path.exists(dst):
continue
if not fs.path_contains_subdirectory(src, bin_dir):
elif global_view or not fs.path_contains_subdirectory(src, bin_dir):
view.link(src, dst)
continue
s = os.lstat(src)
# Symlink is delayed because we may need to re-target if its target is copied in view
if stat.S_ISLNK(s.st_mode):
delayed_links.append((src, dst))
continue
# If it's executable and has a shebang, copy and patch it.
if (s.st_mode & 0b111) and fs.has_shebang(src):
copied_files[(s.st_dev, s.st_ino)] = dst
elif not os.path.islink(src):
shutil.copy2(src, dst)
fs.filter_file(
python_prefix, os.path.abspath(view.get_projection_for_spec(self.spec)), dst
)
is_script = fs.is_nonsymlink_exe_with_shebang(src)
if is_script and not python_is_external:
fs.filter_file(
python_prefix,
os.path.abspath(view.get_projection_for_spec(self.spec)),
dst,
)
else:
view.link(src, dst)
# Finally re-target the symlinks that point to copied files.
for src, dst in delayed_links:
try:
s = os.stat(src)
target = copied_files[(s.st_dev, s.st_ino)]
except (OSError, KeyError):
target = None
if target:
os.symlink(os.path.relpath(target, os.path.dirname(dst)), dst)
else:
view.link(src, dst, spec=self.spec)
orig_link_target = os.path.realpath(src)
new_link_target = os.path.abspath(merge_map[orig_link_target])
view.link(new_link_target, dst)
def remove_files_from_view(self, view, merge_map):
ignore_namespace = False
@@ -369,19 +346,16 @@ def headers(self) -> HeaderList:
# Remove py- prefix in package name
name = self.spec.name[3:]
# Headers should only be in include or platlib, but no harm in checking purelib too
# Headers may be in either location
include = self.prefix.join(self.spec["python"].package.include).join(name)
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
purelib = self.prefix.join(self.spec["python"].package.purelib).join(name)
headers_list = map(fs.find_all_headers, [include, platlib, purelib])
headers = functools.reduce(operator.add, headers_list)
headers = fs.find_all_headers(include) + fs.find_all_headers(platlib)
if headers:
return headers
msg = "Unable to locate {} headers in {}, {}, or {}"
raise NoHeadersError(msg.format(self.spec.name, include, platlib, purelib))
msg = "Unable to locate {} headers in {} or {}"
raise NoHeadersError(msg.format(self.spec.name, include, platlib))
@property
def libs(self) -> LibraryList:
@@ -390,19 +364,15 @@ def libs(self) -> LibraryList:
# Remove py- prefix in package name
name = self.spec.name[3:]
# Libraries should only be in platlib, but no harm in checking purelib too
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
purelib = self.prefix.join(self.spec["python"].package.purelib).join(name)
root = self.prefix.join(self.spec["python"].package.platlib).join(name)
find_all_libraries = functools.partial(fs.find_all_libraries, recursive=True)
libs_list = map(find_all_libraries, [platlib, purelib])
libs = functools.reduce(operator.add, libs_list)
libs = fs.find_all_libraries(root, recursive=True)
if libs:
return libs
msg = "Unable to recursively locate {} libraries in {} or {}"
raise NoLibrariesError(msg.format(self.spec.name, platlib, purelib))
msg = "Unable to recursively locate {} libraries in {}"
raise NoLibrariesError(msg.format(self.spec.name, root))
@spack.builder.builder("python_pip")

View File

@@ -162,9 +162,23 @@ def hip_flags(amdgpu_target):
# Add compiler minimum versions based on the first release where the
# processor is included in llvm/lib/Support/TargetParser.cpp
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx900:xnack-")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx906:xnack-")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx908:xnack-")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx90c")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a:xnack-")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a:xnack+")
depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx940")
depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx941")
depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx942")
depends_on("llvm-amdgpu@4.5.0:", when="amdgpu_target=gfx1013")
depends_on("llvm-amdgpu@3.8.0:", when="amdgpu_target=gfx1030")
depends_on("llvm-amdgpu@3.9.0:", when="amdgpu_target=gfx1031")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx1032")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx1033")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx1034")
depends_on("llvm-amdgpu@4.5.0:", when="amdgpu_target=gfx1035")
depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx1036")
depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1100")
depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1101")

View File

@@ -9,8 +9,6 @@
import inspect
from typing import List, Optional, Tuple
from llnl.util import lang
import spack.build_environment
#: Builder classes, as registered by the "builder" decorator
@@ -233,27 +231,24 @@ def __new__(mcs, name, bases, attr_dict):
for temporary_stage in (_RUN_BEFORE, _RUN_AFTER):
staged_callbacks = temporary_stage.callbacks
# Here we have an adapter from an old-style package. This means there is no
# hierarchy of builders, and every callback that had to be combined between
# *Package and *Builder has been combined already by _PackageAdapterMeta
if name == "Adapter":
# We don't have callbacks in this class, move on
if not staged_callbacks:
continue
# If we are here we have callbacks. To get a complete list, we accumulate all the
# callbacks from base classes, we deduplicate them, then prepend what we have
# registered here.
# If we are here we have callbacks. To get a complete list, get first what
# was attached to parent classes, then prepend what we have registered here.
#
# The order should be:
# 1. Callbacks are registered in order within the same class
# 2. Callbacks defined in derived classes precede those defined in base
# classes
callbacks_from_base = []
for base in bases:
current_callbacks = getattr(base, temporary_stage.attribute_name, None)
if not current_callbacks:
continue
callbacks_from_base.extend(current_callbacks)
callbacks_from_base = list(lang.dedupe(callbacks_from_base))
callbacks_from_base = getattr(base, temporary_stage.attribute_name, None)
if callbacks_from_base:
break
else:
callbacks_from_base = []
# Set the callbacks in this class and flush the temporary stage
attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base
del temporary_stage.callbacks[:]

View File

@@ -35,9 +35,9 @@ def _misc_cache():
#: Spack's cache for small data
MISC_CACHE: Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_misc_cache)
)
MISC_CACHE: Union[
spack.util.file_cache.FileCache, llnl.util.lang.Singleton
] = llnl.util.lang.Singleton(_misc_cache)
def fetch_cache_location():
@@ -91,6 +91,6 @@ def symlink(self, mirror_ref):
#: Spack's local cache for downloaded source archives
FETCH_CACHE: Union[spack.fetch_strategy.FsCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_fetch_cache)
)
FETCH_CACHE: Union[
spack.fetch_strategy.FsCache, llnl.util.lang.Singleton
] = llnl.util.lang.Singleton(_fetch_cache)

View File

@@ -7,7 +7,9 @@
get_job_name = lambda needs_entry: (
needs_entry.get("job")
if (isinstance(needs_entry, collections.abc.Mapping) and needs_entry.get("artifacts", True))
else needs_entry if isinstance(needs_entry, str) else None
else needs_entry
if isinstance(needs_entry, str)
else None
)
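The three cases the lambda distinguishes, sketched with invented entries:

    assert get_job_name({"job": "build-zlib", "artifacts": True}) == "build-zlib"
    assert get_job_name("build-zlib") == "build-zlib"
    # A mapping that disables artifacts yields no job name.
    assert get_job_name({"job": "build-zlib", "artifacts": False}) is None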

View File

@@ -7,14 +7,13 @@
import glob
import hashlib
import json
import multiprocessing
import multiprocessing.pool
import os
import shutil
import sys
import tempfile
import urllib.request
from typing import Dict, List, Optional, Tuple, Union
from typing import Dict, List, Optional, Tuple
import llnl.util.tty as tty
from llnl.string import plural
@@ -327,30 +326,8 @@ def _progress(i: int, total: int):
return ""
class NoPool:
def map(self, func, args):
return [func(a) for a in args]
def starmap(self, func, args):
return [func(*a) for a in args]
def __enter__(self):
return self
def __exit__(self, *args):
pass
MaybePool = Union[multiprocessing.pool.Pool, NoPool]
def _make_pool() -> MaybePool:
"""Can't use threading because it's unsafe, and can't use spawned processes because of globals.
That leaves only forking"""
if multiprocessing.get_start_method() == "fork":
return multiprocessing.pool.Pool(determine_number_of_jobs(parallel=True))
else:
return NoPool()
def _make_pool():
return multiprocessing.pool.Pool(determine_number_of_jobs(parallel=True))
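Either return value of ``_make_pool`` is used the same way by callers; a sketch with a trivial worker:

    with _make_pool() as pool:
        # Runs in parallel under fork, or serially through NoPool otherwise.
        results = pool.map(len, ["spack", "buildcache"])  # -> [5, 10]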
def push_fn(args):
@@ -594,15 +571,6 @@ def _put_manifest(
base_manifest, base_config = base_images[architecture]
env = _retrieve_env_dict_from_config(base_config)
# If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
# This is because Singularity / Apptainer is very strict about not mixing them.
base_manifest_mediaType = base_manifest.get(
"mediaType", "application/vnd.oci.image.manifest.v1+json"
)
use_docker_format = (
base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
)
spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
# Create an oci.image.config file
@@ -634,8 +602,8 @@ def _put_manifest(
# Upload the config file
upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
manifest = {
"mediaType": base_manifest_mediaType,
oci_manifest = {
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"schemaVersion": 2,
"config": {
"mediaType": base_manifest["config"]["mediaType"],
@@ -646,11 +614,7 @@ def _put_manifest(
*(layer for layer in base_manifest["layers"]),
*(
{
"mediaType": (
"application/vnd.docker.image.rootfs.diff.tar.gzip"
if use_docker_format
else "application/vnd.oci.image.layer.v1.tar+gzip"
),
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"digest": str(checksums[s.dag_hash()].compressed_digest),
"size": checksums[s.dag_hash()].size,
}
@@ -659,11 +623,11 @@ def _put_manifest(
],
}
if not use_docker_format and annotations:
manifest["annotations"] = annotations
if annotations:
oci_manifest["annotations"] = annotations
# Finally upload the manifest
upload_manifest_with_retry(image_ref, manifest=manifest)
upload_manifest_with_retry(image_ref, oci_manifest=oci_manifest)
# delete the config file
os.unlink(config_file)
@@ -699,7 +663,7 @@ def _push_oci(
base_image: Optional[ImageReference],
installed_specs_with_deps: List[Spec],
tmpdir: str,
pool: MaybePool,
pool: multiprocessing.pool.Pool,
force: bool = False,
) -> Tuple[List[str], Dict[str, Tuple[dict, dict]], Dict[str, spack.oci.oci.Blob]]:
"""Push specs to an OCI registry
@@ -815,10 +779,11 @@ def _config_from_tag(image_ref: ImageReference, tag: str) -> Optional[dict]:
return config if "spec" in config else None
def _update_index_oci(image_ref: ImageReference, tmpdir: str, pool: MaybePool) -> None:
request = urllib.request.Request(url=image_ref.tags_url())
response = spack.oci.opener.urlopen(request)
spack.oci.opener.ensure_status(request, response, 200)
def _update_index_oci(
image_ref: ImageReference, tmpdir: str, pool: multiprocessing.pool.Pool
) -> None:
response = spack.oci.opener.urlopen(urllib.request.Request(url=image_ref.tags_url()))
spack.oci.opener.ensure_status(response, 200)
tags = json.load(response)["tags"]
# Fetch all image config files in parallel

View File

@@ -5,7 +5,6 @@
import re
import sys
from typing import Dict, Optional
import llnl.string
import llnl.util.lang
@@ -18,15 +17,10 @@
import spack.util.crypto
import spack.util.web as web_util
from spack.cmd.common import arguments
from spack.package_base import (
ManualDownloadRequiredError,
PackageBase,
deprecated_version,
preferred_version,
)
from spack.package_base import PackageBase, deprecated_version, preferred_version
from spack.util.editor import editor
from spack.util.format import get_version_lines
from spack.version import StandardVersion, Version
from spack.version import Version
description = "checksum available versions of a package"
section = "packaging"
@@ -90,30 +84,28 @@ def checksum(parser, args):
spec = spack.spec.Spec(args.package)
# Get the package we're going to generate checksums for
pkg: PackageBase = spack.repo.PATH.get_pkg_class(spec.name)(spec)
pkg = spack.repo.PATH.get_pkg_class(spec.name)(spec)
# Skip manually downloaded packages
if pkg.manual_download:
raise ManualDownloadRequiredError(pkg.download_instr)
versions = [Version(v) for v in args.versions]
versions = [StandardVersion.from_string(v) for v in args.versions]
# Define placeholder for remote versions. This'll help reduce redundant work if we need to
# check for the existence of remote versions more than once.
remote_versions: Optional[Dict[StandardVersion, str]] = None
# Define placeholder for remote versions.
# This'll help reduce redundant work if we need to check for the existance
# of remote versions more than once.
remote_versions = None
# Add latest version if requested
if args.latest:
remote_versions = pkg.fetch_remote_versions(concurrency=args.jobs)
remote_versions = pkg.fetch_remote_versions(args.jobs)
if len(remote_versions) > 0:
versions.append(max(remote_versions.keys()))
latest_version = sorted(remote_versions.keys(), reverse=True)[0]
versions.append(latest_version)
# Add preferred version if requested (todo: exclude git versions)
# Add preferred version if requested
if args.preferred:
versions.append(preferred_version(pkg))
# Store a dict of the form version -> URL
url_dict: Dict[StandardVersion, str] = {}
url_dict = {}
for version in versions:
if deprecated_version(pkg, version):
@@ -123,16 +115,16 @@ def checksum(parser, args):
if url is not None:
url_dict[version] = url
continue
# If we get here, it's because no valid url was provided by the package. Do expensive
# fallback to try to recover
# if we get here, it's because no valid url was provided by the package
# do expensive fallback to try to recover
if remote_versions is None:
remote_versions = pkg.fetch_remote_versions(concurrency=args.jobs)
remote_versions = pkg.fetch_remote_versions(args.jobs)
if version in remote_versions:
url_dict[version] = remote_versions[version]
if len(versions) <= 0:
if remote_versions is None:
remote_versions = pkg.fetch_remote_versions(concurrency=args.jobs)
remote_versions = pkg.fetch_remote_versions(args.jobs)
url_dict = remote_versions
# A spidered URL can differ from the package.py *computed* URL, pointing to different tarballs.

View File

@@ -6,7 +6,6 @@
import json
import os
import shutil
from urllib.parse import urlparse, urlunparse
import llnl.util.filesystem as fs
import llnl.util.tty as tty
@@ -158,9 +157,7 @@ def setup_parser(subparser):
description=deindent(ci_reproduce.__doc__),
help=spack.cmd.first_line(ci_reproduce.__doc__),
)
reproduce.add_argument(
"job_url", help="URL of GitLab job web page or artifact", type=_gitlab_artifacts_url
)
reproduce.add_argument("job_url", help="URL of job artifacts bundle")
reproduce.add_argument(
"--runtime",
help="Container runtime to use.",
@@ -795,6 +792,11 @@ def ci_reproduce(args):
artifacts of the provided gitlab pipeline rebuild job's URL will be used to derive
instructions for reproducing the build locally
"""
job_url = args.job_url
work_dir = args.working_dir
autostart = args.autostart
runtime = args.runtime
# Allow passing GPG key for reproducing protected CI jobs
if args.gpg_file:
gpg_key_url = url_util.path_to_file_url(args.gpg_file)
@@ -803,47 +805,7 @@ def ci_reproduce(args):
else:
gpg_key_url = None
return spack_ci.reproduce_ci_job(
args.job_url, args.working_dir, args.autostart, gpg_key_url, args.runtime
)
def _gitlab_artifacts_url(url: str) -> str:
"""Take a URL either to the URL of the job in the GitLab UI, or to the artifacts zip file,
and output the URL to the artifacts zip file."""
parsed = urlparse(url)
if not parsed.scheme or not parsed.netloc:
raise ValueError(url)
parts = parsed.path.split("/")
if len(parts) < 2:
raise ValueError(url)
# Just use API endpoints verbatim, they're probably generated by Spack.
if parts[1] == "api":
return url
# If it's a URL to the job in the Gitlab UI, we may need to append the artifacts path.
minus_idx = parts.index("-")
# Remove repeated slashes in the remainder
rest = [p for p in parts[minus_idx + 1 :] if p]
# Now the format is jobs/X or jobs/X/artifacts/download
if len(rest) < 2 or rest[0] != "jobs":
raise ValueError(url)
if len(rest) == 2:
# replace jobs/X with jobs/X/artifacts/download
rest.extend(("artifacts", "download"))
# Replace the parts and unparse.
parts[minus_idx + 1 :] = rest
# Don't allow fragments / queries
return urlunparse(parsed._replace(path="/".join(parts), fragment="", query=""))
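As a quick sanity check of the mapping this helper implements (assuming the function above is in scope; hostname, project path, and job id below are made up): a job page URL gains the artifacts/download suffix, while API URLs pass through verbatim.

assert _gitlab_artifacts_url(
    "https://gitlab.example.com/group/project/-/jobs/12345"
) == "https://gitlab.example.com/group/project/-/jobs/12345/artifacts/download"
assert _gitlab_artifacts_url(
    "https://gitlab.example.com/api/v4/projects/1/jobs/12345/artifacts"
) == "https://gitlab.example.com/api/v4/projects/1/jobs/12345/artifacts"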
return spack_ci.reproduce_ci_job(job_url, work_dir, autostart, gpg_key_url, runtime)
def ci(parser, args):

View File

@@ -404,17 +404,6 @@ def no_install_status():
)
@arg
def debug_nondefaults():
return Args(
"-d",
"--debug-nondefaults",
action="store_true",
default=False,
help="show non-default decisions in red",
)
@arg
def no_checksum():
return Args(
@@ -581,14 +570,6 @@ def add_concretizer_args(subparser):
default=None,
help="reuse installed dependencies only",
)
subgroup.add_argument(
"--deprecated",
action=ConfigSetAction,
dest="config:deprecated",
const=True,
default=None,
help="allow concretizer to select deprecated versions",
)
def add_connection_args(subparser, add_help):

View File

@@ -89,7 +89,7 @@ def compiler_find(args):
paths, scope=None, mixed_toolchain=args.mixed_toolchain
)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope)
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope, init_config=False)
n = len(new_compilers)
s = "s" if n > 1 else ""

View File

@@ -76,10 +76,6 @@ def setup_parser(subparser):
)
add_parser.add_argument("-f", "--file", help="file from which to set all config values")
change_parser = sp.add_parser("change", help="swap variants etc. on specs in config")
change_parser.add_argument("path", help="colon-separated path to config section with specs")
change_parser.add_argument("--match-spec", help="only change constraints that match this")
prefer_upstream_parser = sp.add_parser(
"prefer-upstream", help="set package preferences from upstream"
)
@@ -122,7 +118,7 @@ def _get_scope_and_section(args):
if not section and not scope:
env = ev.active_environment()
if env:
scope = env.scope_name
scope = env.env_file_config_scope_name()
# set scope defaults
elif not scope:
@@ -267,98 +263,6 @@ def _can_update_config_file(scope: spack.config.ConfigScope, cfg_file):
return fs.can_write_to_dir(scope.path) and fs.can_access(cfg_file)
def _config_change_requires_scope(path, spec, scope, match_spec=None):
"""Return whether or not anything changed."""
require = spack.config.get(path, scope=scope)
if not require:
return False
changed = False
def override_cfg_spec(spec_str):
nonlocal changed
init_spec = spack.spec.Spec(spec_str)
# Overridden spec cannot be anonymous
init_spec.name = spec.name
if match_spec and not init_spec.satisfies(match_spec):
# If there is a match_spec, don't change constraints that
# don't match it
return spec_str
elif not init_spec.intersects(spec):
changed = True
return str(spack.spec.Spec.override(init_spec, spec))
else:
# Don't override things if they intersect, otherwise we'd
# be e.g. attaching +debug to every single version spec
return spec_str
if isinstance(require, str):
new_require = override_cfg_spec(require)
else:
new_require = []
for item in require:
if "one_of" in item:
item["one_of"] = [override_cfg_spec(x) for x in item["one_of"]]
elif "any_of" in item:
item["any_of"] = [override_cfg_spec(x) for x in item["any_of"]]
elif "spec" in item:
item["spec"] = override_cfg_spec(item["spec"])
elif isinstance(item, str):
item = override_cfg_spec(item)
else:
raise ValueError(f"Unexpected requirement: ({type(item)}) {str(item)}")
new_require.append(item)
spack.config.set(path, new_require, scope=scope)
return changed
def _config_change(config_path, match_spec_str=None):
all_components = spack.config.process_config_path(config_path)
key_components = all_components[:-1]
key_path = ":".join(key_components)
spec = spack.spec.Spec(syaml.syaml_str(all_components[-1]))
match_spec = None
if match_spec_str:
match_spec = spack.spec.Spec(match_spec_str)
if key_components[-1] == "require":
# Extract the package name from the config path, which allows
# args.spec to be anonymous if desired
pkg_name = key_components[1]
spec.name = pkg_name
changed = False
for scope in spack.config.writable_scope_names():
changed |= _config_change_requires_scope(key_path, spec, scope, match_spec=match_spec)
if not changed:
existing_requirements = spack.config.get(key_path)
if isinstance(existing_requirements, str):
raise spack.config.ConfigError(
"'config change' needs to append a requirement,"
" but existing require: config is not a list"
)
ideal_scope_to_modify = None
for scope in spack.config.writable_scope_names():
if spack.config.get(key_path, scope=scope):
ideal_scope_to_modify = scope
break
update_path = f"{key_path}:[{str(spec)}]"
spack.config.add(update_path, scope=ideal_scope_to_modify)
else:
raise ValueError("'config change' can currently only change 'require' sections")
def config_change(args):
_config_change(args.path, args.match_spec)
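A hedged illustration of the decision rule in override_cfg_spec, using made-up constraints and assuming a Spack checkout on sys.path (Spec.intersects and Spec.override are the calls used in the code above): constraints compatible with the requested change are left alone, conflicting ones are rewritten, and if nothing conflicted the new constraint is instead appended to the require: list by the else branch above.

import spack.spec

change = spack.spec.Spec("mpich~debug")  # what `spack config change packages:mpich:require:~debug` asks for
for existing in ("mpich@3.4", "mpich+debug"):
    spec = spack.spec.Spec(existing)
    if spec.intersects(change):
        print(f"{existing}: compatible with ~debug, left unchanged")
    else:
        print(f"{existing}: conflicts, rewritten to {spack.spec.Spec.override(spec, change)}")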
def config_update(args):
# Read the configuration files
spack.config.CONFIG.get_config(args.section, scope=args.scope)
@@ -586,6 +490,5 @@ def config(parser, args):
"update": config_update,
"revert": config_revert,
"prefer-upstream": config_prefer_upstream,
"change": config_change,
}
action[args.config_command](args)

View File

@@ -64,9 +64,8 @@ class {class_name}({base_class_name}):
# maintainers("github_user1", "github_user2")
# FIXME: Add the SPDX identifier of the project's license below.
# See https://spdx.org/licenses/ for a list. Upon manually verifying
# the license, set checked_by to your Github username.
license("UNKNOWN", checked_by="github_user1")
# See https://spdx.org/licenses/ for a list.
license("UNKNOWN")
{versions}

View File

@@ -19,7 +19,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["jobs", "no_checksum", "spec"])
arguments.add_common_arguments(subparser, ["jobs"])
subparser.add_argument(
"-d",
"--source-path",
@@ -34,6 +34,7 @@ def setup_parser(subparser):
dest="ignore_deps",
help="do not try to install dependencies of requested packages",
)
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"--keep-prefix",
action="store_true",
@@ -62,6 +63,7 @@ def setup_parser(subparser):
choices=["root", "all"],
help="run tests on only root packages or all packages",
)
arguments.add_common_arguments(subparser, ["spec"])
stop_group = subparser.add_mutually_exclusive_group()
stop_group.add_argument(
@@ -123,6 +125,9 @@ def dev_build(self, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
tests = False
if args.test == "all":
tests = True

View File

@@ -8,7 +8,6 @@
import llnl.util.tty as tty
import spack.cmd
import spack.config
import spack.spec
import spack.util.path
import spack.version
@@ -22,7 +21,6 @@
def setup_parser(subparser):
subparser.add_argument("-p", "--path", help="source location of package")
subparser.add_argument("-b", "--build-directory", help="build directory for the package")
clone_group = subparser.add_mutually_exclusive_group()
clone_group.add_argument(
@@ -153,11 +151,4 @@ def develop(parser, args):
env = spack.cmd.require_active_env(cmd_name="develop")
tty.debug("Updating develop config for {0} transactionally".format(env.name))
with env.write_transaction():
if args.build_directory is not None:
spack.config.add(
"packages:{}:package_attributes:build_directory:{}".format(
spec.name, args.build_directory
),
env.scope_name,
)
_update_config(spec, path)

View File

@@ -54,104 +54,6 @@
]
#
# env create
#
def env_create_setup_parser(subparser):
"""create a new environment"""
subparser.add_argument(
"env_name",
metavar="env",
help=(
"name of managed environment or directory of the anonymous env "
"(when using --dir/-d) to activate"
),
)
subparser.add_argument(
"-d", "--dir", action="store_true", help="create an environment in a specific directory"
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
view_opts = subparser.add_mutually_exclusive_group()
view_opts.add_argument(
"--without-view", action="store_true", help="do not maintain a view for this environment"
)
view_opts.add_argument(
"--with-view",
help="specify that this environment should maintain a view at the"
" specified path (by default the view is maintained in the"
" environment directory)",
)
subparser.add_argument(
"envfile",
nargs="?",
default=None,
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)
def env_create(args):
if args.with_view:
# Expand relative paths provided on the command line to the current working directory
# This way we interpret `spack env create --with-view ./view --dir ./env` as
# a view in $PWD/view, not $PWD/env/view. This is different from specifying a relative
# path in the manifest, which is resolved relative to the manifest file's location.
with_view = os.path.abspath(args.with_view)
elif args.without_view:
with_view = False
else:
# Note that 'None' means unspecified, in which case the Environment
# object could choose to enable a view by default. False means that
# the environment should not include a view.
with_view = None
env = _env_create(
args.env_name,
init_file=args.envfile,
dir=args.dir,
with_view=with_view,
keep_relative=args.keep_relative,
)
# Generate views, only really useful for environments created from spack.lock files.
env.regenerate_views()
def _env_create(name_or_path, *, init_file=None, dir=False, with_view=None, keep_relative=False):
"""Create a new environment, with an optional yaml description.
Arguments:
name_or_path (str): name of the environment to create, or path to it
init_file (str or file): optional initialization file -- can be
a JSON lockfile (*.lock, *.json) or YAML manifest file
dir (bool): if True, create an environment in a directory instead
of a named environment
keep_relative (bool): if True, develop paths are copied verbatim into
the new environment file, otherwise they may be made absolute if the
new environment is in a different location
"""
if not dir:
env = ev.create(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment '%s' in %s" % (name_or_path, env.path))
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % (name_or_path))
return env
env = ev.create_in_dir(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment in %s" % env.path)
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % env.path)
return env
#
# env activate
#
@@ -216,46 +118,22 @@ def env_activate_setup_parser(subparser):
help="decorate the command line prompt when activating",
)
subparser.add_argument(
env_options = subparser.add_mutually_exclusive_group()
env_options.add_argument(
"--temp",
action="store_true",
default=False,
help="create and activate an environment in a temporary directory",
)
subparser.add_argument(
"--create",
action="store_true",
default=False,
help="create and activate the environment if it doesn't exist",
env_options.add_argument(
"-d", "--dir", default=None, help="activate the environment in this directory"
)
subparser.add_argument(
"--envfile",
nargs="?",
default=None,
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
subparser.add_argument(
"-d",
"--dir",
default=False,
action="store_true",
help="activate environment based on the directory supplied",
)
subparser.add_argument(
env_options.add_argument(
metavar="env",
dest="env_name",
dest="activate_env",
nargs="?",
default=None,
help=(
"name of managed environment or directory of the anonymous env"
" (when using --dir/-d) to activate"
),
help="name of environment to activate",
)
@@ -270,8 +148,7 @@ def create_temp_env_directory():
def _tty_info(msg):
"""tty.info like function that prints the equivalent printf statement for eval."""
decorated = f'{colorize("@*b{==>}")} {msg}\n'
executor = "echo" if sys.platform == "win32" else "printf"
print(f"{executor} {shlex.quote(decorated)};")
print(f"printf {shlex.quote(decorated)};")
def env_activate(args):
@@ -285,17 +162,11 @@ def env_activate(args):
if args.env or args.no_env or args.env_dir:
tty.die("Calling spack env activate with --env, --env-dir and --no-env is ambiguous")
# special parser error handling relative to the --temp flag
temp_conflicts = iter([args.keep_relative, args.dir, args.env_name, args.with_view])
if args.temp and any(temp_conflicts):
tty.die(
"spack env activate --temp cannot be combined with managed environments, --with-view,"
" --keep-relative, or --dir."
)
env_name_or_dir = args.activate_env or args.dir
# When executing `spack env activate` without further arguments, activate
# the default environment. It's created when it doesn't exist yet.
if not args.env_name and not args.temp:
if not env_name_or_dir and not args.temp:
short_name = "default"
if not ev.exists(short_name):
ev.create(short_name)
@@ -314,25 +185,17 @@ def env_activate(args):
_tty_info(f"Created and activated temporary environment in {env_path}")
# Managed environment
elif ev.exists(args.env_name) and not args.dir:
env_path = ev.root(args.env_name)
short_name = args.env_name
elif ev.exists(env_name_or_dir) and not args.dir:
env_path = ev.root(env_name_or_dir)
short_name = env_name_or_dir
# Environment directory
elif ev.is_env_dir(args.env_name):
env_path = os.path.abspath(args.env_name)
elif ev.is_env_dir(env_name_or_dir):
env_path = os.path.abspath(env_name_or_dir)
short_name = os.path.basename(env_path)
# create if user requested, and then recall recursively
elif args.create:
tty.set_msg_enabled(False)
env_create(args)
tty.set_msg_enabled(True)
env_activate(args)
return
else:
tty.die("No such environment: '%s'" % args.env_name)
tty.die("No such environment: '%s'" % env_name_or_dir)
env_prompt = "[%s]" % short_name
@@ -427,6 +290,97 @@ def env_deactivate(args):
sys.stdout.write(cmds)
#
# env create
#
def env_create_setup_parser(subparser):
"""create a new environment"""
subparser.add_argument("create_env", metavar="env", help="name of environment to create")
subparser.add_argument(
"-d", "--dir", action="store_true", help="create an environment in a specific directory"
)
subparser.add_argument(
"--keep-relative",
action="store_true",
help="copy relative develop paths verbatim into the new environment"
" when initializing from envfile",
)
view_opts = subparser.add_mutually_exclusive_group()
view_opts.add_argument(
"--without-view", action="store_true", help="do not maintain a view for this environment"
)
view_opts.add_argument(
"--with-view",
help="specify that this environment should maintain a view at the"
" specified path (by default the view is maintained in the"
" environment directory)",
)
subparser.add_argument(
"envfile",
nargs="?",
default=None,
help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
)
def env_create(args):
if args.with_view:
# Expand relative paths provided on the command line to the current working directory
# This way we interpret `spack env create --with-view ./view --dir ./env` as
# a view in $PWD/view, not $PWD/env/view. This is different from specifying a relative
# path in the manifest, which is resolved relative to the manifest file's location.
with_view = os.path.abspath(args.with_view)
elif args.without_view:
with_view = False
else:
# Note that 'None' means unspecified, in which case the Environment
# object could choose to enable a view by default. False means that
# the environment should not include a view.
with_view = None
env = _env_create(
args.create_env,
init_file=args.envfile,
dir=args.dir,
with_view=with_view,
keep_relative=args.keep_relative,
)
# Generate views, only really useful for environments created from spack.lock files.
env.regenerate_views()
def _env_create(name_or_path, *, init_file=None, dir=False, with_view=None, keep_relative=False):
"""Create a new environment, with an optional yaml description.
Arguments:
name_or_path (str): name of the environment to create, or path to it
init_file (str or file): optional initialization file -- can be
a JSON lockfile (*.lock, *.json) or YAML manifest file
dir (bool): if True, create an environment in a directory instead
of a named environment
keep_relative (bool): if True, develop paths are copied verbatim into
the new environment file, otherwise they may be made absolute if the
new environment is in a different location
"""
if not dir:
env = ev.create(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment '%s' in %s" % (name_or_path, env.path))
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % (name_or_path))
return env
env = ev.create_in_dir(
name_or_path, init_file=init_file, with_view=with_view, keep_relative=keep_relative
)
tty.msg("Created environment in %s" % env.path)
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % env.path)
return env
#
# env remove
#

View File

@@ -18,7 +18,6 @@
import spack.cray_manifest as cray_manifest
import spack.detection
import spack.error
import spack.repo
import spack.util.environment
from spack.cmd.common import arguments
@@ -153,9 +152,9 @@ def external_find(args):
def packages_to_search_for(
*, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]]
):
result = list(
{pkg for tag in tags for pkg in spack.repo.PATH.packages_with_tags(tag, full=True)}
)
result = []
for current_tag in tags:
result.extend(spack.repo.PATH.packages_with_tags(current_tag, full=True))
if names:
# Match both fully qualified and unqualified

View File

@@ -18,7 +18,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"-m",
"--missing",
@@ -28,7 +28,7 @@ def setup_parser(subparser):
subparser.add_argument(
"-D", "--dependencies", action="store_true", help="also fetch all dependencies"
)
arguments.add_concretizer_args(subparser)
arguments.add_common_arguments(subparser, ["specs"])
subparser.epilog = (
"With an active environment, the specs "
"parameter can be omitted. In this case all (uninstalled"
@@ -40,6 +40,9 @@ def fetch(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.specs:
specs = spack.cmd.parse_specs(args.specs, concretize=True)
else:

View File

@@ -18,14 +18,7 @@
def setup_parser(subparser):
setup_parser.parser = subparser
subparser.epilog = """
Outside of an environment, the command concretizes specs and graphs them, unless the
--installed option is given. In that case specs are matched from the current DB.
If an environment is active, specs are matched from the currently available concrete specs
in the lockfile.
"""
method = subparser.add_mutually_exclusive_group()
method.add_argument(
"-a", "--ascii", action="store_true", help="draw graph as ascii to stdout (default)"
@@ -48,40 +41,39 @@ def setup_parser(subparser):
)
subparser.add_argument(
"-i", "--installed", action="store_true", help="graph specs from the DB"
"-i",
"--installed",
action="store_true",
help="graph installed specs, or specs in the active env (implies --dot)",
)
arguments.add_common_arguments(subparser, ["deptype", "specs"])
def graph(parser, args):
env = ev.active_environment()
if args.installed and env:
tty.die("cannot use --installed with an active environment")
if args.installed and args.specs:
tty.die("cannot specify specs with --installed")
if args.color and not args.dot:
tty.die("the --color option can be used only with --dot")
if args.installed:
if not args.specs:
specs = spack.store.STORE.db.query()
args.dot = True
env = ev.active_environment()
if env:
specs = env.concrete_roots()
else:
result = []
for item in args.specs:
result.extend(spack.store.STORE.db.query(item))
specs = list(set(result))
elif env:
specs = env.concrete_roots()
if args.specs:
specs = env.all_matching_specs(*args.specs)
specs = spack.store.STORE.db.query()
else:
specs = spack.cmd.parse_specs(args.specs, concretize=not args.static)
if not specs:
tty.die("no spec matching the query")
setup_parser.parser.print_help()
return 1
if args.static:
args.dot = True
static_graph_dot(specs, depflag=args.deptype)
return

View File

@@ -30,7 +30,6 @@
@c{@min:max} version range (inclusive)
@c{@min:} version <min> or higher
@c{@:max} up to version <max> (inclusive)
@c{@=version} exact version
compilers:
@g{%compiler} build with <compiler>

View File

@@ -475,7 +475,10 @@ def print_virtuals(pkg, args):
color.cprint(section_title("Virtual Packages: "))
if pkg.provided:
for when, specs in reversed(sorted(pkg.provided.items())):
line = " %s provides %s" % (when.cformat(), ", ".join(s.cformat() for s in specs))
line = " %s provides %s" % (
when.colorized(),
", ".join(s.colorized() for s in specs),
)
print(line)
else:

View File

@@ -176,7 +176,7 @@ def setup_parser(subparser):
dest="install_source",
help="install source files in prefix",
)
arguments.add_common_arguments(subparser, ["no_checksum"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"-v",
"--verbose",
@@ -290,11 +290,11 @@ def require_user_confirmation_for_overwrite(concrete_specs, args):
def _dump_log_on_error(e: spack.build_environment.InstallError):
e.print_context()
assert e.pkg, "Expected InstallError to include the associated package"
if not os.path.exists(e.pkg.log_path):
if not os.path.exists(e.pkg.build_log_path):
tty.error("'spack install' created no log.")
else:
sys.stderr.write("Full build log:\n")
with open(e.pkg.log_path, errors="replace") as log:
with open(e.pkg.build_log_path, errors="replace") as log:
shutil.copyfileobj(log, sys.stderr)
@@ -326,6 +326,9 @@ def install(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.log_file and not args.log_format:
msg = "the '--log-format' must be specified when using '--log-file'"
tty.die(msg)

View File

@@ -292,11 +292,9 @@ def head(n, span_id, title, anchor=None):
out.write("<dd>\n")
out.write(
", ".join(
(
d
if d not in pkg_names
else '<a class="reference internal" href="#%s">%s</a>' % (d, d)
)
d
if d not in pkg_names
else '<a class="reference internal" href="#%s">%s</a>' % (d, d)
for d in deps
)
)

View File

@@ -53,7 +53,6 @@ def setup_parser(subparser):
"-S", "--stages", action="store_true", help="top level stage directory"
)
directories.add_argument(
"-c",
"--source-dir",
action="store_true",
help="source directory for a spec (requires it to be staged first)",

View File

@@ -1,71 +0,0 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import errno
import gzip
import io
import os
import shutil
import sys
import spack.cmd
import spack.spec
import spack.util.compression as compression
from spack.cmd.common import arguments
from spack.main import SpackCommandError
description = "print out logs for packages"
section = "basic"
level = "long"
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["spec"])
def _dump_byte_stream_to_stdout(instream: io.BufferedIOBase) -> None:
# Reopen stdout in binary mode so we don't have to worry about encoding
outstream = os.fdopen(sys.stdout.fileno(), "wb", closefd=False)
shutil.copyfileobj(instream, outstream)
def _logs(cmdline_spec: spack.spec.Spec, concrete_spec: spack.spec.Spec):
if concrete_spec.installed:
log_path = concrete_spec.package.install_log_path
elif os.path.exists(concrete_spec.package.stage.path):
# TODO: `spack logs` can currently not show the logs while a package is being built, as the
# combined log file is only written after the build is finished.
log_path = concrete_spec.package.log_path
else:
raise SpackCommandError(f"{cmdline_spec} is not installed or staged")
try:
stream = open(log_path, "rb")
except OSError as e:
if e.errno == errno.ENOENT:
raise SpackCommandError(f"No logs are available for {cmdline_spec}") from e
raise SpackCommandError(f"Error reading logs for {cmdline_spec}: {e}") from e
with stream as f:
ext = compression.extension_from_magic_numbers_by_stream(f, decompress=False)
if ext and ext != "gz":
raise SpackCommandError(f"Unsupported storage format for {log_path}: {ext}")
# If the log file is gzip compressed, wrap it with a decompressor
_dump_byte_stream_to_stdout(gzip.GzipFile(fileobj=f) if ext == "gz" else f)
def logs(parser, args):
specs = spack.cmd.parse_specs(args.spec)
if not specs:
raise SpackCommandError("You must supply a spec.")
if len(specs) != 1:
raise SpackCommandError("Too many specs. Supply only one.")
concrete_spec = spack.cmd.matching_spec_from_env(specs[0])
_logs(specs[0], concrete_spec)
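The heart of the command above is simply "stream the selected log to stdout, decompressing when it is gzip". A standalone sketch of that step, checking the gzip magic number directly instead of going through Spack's compression helpers (the path in the commented call is made up):

import gzip
import os
import shutil
import sys

def dump_log(path: str) -> None:
    """Copy a possibly gzip-compressed log file to stdout as raw bytes."""
    with open(path, "rb") as f:
        gzipped = f.read(2) == b"\x1f\x8b"  # gzip magic number
        f.seek(0)
        stream = gzip.GzipFile(fileobj=f) if gzipped else f
        outstream = os.fdopen(sys.stdout.fileno(), "wb", closefd=False)
        shutil.copyfileobj(stream, outstream)

# dump_log("/path/to/spack-build-out.txt.gz")  # hypothetical location of an install log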

View File

@@ -28,7 +28,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command")
@@ -72,7 +72,6 @@ def setup_parser(subparser):
" retrieve all versions of each package",
)
arguments.add_common_arguments(create_parser, ["specs"])
arguments.add_concretizer_args(create_parser)
# Destroy
destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__)
@@ -550,4 +549,7 @@ def mirror(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
action[args.mirror_command](args)

View File

@@ -19,7 +19,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
arguments.add_concretizer_args(subparser)
@@ -33,6 +33,9 @@ def patch(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
specs = spack.cmd.parse_specs(args.specs, concretize=False)
for spec in specs:
_patch(spack.cmd.matching_spec_from_env(spec).package)

View File

@@ -45,9 +45,7 @@ def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["long", "very_long", "namespaces"])
install_status_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(
install_status_group, ["install_status", "no_install_status", "debug_nondefaults"]
)
arguments.add_common_arguments(install_status_group, ["install_status", "no_install_status"])
subparser.add_argument(
"-y",
@@ -129,7 +127,10 @@ def _process_result(result, show, required_format, kwargs):
print()
if result.unsolved_specs and "solutions" in show:
tty.msg(asp.Result.format_unsolved(result.unsolved_specs))
tty.msg("Unsolved specs")
for spec in result.unsolved_specs:
print(spec)
print()
def solve(parser, args):
@@ -147,7 +148,6 @@ def solve(parser, args):
"show_types": args.types,
"status_fn": install_status_fn if args.install_status else None,
"hashes": args.long or args.very_long,
"nondefaults": args.debug_nondefaults,
}
# process output options

View File

@@ -32,9 +32,7 @@ def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["long", "very_long", "namespaces"])
install_status_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(
install_status_group, ["install_status", "no_install_status", "debug_nondefaults"]
)
arguments.add_common_arguments(install_status_group, ["install_status", "no_install_status"])
format_group = subparser.add_mutually_exclusive_group()
format_group.add_argument(
@@ -86,8 +84,6 @@ def spec(parser, args):
tree_kwargs = {
"cover": args.cover,
"format": fmt,
"hashes": args.long or args.very_long,
"nondefaults": args.debug_nondefaults,
"hashlen": None if args.very_long else 7,
"show_types": args.types,
"status_fn": install_status_fn if args.install_status else None,
@@ -129,14 +125,12 @@ def spec(parser, args):
# repeated output. This happens because parse_specs outputs concrete
# specs for `/hash` inputs.
if not input.concrete:
# NOTE: can use overrides | tree_kwargs in python 3.9+
overrides = {"hashes": False}
overriden_kwargs = {**overrides, **tree_kwargs}
tree_kwargs["hashes"] = False # Always False for input spec
print("Input spec")
print("--------------------------------")
print(input.tree(**overriden_kwargs))
print(input.tree(**tree_kwargs))
print("Concretized")
print("--------------------------------")
tree_kwargs["hashes"] = args.long or args.very_long
print(output.tree(**tree_kwargs))

View File

@@ -22,7 +22,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
subparser.add_argument(
"-p", "--path", dest="path", help="path to stage package, does not add to spack tree"
)
@@ -33,6 +33,9 @@ def stage(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if not args.specs:
env = ev.active_environment()
if not env:

View File

@@ -228,7 +228,7 @@ def create_reporter(args, specs_to_test, test_suite):
def test_list(args):
"""list installed packages with available tests"""
tagged = spack.repo.PATH.packages_with_tags(*args.tag) if args.tag else set()
tagged = set(spack.repo.PATH.packages_with_tags(*args.tag)) if args.tag else set()
def has_test_and_tags(pkg_class):
tests = spack.install_test.test_functions(pkg_class)

View File

@@ -334,40 +334,6 @@ def __init__(
# used for version checks for API, e.g. C++11 flag
self._real_version = None
def __eq__(self, other):
return (
self.cc == other.cc
and self.cxx == other.cxx
and self.fc == other.fc
and self.f77 == other.f77
and self.spec == other.spec
and self.operating_system == other.operating_system
and self.target == other.target
and self.flags == other.flags
and self.modules == other.modules
and self.environment == other.environment
and self.extra_rpaths == other.extra_rpaths
and self.enable_implicit_rpaths == other.enable_implicit_rpaths
)
def __hash__(self):
return hash(
(
self.cc,
self.cxx,
self.fc,
self.f77,
self.spec,
self.operating_system,
self.target,
str(self.flags),
str(self.modules),
str(self.environment),
str(self.extra_rpaths),
self.enable_implicit_rpaths,
)
)
def verify_executables(self):
"""Raise an error if any of the compiler executables is not valid.
@@ -423,7 +389,8 @@ def implicit_rpaths(self):
# Put CXX first since it has the most linking issues
# And because it has flags that affect linking
link_dirs = self._get_compiler_link_paths()
exe_paths = [x for x in [self.cxx, self.cc, self.fc, self.f77] if x]
link_dirs = self._get_compiler_link_paths(exe_paths)
all_required_libs = list(self.required_libs) + Compiler._all_compiler_rpath_libraries
return list(paths_containing_libs(link_dirs, all_required_libs))
@@ -436,33 +403,43 @@ def required_libs(self):
# By default every compiler returns the empty list
return []
def _get_compiler_link_paths(self):
cc = self.cc if self.cc else self.cxx
if not cc or not self.verbose_flag:
# Cannot determine implicit link paths without a compiler / verbose flag
def _get_compiler_link_paths(self, paths):
first_compiler = next((c for c in paths if c), None)
if not first_compiler:
return []
if not self.verbose_flag:
# In this case there is no mechanism to learn what link directories
# are used by the compiler
return []
# What flag types apply to first_compiler, in what order
if cc == self.cc:
flags = ["cflags", "cppflags", "ldflags"]
flags = ["cppflags", "ldflags"]
if first_compiler == self.cc:
flags = ["cflags"] + flags
elif first_compiler == self.cxx:
flags = ["cxxflags"] + flags
else:
flags = ["cxxflags", "cppflags", "ldflags"]
flags.append("fflags")
try:
tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
fout = os.path.join(tmpdir, "output")
fin = os.path.join(tmpdir, "main.c")
with open(fin, "w") as csource:
with open(fin, "w+") as csource:
csource.write(
"int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }\n"
"int main(int argc, char* argv[]) { " "(void)argc; (void)argv; return 0; }\n"
)
cc_exe = spack.util.executable.Executable(cc)
compiler_exe = spack.util.executable.Executable(first_compiler)
for flag_type in flags:
cc_exe.add_default_arg(*self.flags.get(flag_type, []))
for flag in self.flags.get(flag_type, []):
compiler_exe.add_default_arg(flag)
output = ""
with self.compiler_environment():
output = cc_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
output = str(
compiler_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
) # str for py2
return _parse_non_system_link_dirs(output)
except spack.util.executable.ProcessError as pe:
tty.debug("ProcessError: Command exited with non-zero status: " + pe.long_message)

View File

@@ -109,7 +109,7 @@ def _to_dict(compiler):
return {"compiler": d}
def get_compiler_config(scope=None, init_config=False):
def get_compiler_config(scope=None, init_config=True):
"""Return the compiler configuration for the specified architecture."""
config = spack.config.get("compilers", scope=scope) or []
@@ -118,8 +118,6 @@ def get_compiler_config(scope=None, init_config=False):
merged_config = spack.config.get("compilers")
if merged_config:
# Config is empty for this scope
# Do not init config because there is a non-empty scope
return config
_init_compiler_config(scope=scope)
@@ -127,95 +125,6 @@ def get_compiler_config(scope=None, init_config=False):
return config
def get_compiler_config_from_packages(scope=None):
"""Return the compiler configuration from packages.yaml"""
config = spack.config.get("packages", scope=scope)
if not config:
return []
packages = []
compiler_package_names = supported_compilers() + list(package_name_to_compiler_name.keys())
for name, entry in config.items():
if name not in compiler_package_names:
continue
externals_config = entry.get("externals", None)
if not externals_config:
continue
packages.extend(_compiler_config_from_package_config(externals_config))
return packages
def _compiler_config_from_package_config(config):
compilers = []
for entry in config:
compiler = _compiler_config_from_external(entry)
if compiler:
compilers.append(compiler)
return compilers
def _compiler_config_from_external(config):
spec = spack.spec.parse_with_version_concrete(config["spec"])
# use str(spec.versions) to allow `@x.y.z` instead of `@=x.y.z`
compiler_spec = spack.spec.CompilerSpec(
package_name_to_compiler_name.get(spec.name, spec.name), spec.version
)
extra_attributes = config.get("extra_attributes", {})
prefix = config.get("prefix", None)
compiler_class = class_for_compiler_name(compiler_spec.name)
paths = extra_attributes.get("paths", {})
compiler_langs = ["cc", "cxx", "fc", "f77"]
for lang in compiler_langs:
if paths.setdefault(lang, None):
continue
if not prefix:
continue
# Check for files that satisfy the naming scheme for this compiler
bindir = os.path.join(prefix, "bin")
for f, regex in itertools.product(os.listdir(bindir), compiler_class.search_regexps(lang)):
if regex.match(f):
paths[lang] = os.path.join(bindir, f)
if all(v is None for v in paths.values()):
return None
if not spec.architecture:
host_platform = spack.platforms.host()
operating_system = host_platform.operating_system("default_os")
target = host_platform.target("default_target").microarchitecture
else:
target = spec.target
if not target:
host_platform = spack.platforms.host()
target = host_platform.target("default_target").microarchitecture
operating_system = spec.os
if not operating_system:
host_platform = spack.platforms.host()
operating_system = host_platform.operating_system("default_os")
compiler_entry = {
"compiler": {
"spec": str(compiler_spec),
"paths": paths,
"flags": extra_attributes.get("flags", {}),
"operating_system": str(operating_system),
"target": str(target.family),
"modules": config.get("modules", []),
"environment": extra_attributes.get("environment", {}),
"extra_rpaths": extra_attributes.get("extra_rpaths", []),
"implicit_rpaths": extra_attributes.get("implicit_rpaths", None),
}
}
return compiler_entry
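A hedged example of the translation performed here, with made-up paths: given an external compiler entry from packages.yaml like the dict below, the function builds a compilers.yaml-style entry whose spec is gcc@12.3.0, whose paths hold the detected cc/cxx/fc/f77 binaries, and whose operating_system and target fall back to the host values when the external spec carries no architecture.

external_entry = {
    "spec": "gcc@12.3.0",
    "prefix": "/usr",
    "extra_attributes": {
        "paths": {"cc": "/usr/bin/gcc", "cxx": "/usr/bin/g++"},
        "extra_rpaths": [],
    },
}
# _compiler_config_from_external(external_entry) would then return roughly:
# {"compiler": {"spec": "gcc@12.3.0",
#               "paths": {"cc": "/usr/bin/gcc", "cxx": "/usr/bin/g++", "fc": ..., "f77": ...},
#               "flags": {}, "operating_system": "<host default_os>", "target": "<host target family>",
#               "modules": [], "environment": {}, "extra_rpaths": [], "implicit_rpaths": None}}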
def _init_compiler_config(*, scope):
"""Compiler search used when Spack has no compilers."""
compilers = find_compilers()
@@ -233,20 +142,17 @@ def compiler_config_files():
compiler_config = config.get("compilers", scope=name)
if compiler_config:
config_files.append(config.get_config_filename(name, "compilers"))
compiler_config_from_packages = get_compiler_config_from_packages(scope=name)
if compiler_config_from_packages:
config_files.append(config.get_config_filename(name, "packages"))
return config_files
def add_compilers_to_config(compilers, scope=None):
def add_compilers_to_config(compilers, scope=None, init_config=True):
"""Add compilers to the config for the specified architecture.
Arguments:
compilers: a list of Compiler objects.
scope: configuration scope to modify.
"""
compiler_config = get_compiler_config(scope, init_config=False)
compiler_config = get_compiler_config(scope, init_config)
for compiler in compilers:
if not compiler.cc:
tty.debug(f"{compiler.spec} does not have a C compiler")
@@ -278,9 +184,6 @@ def remove_compiler_from_config(compiler_spec, scope=None):
for current_scope in candidate_scopes:
removal_happened |= _remove_compiler_from_scope(compiler_spec, scope=current_scope)
msg = "`spack compiler remove` will not remove compilers defined in packages.yaml"
msg += "\nTo remove these compilers, either edit the config or use `spack external remove`"
tty.debug(msg)
return removal_happened
@@ -295,7 +198,7 @@ def _remove_compiler_from_scope(compiler_spec, scope):
True if one or more compiler entries were actually removed, False otherwise
"""
assert scope is not None, "a specific scope is needed when calling this function"
compiler_config = get_compiler_config(scope, init_config=False)
compiler_config = get_compiler_config(scope)
filtered_compiler_config = [
compiler_entry
for compiler_entry in compiler_config
@@ -318,14 +221,7 @@ def all_compilers_config(scope=None, init_config=True):
"""Return a set of specs for all the compiler versions currently
available to build with. These are instances of CompilerSpec.
"""
from_packages_yaml = get_compiler_config_from_packages(scope)
if from_packages_yaml:
init_config = False
from_compilers_yaml = get_compiler_config(scope, init_config)
result = from_compilers_yaml + from_packages_yaml
key = lambda c: _compiler_from_config_entry(c["compiler"])
return list(llnl.util.lang.dedupe(result, key=key))
return get_compiler_config(scope, init_config)
def all_compiler_specs(scope=None, init_config=True):
@@ -492,7 +388,7 @@ def find_specs_by_arch(compiler_spec, arch_spec, scope=None, init_config=True):
def all_compilers(scope=None, init_config=True):
config = all_compilers_config(scope, init_config=init_config)
config = get_compiler_config(scope, init_config=init_config)
compilers = list()
for items in config:
items = items["compiler"]
@@ -507,7 +403,10 @@ def compilers_for_spec(
"""This gets all compilers that satisfy the supplied CompilerSpec.
Returns an empty list if none are found.
"""
config = all_compilers_config(scope, init_config)
if use_cache:
config = all_compilers_config(scope, init_config)
else:
config = get_compiler_config(scope, init_config)
matches = set(find(compiler_spec, scope, init_config))
compilers = []
@@ -615,10 +514,9 @@ def get_compilers(config, cspec=None, arch_spec=None):
for items in config:
items = items["compiler"]
# We might use equality here.
if cspec and not spack.spec.parse_with_version_concrete(
items["spec"], compiler=True
).satisfies(cspec):
# NOTE: in principle this should be equality not satisfies, but config can still
# be written in old format gcc@10.1.0 instead of gcc@=10.1.0.
if cspec and not cspec.satisfies(items["spec"]):
continue
# If an arch spec is given, confirm that this compiler
@@ -684,7 +582,9 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
scope_to_compilers = {}
for scope in config.scopes:
compilers = compilers_for_spec(compiler_spec, arch_spec=arch_spec, scope=scope)
compilers = compilers_for_spec(
compiler_spec, arch_spec=arch_spec, scope=scope, use_cache=False
)
if compilers:
scope_to_compilers[scope] = compilers

View File

@@ -7,18 +7,15 @@
import re
import subprocess
import sys
import tempfile
from typing import Dict, List, Set
import archspec.cpu
import spack.compiler
import spack.operating_systems.windows_os
import spack.platforms
import spack.util.executable
from spack.compiler import Compiler
from spack.error import SpackError
from spack.version import Version, VersionRange
from spack.version import Version
avail_fc_version: Set[str] = set()
fc_path: Dict[str, str] = dict()
@@ -188,9 +185,6 @@ def __init__(self, *args, **kwargs):
# get current platform architecture and format for vcvars argument
arch = spack.platforms.real_host().default.lower()
arch = arch.replace("-", "_")
if str(archspec.cpu.host().family) == "x86_64":
arch = "amd64"
self.vcvars_call = VCVarsInvocation(vcvars_script_path, arch, self.msvc_version)
env_cmds.append(self.vcvars_call)
# Below is a check for a valid fortran path
@@ -298,15 +292,6 @@ def setup_custom_environment(self, pkg, env):
else:
env.set_path(env_var, int_env[env_var].split(os.pathsep))
# certain versions of ifx (2021.3.0:2023.1.0) do not play well with env:TMP
# that has a "." character in the path
# Work around by pointing tmp to the stage for the duration of the build
if self.fc and Version(self.fc_version(self.fc)).satisfies(
VersionRange("2021.3.0", "2023.1.0")
):
new_tmp = tempfile.mkdtemp(dir=pkg.stage.path)
env.set("TMP", new_tmp)
env.set("CC", self.cc)
env.set("CXX", self.cxx)
env.set("FC", self.fc)
@@ -323,7 +308,7 @@ def fc_version(cls, fc):
fc_path[fc_ver] = fc
if os.getenv("ONEAPI_ROOT"):
try:
sps = spack.operating_systems.windows_os.WindowsOs().compiler_search_paths
sps = spack.operating_systems.windows_os.WindowsOs.compiler_search_paths
except AttributeError:
raise SpackError("Windows compiler search paths not established")
clp = spack.util.executable.which_string("cl", path=sps)

View File

@@ -826,6 +826,7 @@ def __init__(self, spec):
class InsufficientArchitectureInfoError(spack.error.SpackError):
"""Raised when details on architecture cannot be collected from the
system"""

View File

@@ -63,11 +63,10 @@
from spack.util.cpus import cpus_available
#: Dict from section names -> schema for that section
SECTION_SCHEMAS: Dict[str, Any] = {
SECTION_SCHEMAS = {
"compilers": spack.schema.compilers.schema,
"concretizer": spack.schema.concretizer.schema,
"definitions": spack.schema.definitions.schema,
"view": spack.schema.view.schema,
"develop": spack.schema.develop.schema,
"mirrors": spack.schema.mirrors.schema,
"repos": spack.schema.repos.schema,
@@ -82,7 +81,7 @@
# Same as above, but including keys for environments
# this allows us to unify config reading between configs and environments
_ALL_SCHEMAS: Dict[str, Any] = copy.deepcopy(SECTION_SCHEMAS)
_ALL_SCHEMAS = copy.deepcopy(SECTION_SCHEMAS)
_ALL_SCHEMAS.update({spack.schema.env.TOP_LEVEL_KEY: spack.schema.env.schema})
#: Path to the default configuration
@@ -639,6 +638,7 @@ def get(self, path: str, default: Optional[Any] = None, scope: Optional[str] = N
We use ``:`` as the separator, like YAML objects.
"""
# TODO: Currently only handles maps. Think about lists if needed.
parts = process_config_path(path)
section = parts.pop(0)
@@ -764,31 +764,6 @@ def _add_platform_scope(
cfg.push_scope(scope_type(plat_name, plat_path))
def config_paths_from_entry_points() -> List[Tuple[str, str]]:
"""Load configuration paths from entry points
A python package can register entry point metadata so that Spack can find
its configuration by adding the following to the project's pyproject.toml:
.. code-block:: toml
[project.entry-points."spack.config"]
baz = "baz:get_spack_config_path"
The function ``get_spack_config_path`` returns the path to the package's
spack configuration scope
"""
config_paths: List[Tuple[str, str]] = []
for entry_point in lang.get_entry_points(group="spack.config"):
hook = entry_point.load()
if callable(hook):
config_path = hook()
if config_path and os.path.exists(config_path):
config_paths.append(("plugin-%s" % entry_point.name, str(config_path)))
return config_paths
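A minimal sketch of the plugin side of this mechanism for the hypothetical baz package named in the docstring: the entry point just needs to resolve to a callable that returns an existing directory holding the extra configuration scope.

# baz/__init__.py -- hypothetical plugin package registered via the pyproject.toml above
import os

def get_spack_config_path() -> str:
    """Return the directory containing this package's Spack configuration scope."""
    return os.path.join(os.path.dirname(__file__), "spack_config")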
def _add_command_line_scopes(
cfg: Union[Configuration, lang.Singleton], command_line_scopes: List[str]
) -> None:
@@ -841,9 +816,6 @@ def create() -> Configuration:
# No site-level configs should be checked into spack by default.
configuration_paths.append(("site", os.path.join(spack.paths.etc_path)))
# Python package's can register configuration scopes via entry_points
configuration_paths.extend(config_paths_from_entry_points())
# User configuration can override both spack defaults and site config
# This is disabled if user asks for no local configuration.
if not disable_local_config:
@@ -911,9 +883,7 @@ def add(fullpath: str, scope: Optional[str] = None) -> None:
has_existing_value = True
path = ""
override = False
value = components[-1]
if not isinstance(value, syaml.syaml_str):
value = syaml.load_config(value)
value = syaml.load_config(components[-1])
for idx, name in enumerate(components[:-1]):
# First handle double colons in constructing path
colon = "::" if override else ":" if path else ""
@@ -935,7 +905,7 @@ def add(fullpath: str, scope: Optional[str] = None) -> None:
# construct value from this point down
for component in reversed(components[idx + 1 : -1]):
value: Dict[str, str] = {component: value} # type: ignore[no-redef]
value = {component: value}
break
if override:
@@ -946,7 +916,7 @@ def add(fullpath: str, scope: Optional[str] = None) -> None:
# append values to lists
if isinstance(existing, list) and not isinstance(value, list):
value: List[str] = [value] # type: ignore[no-redef]
value = [value]
# merge value into existing
new = merge_yaml(existing, value)
@@ -979,8 +949,7 @@ def scopes() -> Dict[str, ConfigScope]:
def writable_scopes() -> List[ConfigScope]:
"""
Return list of writable scopes. Higher-priority scopes come first in the
list.
Return list of writable scopes
"""
return list(
reversed(
@@ -1125,7 +1094,7 @@ def read_config_file(
data = syaml.load_config(f)
if data:
if schema is None:
if not schema:
key = next(iter(data))
schema = _ALL_SCHEMAS[key]
validate(data, schema)
@@ -1367,141 +1336,56 @@ def they_are(t):
return copy.copy(source)
class ConfigPath:
quoted_string = "(?:\"[^\"]+\")|(?:'[^']+')"
unquoted_string = "[^:'\"]+"
element = rf"(?:(?:{quoted_string})|(?:{unquoted_string}))"
next_key_pattern = rf"({element}[+-]?)(?:\:|$)"
@staticmethod
def _split_front(string, extract):
m = re.match(extract, string)
if not m:
return None, None
token = m.group(1)
return token, string[len(token) :]
@staticmethod
def _validate(path):
"""Example valid config paths:
x:y:z
x:"y":z
x:y+:z
x:y::z
x:y+::z
x:y:
x:y::
"""
first_key, path = ConfigPath._split_front(path, ConfigPath.next_key_pattern)
if not first_key:
raise ValueError(f"Config path does not start with a parse-able key: {path}")
path_elements = [first_key]
path_index = 1
while path:
separator, path = ConfigPath._split_front(path, r"(\:+)")
if not separator:
raise ValueError(f"Expected separator for {path}")
path_elements[path_index - 1] += separator
if not path:
break
element, remainder = ConfigPath._split_front(path, ConfigPath.next_key_pattern)
if not element:
# If we can't parse something as a key, then it must be a
# value (if it's valid).
try:
syaml.load_config(path)
except spack.util.spack_yaml.SpackYAMLError as e:
raise ValueError(
"Remainder of path is not a valid key"
f" and does not parse as a value {path}"
) from e
element = path
path = None # The rest of the path was consumed into the value
else:
path = remainder
path_elements.append(element)
path_index += 1
return path_elements
@staticmethod
def process(path):
result = []
quote = "['\"]"
seen_override_in_path = False
path_elements = ConfigPath._validate(path)
last_element_idx = len(path_elements) - 1
for i, element in enumerate(path_elements):
override = False
append = False
prepend = False
quoted = False
if element.endswith("::") or (element.endswith(":") and i == last_element_idx):
if seen_override_in_path:
raise syaml.SpackYAMLError(
"Meaningless second override indicator `::' in path `{0}'".format(path), ""
)
override = True
seen_override_in_path = True
element = element.rstrip(":")
if element.endswith("+"):
prepend = True
elif element.endswith("-"):
append = True
element = element.rstrip("+-")
if re.match(f"^{quote}", element):
quoted = True
element = element.strip("'\"")
if any([append, prepend, override, quoted]):
element = syaml.syaml_str(element)
if append:
element.append = True
if prepend:
element.prepend = True
if override:
element.override = True
result.append(element)
return result
def process_config_path(path: str) -> List[str]:
"""Process a path argument to config.set() that may contain overrides ('::' or
trailing ':')
Colons will be treated as static strings if inside of quotes,
e.g. `this:is:a:path:'value:with:colon'` will yield:
Note: quoted value path components will be processed as a single value (escaping colons)
quoted path components outside of the value will be considered ill formed and will
raise.
e.g. `this:is:a:path:'value:with:colon'` will yield:
[this, is, a, path, value:with:colon]
The path may consist only of keys (e.g. for a `get`) or may end in a value.
Keys are always strings: if a user encloses a key in quotes, the quotes
should be removed. Values with quotes should be treated as strings,
but without quotes, may be parsed as a different yaml object (e.g.
'{}' is a dict, but '"{}"' is a string).
This function does not know whether the final element of the path is a
key or value, so:
* It must strip the quotes, in case it is a key (so we look for "key" and
not '"key"'))
* It must indicate somehow that the quotes were stripped, in case it is a
value (so that we don't process '"{}"' as a YAML dict)
Therefore, all elements with quotes are stripped, and then also converted
to ``syaml_str`` (if treating the final element as a value, the caller
should not parse it in this case).
[this, is, a, path, value:with:colon]
"""
return ConfigPath.process(path)
result = []
if path.startswith(":"):
raise syaml.SpackYAMLError(f"Illegal leading `:' in path `{path}'", "")
seen_override_in_path = False
while path:
front, sep, path = path.partition(":")
if (sep and not path) or path.startswith(":"):
if seen_override_in_path:
raise syaml.SpackYAMLError(
f"Meaningless second override indicator `::' in path `{path}'", ""
)
path = path.lstrip(":")
front = syaml.syaml_str(front)
front.override = True # type: ignore[attr-defined]
seen_override_in_path = True
elif front.endswith("+"):
front = front.rstrip("+")
front = syaml.syaml_str(front)
front.prepend = True # type: ignore[attr-defined]
elif front.endswith("-"):
front = front.rstrip("-")
front = syaml.syaml_str(front)
front.append = True # type: ignore[attr-defined]
result.append(front)
quote = "['\"]"
not_quote = "[^'\"]"
if re.match(f"^{quote}", path):
m = re.match(rf"^({quote}{not_quote}+{quote})$", path)
if not m:
raise ValueError("Quotes indicate value, but there are additional path entries")
result.append(m.group(1))
break
return result
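A quick check against the docstring's own example, assuming the ConfigPath-based parser above and a Spack checkout on sys.path: the quoted trailing component is kept as a single value with the quotes stripped.

from spack.config import process_config_path

parts = process_config_path("this:is:a:path:'value:with:colon'")
assert parts == ["this", "is", "a", "path", "value:with:colon"]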
#

View File

@@ -19,6 +19,9 @@
},
"os_package_manager": "dnf",
"build": "spack/fedora38",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/fedora:38"
}
@@ -30,6 +33,9 @@
},
"os_package_manager": "dnf",
"build": "spack/fedora37",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/fedora:37"
}
@@ -41,6 +47,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/rockylinux9",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/rockylinux:9"
}
@@ -52,6 +61,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/rockylinux8",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/rockylinux:8"
}
@@ -59,23 +71,29 @@
"almalinux:9": {
"bootstrap": {
"template": "container/almalinux_9.dockerfile",
"image": "quay.io/almalinuxorg/almalinux:9"
"image": "quay.io/almalinux/almalinux:9"
},
"os_package_manager": "dnf_epel",
"build": "spack/almalinux9",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "quay.io/almalinuxorg/almalinux:9"
"image": "quay.io/almalinux/almalinux:9"
}
},
"almalinux:8": {
"bootstrap": {
"template": "container/almalinux_8.dockerfile",
"image": "quay.io/almalinuxorg/almalinux:8"
"image": "quay.io/almalinux/almalinux:8"
},
"os_package_manager": "dnf_epel",
"build": "spack/almalinux8",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "quay.io/almalinuxorg/almalinux:8"
"image": "quay.io/almalinux/almalinux:8"
}
},
"centos:stream": {
@@ -87,6 +105,9 @@
"build": "spack/centos-stream",
"final": {
"image": "quay.io/centos/centos:stream"
},
"build_tags": {
"develop": "latest"
}
},
"centos:7": {
@@ -94,7 +115,10 @@
"template": "container/centos_7.dockerfile"
},
"os_package_manager": "yum",
"build": "spack/centos7"
"build": "spack/centos7",
"build_tags": {
"develop": "latest"
}
},
"opensuse/leap:15": {
"bootstrap": {
@@ -102,6 +126,9 @@
},
"os_package_manager": "zypper",
"build": "spack/leap15",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "opensuse/leap:latest"
}
@@ -121,13 +148,19 @@
"template": "container/ubuntu_2204.dockerfile"
},
"os_package_manager": "apt",
"build": "spack/ubuntu-jammy"
"build": "spack/ubuntu-jammy",
"build_tags": {
"develop": "latest"
}
},
"ubuntu:20.04": {
"bootstrap": {
"template": "container/ubuntu_2004.dockerfile"
},
"build": "spack/ubuntu-focal",
"build_tags": {
"develop": "latest"
},
"os_package_manager": "apt"
},
"ubuntu:18.04": {
@@ -135,7 +168,10 @@
"template": "container/ubuntu_1804.dockerfile"
},
"os_package_manager": "apt",
"build": "spack/ubuntu-bionic"
"build": "spack/ubuntu-bionic",
"build_tags": {
"develop": "latest"
}
}
},
"os_package_managers": {

Some files were not shown because too many files have changed in this diff.