Compare commits


1 Commit

Author: Gregory Becker
SHA1: 6cb5700b1c
Message: try using nosearch to deprioritize api docs in search
Date: 2024-03-01 13:47:15 -08:00
1212 changed files with 8269 additions and 23859 deletions


@@ -1,4 +0,0 @@
{
"image": "ghcr.io/spack/ubuntu20.04-runner-amd64-gcc-11.4:2023.08.01",
"postCreateCommand": "./.devcontainer/postCreateCommand.sh"
}


@@ -1,20 +0,0 @@
#!/bin/bash
# Load spack environment at terminal startup
cat <<EOF >> /root/.bashrc
. /workspaces/spack/share/spack/setup-env.sh
EOF
# Load spack environment in this script
. /workspaces/spack/share/spack/setup-env.sh
# Ensure generic targets for maximum matching with buildcaches
spack config --scope site add "packages:all:require:[target=x86_64_v3]"
spack config --scope site add "concretizer:targets:granularity:generic"
# Find compiler and install gcc-runtime
spack compiler find --scope site
# Setup buildcaches
spack mirror add --scope site develop https://binaries.spack.io/develop
spack buildcache keys --install --trust


@@ -22,8 +22,8 @@ jobs:
matrix:
operating_system: ["ubuntu-latest", "macos-latest"]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
@@ -43,9 +43,7 @@ jobs:
. share/spack/setup-env.sh
$(which spack) audit packages
$(which spack) audit externals
- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,audits
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true


@@ -24,7 +24,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup repo
@@ -158,8 +158,8 @@ jobs:
run: |
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: "3.12"
- name: Bootstrap clingo
@@ -182,7 +182,7 @@ jobs:
run: |
brew install tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap clingo
run: |
set -ex
@@ -207,7 +207,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup repo
@@ -250,7 +250,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -287,7 +287,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
@@ -320,7 +320,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
@@ -338,7 +338,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh


@@ -55,7 +55,7 @@ jobs:
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
id: docker_meta
@@ -96,10 +96,10 @@ jobs:
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb
uses: docker/setup-buildx-action@0d103c3126aa41d772a8362f6aa67afac040f80c
- name: Log in to GitHub Container Registry
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -107,13 +107,13 @@ jobs:
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0
uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}


@@ -18,7 +18,6 @@ jobs:
prechecks:
needs: [ changes ]
uses: ./.github/workflows/valid-style.yml
secrets: inherit
with:
with_coverage: ${{ needs.changes.outputs.core }}
all-prechecks:
@@ -36,12 +35,12 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
- uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
@@ -71,17 +70,14 @@ jobs:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/bootstrap.yml
secrets: inherit
unit-tests:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/unit_tests.yaml
secrets: inherit
windows:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks ]
uses: ./.github/workflows/windows_python.yml
secrets: inherit
all:
needs: [ windows, unit-tests, bootstrap ]
runs-on: ubuntu-latest


@@ -14,10 +14,10 @@ jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with:
python-version: 3.9
- name: Install Python packages


@@ -1,4 +1,4 @@
black==24.4.0
black==24.2.0
clingo==5.7.1
flake8==7.0.0
isort==5.13.2


@@ -51,10 +51,10 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -91,19 +91,17 @@ jobs:
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
with:
flags: unittests,linux,${{ matrix.concretizer }}
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Test shell integration
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -124,11 +122,9 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
with:
flags: shelltests,linux
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Test RHEL8 UBI with platform Python. This job is run
# only on PRs modifying core Spack
@@ -141,7 +137,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -160,10 +156,10 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -185,23 +181,20 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # @v2.1.0
with:
flags: unittests,linux,clingo
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Run unit tests on MacOS
macos:
runs-on: ${{ matrix.os }}
runs-on: macos-latest
strategy:
matrix:
os: [macos-latest, macos-14]
python-version: ["3.11"]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -223,8 +216,6 @@ jobs:
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
with:
flags: unittests,macos
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true


@@ -18,8 +18,8 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with:
python-version: '3.11'
cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with:
python-version: '3.11'
cache: 'pip'
@@ -56,7 +56,6 @@ jobs:
share/spack/qa/run-style-tests
audit:
uses: ./.github/workflows/audit.yaml
secrets: inherit
with:
with_coverage: ${{ inputs.with_coverage }}
python_version: '3.11'
@@ -70,7 +69,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user
run: |
git --version


@@ -15,10 +15,10 @@ jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with:
python-version: 3.9
- name: Install Python packages
@@ -33,18 +33,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
with:
flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with:
python-version: 3.9
- name: Install Python packages
@@ -59,18 +57,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed
- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
with:
flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with:
python-version: 3.9
- name: Install Python packages


@@ -88,7 +88,7 @@ Resources:
[bridged](https://github.com/matrix-org/matrix-appservice-slack#matrix-appservice-slack) to Slack.
* [**Github Discussions**](https://github.com/spack/spack/discussions):
for Q&A and discussions. Note the pinned discussions for announcements.
* **X**: [@spackpm](https://twitter.com/spackpm). Be sure to
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
`@mention` us!
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack):
only for announcements. Please use other venues for discussions.


@@ -42,8 +42,3 @@ concretizer:
# "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
# "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
strategy: minimal
# Option to specify compatibility between operating systems for reuse of compilers and packages
# Specified as a key: [list] where the key is the OS that is being targeted, and the list contains the OSes
# it can reuse. Note this is a directional compatibility, so mutual compatibility between two OSes
# requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
os_compatible: {}
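To make the directional semantics above concrete: the mapping answers "which other OS's packages may this OS reuse", not the reverse. A tiny illustrative sketch in Python (the helper ``can_reuse_from`` is hypothetical, not Spack code):

.. code-block:: python

   # Hypothetical illustration of the directional os_compatible lookup
   os_compatible = {"sonoma": ["monterey"]}  # sonoma may reuse monterey builds

   def can_reuse_from(target_os: str, candidate_os: str) -> bool:
       """Return True if something built on candidate_os may be reused on target_os."""
       return candidate_os == target_os or candidate_os in os_compatible.get(target_os, [])

   print(can_reuse_from("sonoma", "monterey"))   # True
   print(can_reuse_from("monterey", "sonoma"))   # False until a second entry is added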


@@ -101,12 +101,6 @@ config:
verify_ssl: true
# This is where custom certs for proxy/firewall are stored.
# It can be a path or environment variable. To match ssl env configuration
# the default is the environment variable SSL_CERT_FILE
ssl_certs: $SSL_CERT_FILE
# Suppress gpg warnings from binary package verification
# Only suppresses warnings, gpg failure will still fail the install
# Potential rationale to set True: users have already explicitly trusted the


@@ -24,7 +24,6 @@ packages:
elf: [elfutils]
fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame]
fortran-rt: [gcc-runtime, intel-oneapi-runtime]
fuse: [libfuse]
gl: [glx, osmesa]
glu: [mesa-glu, openglu]
@@ -35,9 +34,7 @@ packages:
java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libgfortran: [ gcc-runtime ]
libglx: [mesa+glx, mesa18+glx]
libifcore: [ intel-oneapi-runtime ]
libllvm: [llvm]
libosmesa: [mesa+osmesa, mesa18+osmesa]
lua-lang: [lua, lua-luajit-openresty, lua-luajit]


@@ -59,6 +59,7 @@ upload:
apidoc:
sphinx-apidoc -f -T -o . ../spack
sphinx-apidoc -f -T -o . ../llnl
./nosearch-api-docs # set :nosearch: at top of each file
help:
@echo "Please use \`make <target>' where <target> is one of"


@@ -1119,9 +1119,6 @@ and ``3.4.2``. Similarly, ``@4.2:`` means any version above and including
``4.2``. As a short-hand, ``@3`` is equivalent to the range ``@3:3`` and
includes any version with major version ``3``.
Versions are ordered lexicographically by their components. For more details
on the order, see :ref:`the packaging guide <version-comparison>`.
Notice that you can distinguish between the specific version ``@=3.2`` and
the range ``@3.2``. This is useful for packages that follow a versioning
scheme that omits the zero patch version number: ``3.2``, ``3.2.1``,


@@ -220,40 +220,6 @@ section of the configuration:
.. _binary_caches_oci:
---------------------------------
Automatic push to a build cache
---------------------------------
Sometimes it is convenient to push packages to a build cache as soon as they are installed. Spack can do this by setting the ``autopush`` flag when adding a mirror:
.. code-block:: console
$ spack mirror add --autopush <name> <url or path>
Or the autopush flag can be set for an existing mirror:
.. code-block:: console
$ spack mirror set --autopush <name> # enable automatic push for an existing mirror
$ spack mirror set --no-autopush <name> # disable automatic push for an existing mirror
Then after installing a package it is automatically pushed to all mirrors with ``autopush: true``. The command
.. code-block:: console
$ spack install <package>
will have the same effect as
.. code-block:: console
$ spack install <package>
$ spack buildcache push <cache> <package> # for all caches with autopush: true
.. note::
Packages are automatically pushed to a build cache only if they are built from source.
-----------------------------------------
OCI / Docker V2 registries as build cache
-----------------------------------------


@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrappin
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
@@ -175,4 +175,4 @@ bootstrapping.
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.
commands suggested at the prompt, they can be used to bootstrap Spack.


@@ -250,7 +250,7 @@ generator is Ninja. To switch to the Ninja generator, simply add:
.. code-block:: python
generator("ninja")
generator = "Ninja"
``CMakePackage`` defaults to "Unix Makefiles". If you switch to the


@@ -173,72 +173,6 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding
]
^^^^^^^
Testing
^^^^^^^
``PerlPackage`` provides a simple stand-alone test of the successfully
installed package to confirm that installed perl module(s) can be used.
These tests can be performed any time after the installation using
``spack -v test run``. (For more information on the command, see
:ref:`cmd-spack-test-run`.)
The base class automatically detects perl modules based on the presence
of ``*.pm`` files under the package's library directory. For example,
the files under ``perl-bignum``'s perl library are:
.. code-block:: console
$ find . -name "*.pm"
./bigfloat.pm
./bigrat.pm
./Math/BigFloat/Trace.pm
./Math/BigInt/Trace.pm
./Math/BigRat/Trace.pm
./bigint.pm
./bignum.pm
which results in the package having the ``use_modules`` property containing:
.. code-block:: python
use_modules = [
"bigfloat",
"bigrat",
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
"bigint",
"bignum",
]
.. note::
This list can often be used to catch missing dependencies.
If the list is somehow wrong, you can provide the names of the modules
yourself by overriding ``use_modules`` like so:
.. code-block:: python
use_modules = ["bigfloat", "bigrat", "bigint", "bignum"]
If you only want a subset of the automatically detected modules to be
tested, you could instead define the ``skip_modules`` property on the
package. So, instead of overriding ``use_modules`` as shown above, you
could define the following:
.. code-block:: python
skip_modules = [
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
]
for the same use tests.
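The automatic detection described above effectively maps each ``*.pm`` path to a Perl module name by dropping the suffix and joining the path components with ``::``. A minimal standalone sketch of that mapping (the helper ``pm_files_to_modules`` is hypothetical, not part of ``PerlPackage``):

.. code-block:: python

   from pathlib import PurePosixPath

   def pm_files_to_modules(paths):
       """Map relative *.pm paths to Perl module names, e.g. Math/BigInt/Trace.pm -> Math::BigInt::Trace."""
       return ["::".join(PurePosixPath(p).with_suffix("").parts) for p in paths]

   print(pm_files_to_modules(["./bignum.pm", "./Math/BigRat/Trace.pm"]))
   # ['bignum', 'Math::BigRat::Trace']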
^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack
^^^^^^^^^^^^^^^^^^^^^


@@ -145,22 +145,6 @@ hosts when making ``ssl`` connections. Set to ``false`` to disable, and
tools like ``curl`` will use their ``--insecure`` options. Disabling
this can expose you to attacks. Use at your own risk.
--------------------
``ssl_certs``
--------------------
Path to custom certificates for SSL verification. The value can be a
filesystem path, or an environment variable that expands to a file path.
The default value is set to the environment variable ``SSL_CERT_FILE``
to use the same syntax used by many other applications that automatically
detect custom certificates.
When ``url_fetch_method:curl`` is used, ``config:ssl_certs`` should resolve to
a single file. Spack will then set the environment variable ``CURL_CA_BUNDLE``
in the subprocess calling ``curl``.
If ``url_fetch_method:urllib`` is used, then files and directories are supported, i.e.
``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
will work.
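As a rough illustration of the ``curl`` case above, the sketch below shows how a fetch helper might expand ``ssl_certs`` and hand the result to a ``curl`` subprocess via ``CURL_CA_BUNDLE``. This is an illustration only, not Spack's actual fetcher code, and the helper name ``fetch_with_curl`` is made up:

.. code-block:: python

   import os
   import subprocess

   def fetch_with_curl(url, ssl_certs="$SSL_CERT_FILE"):
       """Expand an ssl_certs setting and expose it to curl via CURL_CA_BUNDLE."""
       cert_file = os.path.expandvars(ssl_certs)   # e.g. "$SSL_CERT_FILE" -> /path/to/certs.pem
       env = dict(os.environ)
       if os.path.isfile(cert_file):
           env["CURL_CA_BUNDLE"] = cert_file       # curl reads its CA bundle from this variable
       subprocess.run(["curl", "-fsSLO", url], env=env, check=True)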
--------------------
``checksum``
--------------------


@@ -73,12 +73,9 @@ are six configuration scopes. From lowest to highest:
Spack instance per project) or for site-wide settings on a multi-user
machine (e.g., for a common Spack instance).
#. **plugin**: Read from a Python project's entry points. Settings here affect
all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes.
#. **user**: Stored in the home directory: ``~/.spack/``. These settings
affect all instances of Spack and take higher precedence than site,
system, plugin, or defaults scopes.
system, or defaults scopes.
#. **custom**: Stored in a custom directory specified by ``--config-scope``.
If multiple scopes are listed on the command line, they are ordered
@@ -199,45 +196,6 @@ with MPICH. You can create different configuration scopes for use with
mpi: [mpich]
.. _plugin-scopes:
^^^^^^^^^^^^^
Plugin scopes
^^^^^^^^^^^^^
.. note::
Python version >= 3.8 is required to enable plugin configuration.
Spack can be made aware of configuration scopes that are installed as part of a python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   ├── __init__.py
│   │   └── spack/
│   │   │   └── config.yaml
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.config"]
my_package = "my_package:get_config_path"
The function ``my_package.get_config_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_config_path():
dirname = importlib.resources.files("my_package").joinpath("spack")
if dirname.exists():
return str(dirname)
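For context, entry points registered this way can be discovered with the standard library. The sketch below is illustrative rather than Spack's actual loader, and assumes Python >= 3.10 for the ``group=`` keyword:

.. code-block:: python

   import importlib.metadata

   def discover_config_scopes():
       """Yield (name, path) pairs for every "spack.config" entry point that resolves to a path."""
       for entry_point in importlib.metadata.entry_points(group="spack.config"):
           get_path = entry_point.load()   # e.g. my_package.get_config_path
           path = get_path()
           if path:
               yield entry_point.name, path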
.. _platform-scopes:
------------------------


@@ -1071,9 +1071,9 @@ Announcing a release
We announce releases in all of the major Spack communication channels.
Publishing the release takes care of GitHub. The remaining channels are
X, Slack, and the mailing list. Here are the steps:
Twitter, Slack, and the mailing list. Here are the steps:
#. Announce the release on X.
#. Announce the release on Twitter.
* Compose the tweet on the ``@spackpm`` account per the
``spack-twitter`` slack channel.


@@ -952,17 +952,6 @@ function, as shown in the example below:
^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
all: "{name}-{version}/{compiler.name}-{compiler.version}"
Projections also permit environment and spack configuration variable
expansions as shown below:
.. code-block:: yaml
projections:
all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIBLE"
where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.
The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will
be the first non-``all`` entry that the spec satisfies, or ``all`` if


@@ -111,39 +111,3 @@ The corresponding unit tests can be run giving the appropriate options to ``spac
(5 durations < 0.005s hidden. Use -vv to show these durations.)
=========================================== 5 passed in 5.06s ============================================
---------------------------------------
Registering Extensions via Entry Points
---------------------------------------
.. note::
Python version >= 3.8 is required to register extensions via entry points.
Spack can be made aware of extensions that are installed as part of a python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   └── __init__.py
│   └── spack-scripting/ # the spack extensions
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.extenions"]
my_package = "my_package:get_extension_path"
The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_extension_path():
dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
if dirname.exists():
return str(dirname)


@@ -250,10 +250,9 @@ Compiler configuration
Spack has the ability to build packages with multiple compilers and
compiler versions. Compilers can be made available to Spack by
specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
or automatically by running ``spack compiler find``, but for
convenience Spack will automatically detect compilers the first time
it needs them.
specifying them manually in ``compilers.yaml``, or automatically by
running ``spack compiler find``, but for convenience Spack will
automatically detect compilers the first time it needs them.
.. _cmd-spack-compilers:
@@ -458,48 +457,6 @@ specification. The operations available to modify the environment are ``set``, `
prepend_path: # Similar for append|remove_path
LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
.. note::
Spack is in the process of moving compilers from a separate
attribute to be handled like all other packages. As part of this
process, the ``compilers.yaml`` section will eventually be replaced
by configuration in the ``packages.yaml`` section. This new
configuration is now available, although it is not yet the default
behavior.
Compilers can also be configured as external packages in the
``packages.yaml`` config file. Any external package for a compiler
(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
assuming the paths to the compiler executables are determinable from
the prefix.
If the paths to the compiler executable are not determinable from the
prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler.
.. code-block:: yaml
packages:
gcc:
external:
- spec: gcc@12.2.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
environment:
set:
GCC_ROOT: /usr
external:
- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
paths:
cc: /usr/bin/clang-with-suffix
cxx: /usr/bin/clang++-with-extra-info
fc: /usr/bin/gfortran
f77: /usr/bin/gfortran
extra_rpaths:
- /usr/lib/llvm/
^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler
@@ -1572,8 +1529,6 @@ Microsoft Visual Studio
"""""""""""""""""""""""
Microsoft Visual Studio provides the only Windows C/C++ compiler that is currently supported by Spack.
Spack additionally requires that the Windows SDK (including WGL) be installed as part of your
Visual Studio installation, as it is required to build many packages from source.
We require several specific components to be included in the Visual Studio installation.
One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools,"
@@ -1581,7 +1536,6 @@ depending on installation type (Professional, Build Tools, etc.) The other requ
"C++ CMake tools for Windows," which can be selected from among the optional packages.
This provides CMake and Ninja for use during Spack configuration.
If you already have Visual Studio installed, you can make sure these components are installed by
rerunning the installer. Next to your installation, select "Modify" and look at the
"Installation details" pane on the right.


@@ -273,21 +273,9 @@ builtin support through the ``depends_on`` function, the latter simply uses a ``
statement. Both module systems (at least in newer versions) do reference counting, so that if a
module is loaded by two different modules, it will only be unloaded after the others are.
The ``autoload`` key accepts the values:
* ``none``: no autoloading
* ``run``: autoload direct *run* type dependencies
* ``direct``: autoload direct *link and run* type dependencies
* ``all``: autoload all dependencies
In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.
The ``direct`` option is most correct: there are cases where pure link dependencies need to set
variables for themselves, or need to have variables of their own dependencies set.
In practice however, ``run`` is often sufficient, and may make ``module load`` snappier.
The ``all`` option is discouraged and seldom used.
The ``autoload`` key accepts the values ``none``, ``direct``, and ``all``. To disable it, use
``none``, and to enable, it's best to stick to ``direct``, which only autoloads the direct link and
run type dependencies, relying on recursive autoloading to load the rest.
A common complaint about autoloading is the large number of modules that are visible to the user.
Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
@@ -309,11 +297,11 @@ Environment Modules requires version 4.7 or higher.
tcl:
hide_implicits: true
all:
autoload: direct # or `run`
autoload: direct
lmod:
hide_implicits: true
all:
autoload: direct # or `run`
autoload: direct
.. _anonymous_specs:


@@ -0,0 +1,6 @@
#!/bin/sh
# Set :nosearch: at top of each api doc file
for filename in spack.*.rst llnl.*.rst; do
    { echo ":nosearch:"; cat "$filename"; } > "$filename.tmp" && mv "$filename.tmp" "$filename"
done


@@ -893,50 +893,26 @@ as an option to the ``version()`` directive. Example situations would be a
"snapshot"-like Version Control System (VCS) tag, a VCS branch such as
``v6-16-00-patches``, or a URL specifying a regularly updated snapshot tarball.
.. _version-comparison:
^^^^^^^^^^^^^^^^^^
Version comparison
^^^^^^^^^^^^^^^^^^
Spack imposes a generic total ordering on the set of versions,
independently from the package they are associated with.
Most Spack versions are numeric, a tuple of integers; for example,
``0.1``, ``6.96`` or ``1.2.3.1``. In this very basic case, version
comparison is lexicographical on the numeric components:
``1.2 < 1.2.1 < 1.2.2 < 1.10``.
``0.1``, ``6.96`` or ``1.2.3.1``. Spack knows how to compare and sort
numeric versions.
Spack also supports string components such as ``1.1.1a`` and
``1.y.0``. String components are considered less than numeric
components, so ``1.y.0 < 1.0``. This is for consistency with
`RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_. String
components do not have to be separated by dots or any other delimiter.
So, the contrived version ``1y0`` is identical to ``1.y.0``.
Some Spack versions involve slight extensions of numeric syntax; for
example, ``py-sphinx-rtd-theme@=0.1.10a0``. In this case, numbers are
always considered to be "newer" than letters. This is for consistency
with `RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_.
Pre-release suffixes also contain string parts, but they are handled
in a special way. For example ``1.2.3alpha1`` is parsed as a pre-release
of the version ``1.2.3``. This allows Spack to order it before the
actual release: ``1.2.3alpha1 < 1.2.3``. Spack supports alpha, beta and
release candidate suffixes: ``1.2alpha1 < 1.2beta1 < 1.2rc1 < 1.2``. Any
suffix not recognized as a pre-release is treated as an ordinary
string component, so ``1.2 < 1.2-mysuffix``.
Spack versions may also be arbitrary non-numeric strings, for example
``develop``, ``master``, ``local``.
Finally, there are a few special string components that are considered
"infinity versions". They include ``develop``, ``main``, ``master``,
``head``, ``trunk``, and ``stable``. For example: ``1.2 < develop``.
These are useful for specifying the most recent development version of
a package (often a moving target like a git branch), without assigning
a specific version number. Infinity versions are not automatically used when determining the latest version of a package unless explicitly required by another package or user.
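A quick sanity check of the rules described so far, written as a sketch that assumes a Spack checkout with ``lib/spack`` on ``PYTHONPATH`` and a Spack version recent enough to parse pre-release suffixes:

.. code-block:: python

   from spack.version import Version

   # numeric components compare as versions, not as strings
   assert Version("1.2") < Version("1.2.1") < Version("1.2.2") < Version("1.10")
   # string components sort before numeric components
   assert Version("1.y.0") < Version("1.0")
   # pre-release suffixes come before the actual release
   assert Version("1.2alpha1") < Version("1.2beta1") < Version("1.2rc1") < Version("1.2")
   # "infinity versions" such as develop are newer than any numeric version
   assert Version("1.2") < Version("develop")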
More formally, the order on versions is defined as follows. A version
string is split into a list of components based on delimiters such as
``.`` and ``-`` and string boundaries. The components are split into
the **release** and a possible **pre-release** (if the last component
is numeric and the second to last is a string ``alpha``, ``beta`` or ``rc``).
The release components are ordered lexicographically, with comparison
between different types of components as follows:
The order on versions is defined as follows. A version string is split
into a list of components based on delimiters such as ``.``, ``-`` etc.
Lists are then ordered lexicographically, where components are ordered
as follows:
#. The following special strings are considered larger than any other
numeric or non-numeric version component, and satisfy the following
@@ -949,9 +925,6 @@ between different types of components as follows:
#. All other non-numeric components are less than numeric components,
and are ordered alphabetically.
Finally, if the release components are equal, the pre-release components
are used to break the tie, in the obvious way.
The logic behind this sort order is two-fold:
#. Non-numeric versions are usually used for special cases while


@@ -2,12 +2,12 @@ sphinx==7.2.6
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.1
python-levenshtein==0.25.0
docutils==0.20.1
pygments==2.17.2
urllib3==2.2.1
pytest==8.1.1
pytest==8.0.2
isort==5.13.2
black==24.4.0
black==24.2.0
flake8==7.0.0
mypy==1.9.0
mypy==1.8.0

lib/spack/env/cc (vendored)

@@ -47,8 +47,7 @@ SPACK_F77_RPATH_ARG
SPACK_FC_RPATH_ARG
SPACK_LINKER_ARG
SPACK_SHORT_SPEC
SPACK_SYSTEM_DIRS
SPACK_MANAGED_DIRS"
SPACK_SYSTEM_DIRS"
# Optional parameters that aren't required to be set
@@ -174,17 +173,21 @@ preextend() {
unset IFS
}
# eval this because SPACK_MANAGED_DIRS and SPACK_SYSTEM_DIRS are inputs we don't wanna loop over.
# moving the eval inside the function would eval it every call.
eval "\
path_order() {
case \"\$1\" in
$SPACK_MANAGED_DIRS) return 0 ;;
$SPACK_SYSTEM_DIRS) return 2 ;;
/*) return 1 ;;
esac
# system_dir PATH
# test whether a path is a system directory
system_dir() {
IFS=':' # SPACK_SYSTEM_DIRS is colon-separated
path="$1"
for sd in $SPACK_SYSTEM_DIRS; do
if [ "${path}" = "${sd}" ] || [ "${path}" = "${sd}/" ]; then
# success if path starts with a system prefix
unset IFS
return 0
fi
done
unset IFS
return 1 # fail if path does not match any system prefix
}
"
# Fail with a clear message if the input contains any bell characters.
if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
@@ -245,7 +248,7 @@ case "$command" in
lang_flags=C
debug_flags="-g"
;;
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
command="$SPACK_CXX"
language="C++"
comp="CXX"
@@ -417,12 +420,11 @@ input_command="$*"
parse_Wl() {
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
wl_expect_rpath=no
else
case "$1" in
@@ -430,25 +432,21 @@ parse_Wl() {
arg="${1#-rpath=}"
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
--rpath=*)
arg="${1#--rpath=}"
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
-rpath|--rpath)
wl_expect_rpath=yes
@@ -475,20 +473,12 @@ categorize_arguments() {
return_other_args_list=""
return_isystem_was_used=""
return_isystem_spack_store_include_dirs_list=""
return_isystem_system_include_dirs_list=""
return_isystem_include_dirs_list=""
return_spack_store_include_dirs_list=""
return_system_include_dirs_list=""
return_include_dirs_list=""
return_spack_store_lib_dirs_list=""
return_system_lib_dirs_list=""
return_lib_dirs_list=""
return_spack_store_rpath_dirs_list=""
return_system_rpath_dirs_list=""
return_rpath_dirs_list=""
@@ -536,7 +526,7 @@ categorize_arguments() {
continue
fi
replaced="$after$stripped"
replaced="$after$stripped"
# it matched, remove it
shift
@@ -556,32 +546,29 @@ categorize_arguments() {
arg="${1#-isystem}"
return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_isystem_spack_store_include_dirs_list "$arg" ;;
1) append return_isystem_include_dirs_list "$arg" ;;
2) append return_isystem_system_include_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_isystem_system_include_dirs_list "$arg"
else
append return_isystem_include_dirs_list "$arg"
fi
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_spack_store_include_dirs_list "$arg" ;;
1) append return_include_dirs_list "$arg" ;;
2) append return_system_include_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_include_dirs_list "$arg"
else
append return_include_dirs_list "$arg"
fi
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_spack_store_lib_dirs_list "$arg" ;;
1) append return_lib_dirs_list "$arg" ;;
2) append return_system_lib_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_lib_dirs_list "$arg"
else
append return_lib_dirs_list "$arg"
fi
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
@@ -614,32 +601,29 @@ categorize_arguments() {
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
@@ -677,25 +661,16 @@ categorize_arguments() {
}
categorize_arguments "$@"
spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
include_dirs_list="$return_include_dirs_list"
spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list"
include_dirs_list="$return_include_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
isystem_was_used="$return_isystem_was_used"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
other_args_list="$return_other_args_list"
#
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
@@ -763,25 +738,16 @@ esac
IFS="$lsep"
categorize_arguments $spack_flags_list
unset IFS
spack_flags_isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list"
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag
@@ -801,13 +767,11 @@ if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
# Append RPATH directories. Note that in the case of the
# top-level package these directories may not exist yet. For dependencies
# it is assumed that paths have already been confirmed.
extend spack_store_rpath_dirs_list SPACK_STORE_RPATH_DIRS
extend rpath_dirs_list SPACK_RPATH_DIRS
fi
fi
if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend spack_store_lib_dirs_list SPACK_STORE_LINK_DIRS
extend lib_dirs_list SPACK_LINK_DIRS
fi
@@ -834,50 +798,38 @@ case "$mode" in
;;
esac
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
extend isystem_spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
else
extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend include_dirs_list SPACK_INCLUDE_DIRS
fi
;;
esac
#
# Finally, reassemble the command line.
#
args_list="$flags_list"
# Include search paths partitioned by (in store, non-system, system)
# Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_flags_spack_store_include_dirs_list -I
extend args_list spack_store_include_dirs_list -I
extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I
extend args_list spack_flags_isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_include_dirs_list "-I"
extend args_list include_dirs_list "-I"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
elif [ "$isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
else
extend args_list SPACK_INCLUDE_DIRS "-I"
fi
;;
esac
extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
# Library search paths partitioned by (in store, non-system, system)
extend args_list spack_flags_spack_store_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"
# Library search paths
extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
@@ -887,12 +839,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add"
fi
extend args_list spack_flags_spack_store_rpath_dirs_list "$rpath"
extend args_list spack_store_rpath_dirs_list "$rpath"
extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath"
;;
@@ -900,12 +848,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add"
fi
extend args_list spack_flags_spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}"
;;
@@ -969,3 +913,4 @@ fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list


@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf)
* Version: 0.2.2 (commit 1dc58a5776dd77e6fc6e4ba5626af5b1fb24996e)
astunparse
----------------


@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]:
if hasattr(self, a):
if memo is not None:
setattr(t, a, copy.deepcopy(getattr(self, a), memo))
setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
else:
setattr(t, a, getattr(self, a))
# fmt: on


@@ -1,3 +1,2 @@
"""Init file to avoid namespace packages"""
__version__ = "0.2.3"
__version__ = "0.2.2"


@@ -3,7 +3,6 @@
"""
import sys
from .cli import main
sys.exit(main())


@@ -46,11 +46,7 @@ def _make_parser() -> argparse.ArgumentParser:
def cpu() -> int:
"""Run the `archspec cpu` subcommand."""
try:
print(archspec.cpu.host())
except FileNotFoundError as exc:
print(exc)
return 1
print(archspec.cpu.host())
return 0
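The ``archspec cpu`` subcommand shown here is a thin wrapper around the library call. A brief usage sketch, assuming the ``archspec`` package is importable:

.. code-block:: python

   import archspec.cpu

   host = archspec.cpu.host()              # Microarchitecture object for the current machine
   print(host.name, host.vendor)           # e.g. "skylake", "GenuineIntel"
   print("avx2" in host.features)          # membership test on the detected feature flags
   print(archspec.cpu.TARGETS["x86_64"])   # TARGETS maps known names to Microarchitecture objects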


@@ -5,14 +5,10 @@
"""The "cpu" package permits to query and compare different
CPU microarchitectures.
"""
from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
from .microarchitecture import TARGETS, generic_microarchitecture
from .microarchitecture import version_components
from .detect import host
from .microarchitecture import (
TARGETS,
Microarchitecture,
UnsupportedMicroarchitecture,
generic_microarchitecture,
version_components,
)
__all__ = [
"Microarchitecture",


@@ -4,17 +4,15 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures"""
import collections
import functools
import os
import platform
import re
import struct
import subprocess
import warnings
from typing import Dict, List, Optional, Set, Tuple, Union
from ..vendor.cpuid.cpuid import CPUID
from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture
from .schema import CPUID_JSON, TARGETS_JSON
from .microarchitecture import generic_microarchitecture, TARGETS
from .schema import TARGETS_JSON
#: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu
@@ -24,46 +22,43 @@
#: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {}
# Constants for commonly used architectures
X86_64 = "x86_64"
AARCH64 = "aarch64"
PPC64LE = "ppc64le"
PPC64 = "ppc64"
RISCV64 = "riscv64"
def detection(operating_system: str):
"""Decorator to mark functions that are meant to return partial information on the current cpu.
def info_dict(operating_system):
"""Decorator to mark functions that are meant to return raw info on
the current cpu.
Args:
operating_system: operating system where this function can be used.
operating_system (str or tuple): operating system for which the marked
function is a viable factory of raw info dictionaries.
"""
def decorator(factory):
INFO_FACTORY[operating_system].append(factory)
return factory
@functools.wraps(factory)
def _impl():
info = factory()
# Check that info contains a few mandatory fields
msg = 'field "{0}" is missing from raw info dictionary'
assert "vendor_id" in info, msg.format("vendor_id")
assert "flags" in info, msg.format("flags")
assert "model" in info, msg.format("model")
assert "model_name" in info, msg.format("model_name")
return info
return _impl
return decorator
def partial_uarch(
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
"""Construct a partial microarchitecture, from information gathered during system scan."""
return Microarchitecture(
name=name,
parents=[],
vendor=vendor,
features=features or set(),
compilers={},
generation=generation,
)
@detection(operating_system="Linux")
def proc_cpuinfo() -> Microarchitecture:
"""Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``"""
data = {}
@info_dict(operating_system="Linux")
def proc_cpuinfo():
"""Returns a raw info dictionary by parsing the first entry of
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
for line in file:
key, separator, value = line.partition(":")
@@ -75,96 +70,11 @@ def proc_cpuinfo() -> Microarchitecture:
#
# we are on a blank line separating two cpus. Exit early as
# we want to read just the first entry in /proc/cpuinfo
if separator != ":" and data:
if separator != ":" and info:
break
data[key.strip()] = value.strip()
architecture = _machine()
if architecture == X86_64:
return partial_uarch(
vendor=data.get("vendor_id", "generic"), features=_feature_set(data, key="flags")
)
if architecture == AARCH64:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
)
if architecture in (PPC64LE, PPC64):
generation_match = re.search(r"POWER(\d+)", data.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
return partial_uarch(generation=generation)
if architecture == RISCV64:
if data.get("uarch") == "sifive,u74-mc":
data["uarch"] = "u74mc"
return partial_uarch(name=data.get("uarch", RISCV64))
return generic_microarchitecture(architecture)
class CpuidInfoCollector:
"""Collects the information we need on the host CPU from cpuid"""
# pylint: disable=too-few-public-methods
def __init__(self):
self.cpuid = CPUID()
registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
self.highest_basic_support = registers.eax
self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
"utf-8"
)
registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
self.highest_extension_support = registers.eax
self.features = self._features()
def _features(self):
result = set()
def check_features(data):
registers = self.cpuid.registers_for(**data["input"])
for feature_check in data["bits"]:
current = getattr(registers, feature_check["register"])
if self._is_bit_set(current, feature_check["bit"]):
result.add(feature_check["name"])
for call_data in CPUID_JSON["flags"]:
if call_data["input"]["eax"] > self.highest_basic_support:
continue
check_features(call_data)
for call_data in CPUID_JSON["extension-flags"]:
if call_data["input"]["eax"] > self.highest_extension_support:
continue
check_features(call_data)
return result
def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit
return register & mask > 0
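As a quick illustration of the `_is_bit_set` check above: each CPU feature corresponds to one bit in a cpuid output register, so presence is tested with a mask. The register value and bit position below are made up.

```python
# Hypothetical 32-bit register value returned by a cpuid call; which bit maps
# to which feature name is what cpuid.json encodes.
register = 0b0010_0000
bit = 5
mask = 1 << bit
print((register & mask) > 0)  # True: the feature encoded by bit 5 is present
```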
@detection(operating_system="Windows")
def cpuid_info():
"""Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
architecture = _machine()
if architecture == X86_64:
data = CpuidInfoCollector()
return partial_uarch(vendor=data.vendor, features=data.features)
return generic_microarchitecture(architecture)
info[key.strip()] = value.strip()
return info
def _check_output(args, env):
@@ -173,25 +83,14 @@ def _check_output(args, env):
return str(output.decode("utf-8"))
WINDOWS_MAPPING = {
"AMD64": "x86_64",
"ARM64": "aarch64",
}
def _machine():
"""Return the machine architecture we are on"""
""" "Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin or Windows, trust what Python tells us
if operating_system not in ("Darwin", "Windows"):
# If we are not on Darwin, trust what Python tells us
if operating_system != "Darwin":
return platform.machine()
# Normalize windows specific names
if operating_system == "Windows":
platform_machine = platform.machine()
return WINDOWS_MAPPING.get(platform_machine, platform_machine)
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
@@ -204,47 +103,54 @@ def _machine():
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return AARCH64
return "aarch64"
return X86_64
return "x86_64"
@detection(operating_system="Darwin")
def sysctl_info() -> Microarchitecture:
@info_dict(operating_system="Darwin")
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
def sysctl(*args: str) -> str:
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == X86_64:
features = (
f'{sysctl("-n", "machdep.cpu.features").lower()} '
f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}'
if _machine() == "x86_64":
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
)
features = set(features.split())
info = {
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
"flags": flags,
"model": sysctl("-n", "machdep.cpu.model"),
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
else:
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
# Flags detected on Darwin turned to their linux counterpart
for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items():
if darwin_flag in features:
features.update(linux_flag.split())
return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features)
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
return partial_uarch(name=model, vendor="Apple")
info = {
"vendor_id": "Apple",
"flags": [],
"model": model,
"CPU implementer": "Apple",
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
return info
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
@@ -254,10 +160,22 @@ def _ensure_bin_usrbin_in_path():
return child_environment
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
"""Adjust the vendor field to make it human-readable"""
if "CPU implementer" not in data:
return "generic"
def adjust_raw_flags(info):
"""Adjust the flags detected on the system to homogenize
slightly different representations.
"""
# Flags detected on Darwin turned to their linux counterpart
flags = info.get("flags", [])
d2l = TARGETS_JSON["conversions"]["darwin_flags"]
for darwin_flag, linux_flag in d2l.items():
if darwin_flag in flags:
info["flags"] += " " + linux_flag
def adjust_raw_vendor(info):
"""Adjust the vendor field to make it human readable"""
if "CPU implementer" not in info:
return
# Mapping numeric codes to vendor (ARM). This list is a merge from
# different sources:
@@ -267,37 +185,43 @@ def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
# https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
# https://patchwork.kernel.org/patch/10524949/
arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
arm_code = data["CPU implementer"]
return arm_vendors.get(arm_code, arm_code)
arm_code = info["CPU implementer"]
if arm_code in arm_vendors:
info["CPU implementer"] = arm_vendors[arm_code]
def _feature_set(data: Dict[str, str], key: str) -> Set[str]:
return set(data.get(key, "").split())
def raw_info_dictionary():
"""Returns a dictionary with information on the cpu of the current host.
def detected_info() -> Microarchitecture:
"""Returns a partial Microarchitecture with information on the CPU of the current host.
This function calls all the viable factories one after the other until there's one that is
able to produce the requested information. Falls-back to a generic microarchitecture, if none
of the calls succeed.
This function calls all the viable factories one after the other until
there's one that is able to produce the requested information.
"""
# pylint: disable=broad-except
info = {}
for factory in INFO_FACTORY[platform.system()]:
try:
return factory()
info = factory()
except Exception as exc:
warnings.warn(str(exc))
return generic_microarchitecture(_machine())
if info:
adjust_raw_flags(info)
adjust_raw_vendor(info)
break
return info
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]:
"""Returns an unordered list of known micro-architectures that are compatible with the
partial Microarchitecture passed as input.
def compatible_microarchitectures(info):
"""Returns an unordered list of known micro-architectures that are
compatible with the info dictionary passed as argument.
Args:
info (dict): dictionary containing information on the host cpu
"""
architecture_family = _machine()
# If a tester is not registered, assume no known target is compatible with the host
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
return [x for x in TARGETS.values() if tester(info, x)] or [
generic_microarchitecture(architecture_family)
@@ -306,8 +230,8 @@ def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitec
def host():
"""Detects the host micro-architecture and returns it."""
# Retrieve information on the host's cpu
info = detected_info()
# Retrieve a dictionary with raw information on the host's cpu
info = raw_info_dictionary()
# Get a list of possible candidates for this micro-architecture
candidates = compatible_microarchitectures(info)
@@ -334,15 +258,16 @@ def sorting_fn(item):
return max(candidates, key=sorting_fn)
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]):
def compatibility_check(architecture_family):
"""Decorator to register a function as a proper compatibility check.
A compatibility check function takes a partial Microarchitecture object as a first argument,
and an arbitrary target Microarchitecture as the second argument. It returns True if the
target is compatible with the first argument, False otherwise.
A compatibility check function takes the raw info dictionary as a first
argument and an arbitrary target as the second argument. It returns True
if the target is compatible with the info dictionary, False otherwise.
Args:
architecture_family: architecture family for which this test can be used
architecture_family (str or tuple): architecture family for which
this test can be used, e.g. x86_64 or ppc64le etc.
"""
# Turn the argument into something iterable
if isinstance(architecture_family, str):
@@ -355,57 +280,86 @@ def decorator(func):
return decorator
@compatibility_check(architecture_family=(PPC64LE, PPC64))
@compatibility_check(architecture_family=("ppc64le", "ppc64"))
def compatibility_check_for_power(info, target):
"""Compatibility check for PPC64 and PPC64LE architectures."""
basename = platform.machine()
generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
# We can use a target if it descends from our machine type and our
# generation (9 for POWER9, etc) is at least its generation.
arch_root = TARGETS[_machine()]
arch_root = TARGETS[basename]
return (
target == arch_root or arch_root in target.ancestors
) and target.generation <= info.generation
) and target.generation <= generation
@compatibility_check(architecture_family=X86_64)
@compatibility_check(architecture_family="x86_64")
def compatibility_check_for_x86_64(info, target):
"""Compatibility check for x86_64 architectures."""
basename = "x86_64"
vendor = info.get("vendor_id", "generic")
features = set(info.get("flags", "").split())
# We can use a target if it descends from our machine type, is from our
# vendor, and we have all of its features
arch_root = TARGETS[X86_64]
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (info.vendor, "generic")
and target.features.issubset(info.features)
and target.vendor in (vendor, "generic")
and target.features.issubset(features)
)
@compatibility_check(architecture_family=AARCH64)
@compatibility_check(architecture_family="aarch64")
def compatibility_check_for_aarch64(info, target):
"""Compatibility check for AARCH64 architectures."""
# At the moment, it's not clear how to detect compatibility with
basename = "aarch64"
features = set(info.get("Features", "").split())
vendor = info.get("CPU implementer", "generic")
# At the moment it's not clear how to detect compatibility with
# a specific version of the architecture
if target.vendor == "generic" and target.name != AARCH64:
if target.vendor == "generic" and target.name != "aarch64":
return False
arch_root = TARGETS[AARCH64]
arch_root = TARGETS[basename]
arch_root_and_vendor = arch_root == target.family and target.vendor in (
info.vendor,
vendor,
"generic",
)
# On macOS it seems impossible to get all the CPU features
# with syctl info, but for ARM we can get the exact model
if platform.system() == "Darwin":
model = TARGETS[info.name]
model_key = info.get("model", basename)
model = TARGETS[model_key]
return arch_root_and_vendor and (target == model or target in model.ancestors)
return arch_root_and_vendor and target.features.issubset(info.features)
return arch_root_and_vendor and target.features.issubset(features)
@compatibility_check(architecture_family=RISCV64)
@compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target):
"""Compatibility check for riscv64 architectures."""
arch_root = TARGETS[RISCV64]
basename = "riscv64"
uarch = info.get("uarch")
# sifive unmatched board
if uarch == "sifive,u74-mc":
uarch = "u74mc"
# catch-all for unknown uarchs
else:
uarch = "riscv64"
arch_root = TARGETS[basename]
return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic"
target == uarch or target.vendor == "generic"
)

View File

@@ -13,7 +13,6 @@
import archspec
import archspec.cpu.alias
import archspec.cpu.schema
from .alias import FEATURE_ALIASES
from .schema import LazyDictionary
@@ -48,7 +47,7 @@ class Microarchitecture:
which has "broadwell" as a parent, supports running binaries
optimized for "broadwell".
vendor (str): vendor of the micro-architecture
features (set of str): supported CPU flags. Note that the semantic
features (list of str): supported CPU flags. Note that the semantic
of the flags in this field might vary among architectures, if
at all present. For instance x86_64 processors will list all
the flags supported by a given CPU while Arm processors will
@@ -181,28 +180,24 @@ def generic(self):
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
return max(generics, key=lambda x: len(x.ancestors))
def to_dict(self):
"""Returns a dictionary representation of this object."""
return {
"name": str(self.name),
"vendor": str(self.vendor),
"features": sorted(str(x) for x in self.features),
"generation": self.generation,
"parents": [str(x) for x in self.parents],
"compilers": self.compilers,
}
def to_dict(self, return_list_of_items=False):
"""Returns a dictionary representation of this object.
@staticmethod
def from_dict(data) -> "Microarchitecture":
"""Construct a microarchitecture from a dictionary representation."""
return Microarchitecture(
name=data["name"],
parents=[TARGETS[x] for x in data["parents"]],
vendor=data["vendor"],
features=set(data["features"]),
compilers=data.get("compilers", {}),
generation=data.get("generation", 0),
)
Args:
return_list_of_items (bool): if True returns an ordered list of
items instead of the dictionary
"""
list_of_items = [
("name", str(self.name)),
("vendor", str(self.vendor)),
("features", sorted(str(x) for x in self.features)),
("generation", self.generation),
("parents", [str(x) for x in self.parents]),
]
if return_list_of_items:
return list_of_items
return dict(list_of_items)
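A hedged round-trip sketch for the new `to_dict()`/`from_dict()` pair shown above. It assumes a version of archspec that already ships `from_dict`, and that "haswell" is among the known `TARGETS`.

```python
import archspec.cpu

uarch = archspec.cpu.TARGETS["haswell"]                   # assumed to be defined
data = uarch.to_dict()
restored = archspec.cpu.Microarchitecture.from_dict(data)
assert restored == uarch
print(sorted(data))  # ['compilers', 'features', 'generation', 'name', 'parents', 'vendor']
```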
def optimization_flags(self, compiler, version):
"""Returns a string containing the optimization flags that needs
@@ -276,7 +271,9 @@ def tuplify(ver):
flags = flags_fmt.format(**compiler_entry)
return flags
msg = "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
msg = (
"cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
)
if compiler_info:
versions = [x["versions"] for x in compiler_info]
msg += f' [supported compiler versions are {", ".join(versions)}]'
@@ -292,7 +289,9 @@ def generic_microarchitecture(name):
Args:
name (str): name of the micro-architecture
"""
return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={})
return Microarchitecture(
name, parents=[], vendor="generic", features=[], compilers={}
)
def version_components(version):
@@ -346,7 +345,9 @@ def fill_target_from_dict(name, data, targets):
compilers = values.get("compilers", {})
generation = values.get("generation", 0)
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation
)
known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]

View File

@@ -7,9 +7,7 @@
"""
import collections.abc
import json
import os
import pathlib
from typing import Tuple
import os.path
class LazyDictionary(collections.abc.MutableMapping):
@@ -48,65 +46,21 @@ def __len__(self):
return len(self.data)
#: Environment variable that might point to a directory with a user defined JSON file
DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR"
def _load_json_file(json_file):
json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
json_dir = os.path.abspath(json_dir)
#: Environment variable that might point to a directory with extensions to JSON files
EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR"
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r", encoding="utf-8") as file:
return json.load(file)
def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]:
"""Given a filename, returns the absolute path for the main JSON file, and an
optional absolute path for an extension JSON file.
Args:
filename: filename for the JSON file
allow_custom: if True, allows overriding the location where the file resides
"""
json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
json_dir = json_dir.absolute()
json_file = json_dir / filename
extension_file = None
if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
extension_dir = extension_dir.absolute()
extension_file = extension_dir / filename
return json_file, extension_file
def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
with open(json_file, "r", encoding="utf-8") as file:
data = json.load(file)
if not extension_file or not extension_file.exists():
return data
with open(extension_file, "r", encoding="utf-8") as file:
extension_data = json.load(file)
top_level_sections = list(data.keys())
for key in top_level_sections:
if key not in extension_data:
continue
data[key].update(extension_data[key])
return data
return _factory
#: In memory representation of the data in microarchitectures.json,
#: loaded on first access
TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True))
TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
#: JSON schema for microarchitectures.json, loaded on first access
TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json"))
#: Information on how to call 'cpuid' to get information on the HOST CPU
CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))
#: JSON schema for cpuid.json, loaded on first access
CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
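The new `_load` helper above merges an optional extension file into the base JSON one top-level section at a time. The snippet below replays that merge on plain dictionaries to show the effect; the data is invented.

```python
base = {"microarchitectures": {"x86_64": {}}, "conversions": {"arm_vendors": {}}}
extension = {"microarchitectures": {"mychip": {"from": ["x86_64"]}}}

# Same strategy as _load(): only sections already present at top level are updated.
for key in list(base):
    if key in extension:
        base[key].update(extension[key])

print(sorted(base["microarchitectures"]))  # ['mychip', 'x86_64']
```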

View File

@@ -9,11 +9,11 @@ language specific APIs.
Currently the repository contains the following JSON files:
```console
cpu/
├── cpuid.json # Contains information on CPUID calls to retrieve vendor and features on x86_64
├── cpuid_schema.json # Schema for the file above
├── microarchitectures.json # Contains information on CPU microarchitectures
└── microarchitectures_schema.json # Schema for the file above
.
├── COPYRIGHT
└── cpu
   ├── microarchitectures.json # Contains information on CPU microarchitectures
   └── microarchitectures_schema.json # Schema for the file above
```

File diff suppressed because it is too large Load Diff

View File

@@ -1,134 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for microarchitecture definitions and feature aliases",
"type": "object",
"additionalProperties": false,
"properties": {
"vendor": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"highest_extension_support": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
},
"extension-flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
}
}
}

View File

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Anders Høst
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,76 +0,0 @@
cpuid.py
========
Now, this is silly!
Pure Python library for accessing information about x86 processors
by querying the [CPUID](http://en.wikipedia.org/wiki/CPUID)
instruction. Well, not exactly pure Python...
It works by allocating a small piece of virtual memory, copying
a raw x86 function to that memory, giving the memory execute
permissions and then calling the memory as a function. The injected
function executes the CPUID instruction and copies the result back
to a ctypes.Structure where it can be read by Python.
It should work fine on both 32 and 64 bit versions of Windows and Linux
running x86 processors. Apple OS X and other BSD systems should also work,
not tested though...
Why?
----
For poops and giggles. Plus, having access to a low-level feature
without having to compile a C wrapper is pretty neat.
Examples
--------
Getting info with eax=0:
import cpuid
q = cpuid.CPUID()
eax, ebx, ecx, edx = q(0)
Running the files:
$ python example.py
Vendor ID : GenuineIntel
CPU name : Intel(R) Xeon(R) CPU W3550 @ 3.07GHz
Vector instructions supported:
SSE : Yes
SSE2 : Yes
SSE3 : Yes
SSSE3 : Yes
SSE4.1 : Yes
SSE4.2 : Yes
SSE4a : --
AVX : --
AVX2 : --
$ python cpuid.py
CPUID A B C D
00000000 0000000b 756e6547 6c65746e 49656e69
00000001 000106a5 00100800 009ce3bd bfebfbff
00000002 55035a01 00f0b2e4 00000000 09ca212c
00000003 00000000 00000000 00000000 00000000
00000004 00000000 00000000 00000000 00000000
00000005 00000040 00000040 00000003 00001120
00000006 00000003 00000002 00000001 00000000
00000007 00000000 00000000 00000000 00000000
00000008 00000000 00000000 00000000 00000000
00000009 00000000 00000000 00000000 00000000
0000000a 07300403 00000044 00000000 00000603
0000000b 00000000 00000000 00000095 00000000
80000000 80000008 00000000 00000000 00000000
80000001 00000000 00000000 00000001 28100800
80000002 65746e49 2952286c 6f655820 2952286e
80000003 55504320 20202020 20202020 57202020
80000004 30353533 20402020 37302e33 007a4847
80000005 00000000 00000000 00000000 00000000
80000006 00000000 00000000 01006040 00000000
80000007 00000000 00000000 00000000 00000100
80000008 00003024 00000000 00000000 00000000

View File

@@ -1,172 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import platform
import os
import ctypes
from ctypes import c_uint32, c_long, c_ulong, c_size_t, c_void_p, POINTER, CFUNCTYPE
# Posix x86_64:
# Three first call registers : RDI, RSI, RDX
# Volatile registers : RAX, RCX, RDX, RSI, RDI, R8-11
# Windows x86_64:
# Three first call registers : RCX, RDX, R8
# Volatile registers : RAX, RCX, RDX, R8-11
# cdecl 32 bit:
# Three first call registers : Stack (%esp)
# Volatile registers : EAX, ECX, EDX
_POSIX_64_OPC = [
0x53, # push %rbx
0x89, 0xf0, # mov %esi,%eax
0x89, 0xd1, # mov %edx,%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%rdi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%rdi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%rdi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%rdi)
0x5b, # pop %rbx
0xc3 # retq
]
_WINDOWS_64_OPC = [
0x53, # push %rbx
0x89, 0xd0, # mov %edx,%eax
0x49, 0x89, 0xc9, # mov %rcx,%r9
0x44, 0x89, 0xc1, # mov %r8d,%ecx
0x0f, 0xa2, # cpuid
0x41, 0x89, 0x01, # mov %eax,(%r9)
0x41, 0x89, 0x59, 0x04, # mov %ebx,0x4(%r9)
0x41, 0x89, 0x49, 0x08, # mov %ecx,0x8(%r9)
0x41, 0x89, 0x51, 0x0c, # mov %edx,0xc(%r9)
0x5b, # pop %rbx
0xc3 # retq
]
_CDECL_32_OPC = [
0x53, # push %ebx
0x57, # push %edi
0x8b, 0x7c, 0x24, 0x0c, # mov 0xc(%esp),%edi
0x8b, 0x44, 0x24, 0x10, # mov 0x10(%esp),%eax
0x8b, 0x4c, 0x24, 0x14, # mov 0x14(%esp),%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%edi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%edi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%edi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%edi)
0x5f, # pop %edi
0x5b, # pop %ebx
0xc3 # ret
]
is_windows = os.name == "nt"
is_64bit = ctypes.sizeof(ctypes.c_voidp) == 8
class CPUID_struct(ctypes.Structure):
_register_names = ("eax", "ebx", "ecx", "edx")
_fields_ = [(r, c_uint32) for r in _register_names]
def __getitem__(self, item):
if item not in self._register_names:
raise KeyError(item)
return getattr(self, item)
def __repr__(self):
return "eax=0x{:x}, ebx=0x{:x}, ecx=0x{:x}, edx=0x{:x}".format(self.eax, self.ebx, self.ecx, self.edx)
class CPUID(object):
def __init__(self):
if platform.machine() not in ("AMD64", "x86_64", "x86", "i686"):
raise SystemError("Only available for x86")
if is_windows:
if is_64bit:
# VirtualAlloc seems to fail under some weird
# circumstances when ctypes.windll.kernel32 is
# used under 64 bit Python. CDLL fixes this.
self.win = ctypes.CDLL("kernel32.dll")
opc = _WINDOWS_64_OPC
else:
# Here ctypes.windll.kernel32 is needed to get the
# right DLL. Otherwise it will fail when running
# 32 bit Python on 64 bit Windows.
self.win = ctypes.windll.kernel32
opc = _CDECL_32_OPC
else:
opc = _POSIX_64_OPC if is_64bit else _CDECL_32_OPC
size = len(opc)
code = (ctypes.c_ubyte * size)(*opc)
if is_windows:
self.win.VirtualAlloc.restype = c_void_p
self.win.VirtualAlloc.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong]
self.addr = self.win.VirtualAlloc(None, size, 0x1000, 0x40)
if not self.addr:
raise MemoryError("Could not allocate RWX memory")
ctypes.memmove(self.addr, code, size)
else:
from mmap import (
mmap,
MAP_PRIVATE,
MAP_ANONYMOUS,
PROT_WRITE,
PROT_READ,
PROT_EXEC,
)
self.mm = mmap(
-1,
size,
flags=MAP_PRIVATE | MAP_ANONYMOUS,
prot=PROT_WRITE | PROT_READ | PROT_EXEC,
)
self.mm.write(code)
self.addr = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
func_type = CFUNCTYPE(None, POINTER(CPUID_struct), c_uint32, c_uint32)
self.func_ptr = func_type(self.addr)
def __call__(self, eax, ecx=0):
struct = self.registers_for(eax=eax, ecx=ecx)
return struct.eax, struct.ebx, struct.ecx, struct.edx
def registers_for(self, eax, ecx=0):
"""Calls cpuid with eax and ecx set as the input arguments, and returns a structure
containing eax, ebx, ecx, and edx.
"""
struct = CPUID_struct()
self.func_ptr(struct, eax, ecx)
return struct
def __del__(self):
if is_windows:
self.win.VirtualFree.restype = c_long
self.win.VirtualFree.argtypes = [c_void_p, c_size_t, c_ulong]
self.win.VirtualFree(self.addr, 0, 0x8000)
else:
self.mm.close()
if __name__ == "__main__":
def valid_inputs():
cpuid = CPUID()
for eax in (0x0, 0x80000000):
highest, _, _, _ = cpuid(eax)
while eax <= highest:
regs = cpuid(eax)
yield (eax, regs)
eax += 1
print(" ".join(x.ljust(8) for x in ("CPUID", "A", "B", "C", "D")).strip())
for eax, regs in valid_inputs():
print("%08x" % eax, " ".join("%08x" % reg for reg in regs))

View File

@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import struct
import cpuid
def cpu_vendor(cpu):
_, b, c, d = cpu(0)
return struct.pack("III", b, d, c).decode("utf-8")
def cpu_name(cpu):
name = "".join((struct.pack("IIII", *cpu(0x80000000 + i)).decode("utf-8")
for i in range(2, 5)))
return name.split('\x00', 1)[0]
def is_set(cpu, leaf, subleaf, reg_idx, bit):
"""
@param {leaf} %eax
@param {subleaf} %ecx, 0 in most cases
@param {reg_idx} idx of [%eax, %ebx, %ecx, %edx], 0-based
@param {bit} bit of reg selected by {reg_idx}, 0-based
"""
regs = cpu(leaf, subleaf)
if (1 << bit) & regs[reg_idx]:
return "Yes"
else:
return "--"
if __name__ == "__main__":
cpu = cpuid.CPUID()
print("Vendor ID : %s" % cpu_vendor(cpu))
print("CPU name : %s" % cpu_name(cpu))
print()
print("Vector instructions supported:")
print("SSE : %s" % is_set(cpu, 1, 0, 3, 25))
print("SSE2 : %s" % is_set(cpu, 1, 0, 3, 26))
print("SSE3 : %s" % is_set(cpu, 1, 0, 2, 0))
print("SSSE3 : %s" % is_set(cpu, 1, 0, 2, 9))
print("SSE4.1 : %s" % is_set(cpu, 1, 0, 2, 19))
print("SSE4.2 : %s" % is_set(cpu, 1, 0, 2, 20))
print("SSE4a : %s" % is_set(cpu, 0x80000001, 0, 2, 6))
print("AVX : %s" % is_set(cpu, 1, 0, 2, 28))
print("AVX2 : %s" % is_set(cpu, 7, 0, 1, 5))
print("BMI1 : %s" % is_set(cpu, 7, 0, 1, 3))
print("BMI2 : %s" % is_set(cpu, 7, 0, 1, 8))
# Intel RDT CMT/MBM
print("L3 Monitoring : %s" % is_set(cpu, 0xf, 0, 3, 1))
print("L3 Occupancy : %s" % is_set(cpu, 0xf, 1, 3, 0))
print("L3 Total BW : %s" % is_set(cpu, 0xf, 1, 3, 1))
print("L3 Local BW : %s" % is_set(cpu, 0xf, 1, 3, 2))

View File

@@ -1,13 +0,0 @@
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/comments.py b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
index 1badeda585..892c868af3 100644
--- a/lib/spack/external/_vendoring/ruamel/yaml/comments.py
+++ b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]:
if hasattr(self, a):
if memo is not None:
- setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
+ setattr(t, a, copy.deepcopy(getattr(self, a), memo))
else:
setattr(t, a, getattr(self, a))
# fmt: on

View File

@@ -42,6 +42,11 @@ def convert_to_posix_path(path: str) -> str:
return format_os_path(path, mode=Path.unix)
def convert_to_windows_path(path: str) -> str:
"""Converts the input path to Windows style."""
return format_os_path(path, mode=Path.windows)
def convert_to_platform_path(path: str) -> str:
"""Converts the input path to the current platform's native style."""
return format_os_path(path, mode=Path.platform_path)
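Illustrative calls to the path helpers above. The import location and the exact output are assumptions based on this hunk; the platform-native conversion depends on the host.

```python
from llnl.path import (  # module path assumed
    convert_to_platform_path,
    convert_to_posix_path,
)

print(convert_to_posix_path("C:\\Users\\spack\\stage"))  # e.g. C:/Users/spack/stage
print(convert_to_platform_path("/tmp/spack-stage"))      # unchanged on POSIX hosts
```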

View File

@@ -12,7 +12,7 @@
# Archive extensions allowed in Spack
PREFIX_EXTENSIONS = ("tar", "TAR")
EXTENSIONS = ("gz", "bz2", "xz", "Z")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz", "whl")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz")
# Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = (
@@ -357,8 +357,10 @@ def strip_version_suffixes(path_or_url: str) -> str:
r"i[36]86",
r"ppc64(le)?",
r"armv?(7l|6l|64)?",
# PyPI wheels
r"-(?:py|cp)[23].*",
# PyPI
r"[._-]py[23].*\.whl",
r"[._-]cp[23].*\.whl",
r"[._-]win.*\.exe",
]
for regex in suffix_regexes:
@@ -401,7 +403,7 @@ def expand_contracted_extension_in_path(
def compression_ext_from_compressed_archive(extension: str) -> Optional[str]:
"""Returns compression extension for a compressed archive"""
extension = expand_contracted_extension(extension)
for ext in EXTENSIONS:
for ext in [*EXTENSIONS]:
if ext in extension:
return ext
return None

View File

@@ -198,32 +198,15 @@ def getuid():
return os.getuid()
def _win_rename(src, dst):
# os.replace will still fail if on Windows (but not POSIX) if the dst
# is a symlink to a directory (all other cases have parity Windows <-> Posix)
if os.path.islink(dst) and os.path.isdir(os.path.realpath(dst)):
if os.path.samefile(src, dst):
# src and dst are the same
# do nothing and exit early
return
# If dst exists and is a symlink to a directory
# we need to remove dst and then perform rename/replace
# this is safe to do as there's no chance src == dst now
os.remove(dst)
os.replace(src, dst)
@system_path_filter
def rename(src, dst):
# On Windows, os.rename will fail if the destination file already exists
# os.replace is the same as os.rename on POSIX and is MoveFileExW w/
# the MOVEFILE_REPLACE_EXISTING flag on Windows
# Windows invocation is abstracted behind additional logic handling
# remaining cases of divergent behavior across platforms
if sys.platform == "win32":
_win_rename(src, dst)
else:
os.replace(src, dst)
# Windows path existence checks will sometimes fail on junctions/links/symlinks
# so check for that case
if os.path.exists(dst) or islink(dst):
os.remove(dst)
os.rename(src, dst)
@system_path_filter
@@ -254,6 +237,16 @@ def _get_mime_type():
return file_command("-b", "-h", "--mime-type")
@memoized
def _get_mime_type_compressed():
"""Same as _get_mime_type but attempts to check for
compression first
"""
mime_uncompressed = _get_mime_type()
mime_uncompressed.add_default_arg("-Z")
return mime_uncompressed
def mime_type(filename):
"""Returns the mime type and subtype of a file.
@@ -269,6 +262,21 @@ def mime_type(filename):
return type, subtype
def compressed_mime_type(filename):
"""Same as mime_type but checks for type that has been compressed
Args:
filename (str): file to be analyzed
Returns:
Tuple containing the MIME type and subtype
"""
output = _get_mime_type_compressed()(filename, output=str, error=str).strip()
tty.debug("==> " + output)
type, _, subtype = output.partition("/")
return type, subtype
#: This generates the library filenames that may appear on any OS.
library_extensions = ["a", "la", "so", "tbd", "dylib"]
@@ -300,6 +308,13 @@ def paths_containing_libs(paths, library_names):
return rpaths_to_include
@system_path_filter
def same_path(path1, path2):
norm1 = os.path.abspath(path1).rstrip(os.path.sep)
norm2 = os.path.abspath(path2).rstrip(os.path.sep)
return norm1 == norm2
def filter_file(
regex: str,
repl: Union[str, Callable[[Match], str]],
@@ -894,6 +909,17 @@ def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
@system_path_filter
def get_filetype(path_name):
"""
Return the output of file path_name as a string to identify file type.
"""
file = Executable("file")
file.add_default_env("LC_ALL", "C")
output = file("-b", "-h", "%s" % path_name, output=str, error=str)
return output.strip()
def has_shebang(path):
"""Returns whether a path has a shebang line. Returns False if the file cannot be opened."""
try:
@@ -1143,6 +1169,20 @@ def write_tmp_and_move(filename):
shutil.move(tmp, filename)
@contextmanager
@system_path_filter
def open_if_filename(str_or_file, mode="r"):
"""Takes either a path or a file object, and opens it if it is a path.
If it's a file object, just yields the file object.
"""
if isinstance(str_or_file, str):
with open(str_or_file, mode) as f:
yield f
else:
yield str_or_file
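A small usage sketch for `open_if_filename` above: the same call site works whether it is handed a path or an already-open stream. The import location and file name are assumptions.

```python
import io

from llnl.util.filesystem import open_if_filename  # module assumed from this hunk

# With a path, the file is opened and closed for you.
with open_if_filename("notes.txt", mode="w") as f:
    f.write("hello\n")

# With an existing file object, that object is yielded unchanged.
with open_if_filename(io.StringIO("hello\n")) as f:
    print(f.read(), end="")
```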
@system_path_filter
def touch(path):
"""Creates an empty file at the specified path."""
@@ -1234,12 +1274,10 @@ def windows_sfn(path: os.PathLike):
import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# Method with null values returns size of short path name
sz = k32.GetShortPathNameW(path, None, 0)
# stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * sz
TCHAR_arr = ctypes.c_wchar * len(path)
ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ctypes.byref(ret_str), sz)
k32.GetShortPathNameW(path, ret_str, len(path))
return ret_str.value
@@ -1257,6 +1295,19 @@ def temp_cwd():
shutil.rmtree(tmp_dir, **kwargs)
@contextmanager
@system_path_filter
def temp_rename(orig_path, temp_path):
same_path = os.path.realpath(orig_path) == os.path.realpath(temp_path)
if not same_path:
shutil.move(orig_path, temp_path)
try:
yield
finally:
if not same_path:
shutil.move(temp_path, orig_path)
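A hedged sketch of the `temp_rename` context manager above: the original file is moved aside for the duration of the block and restored afterwards. The paths and import location are invented.

```python
import os
import pathlib

from llnl.util.filesystem import temp_rename  # module assumed from this hunk

pathlib.Path("settings.cfg").write_text("keep me\n")
with temp_rename("settings.cfg", "settings.cfg.orig"):
    print(os.path.exists("settings.cfg"))        # False: moved aside for the block
print(pathlib.Path("settings.cfg").read_text())  # restored afterwards: "keep me"
```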
@system_path_filter
def can_access(file_name):
"""True if we have read/write access to the file."""

View File

@@ -98,6 +98,36 @@ def caller_locals():
del stack
def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
enclosing module's name.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
try:
# Make sure locals contain __module__
caller_locals = stack[2][0].f_locals
finally:
del stack
if "__module__" not in caller_locals:
raise RuntimeError(
"Must invoke get_calling_module_name() " "from inside a class definition!"
)
module_name = caller_locals["__module__"]
base_name = module_name.split(".")[-1]
return base_name
def attr_required(obj, attr_name):
"""Ensure that a class has a required attribute."""
if not hasattr(obj, attr_name):
raise RequiredAttributeError(
"No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
)
def attr_setdefault(obj, name, value):
"""Like dict.setdefault, but for objects."""
if not hasattr(obj, name):
@@ -483,6 +513,42 @@ def copy(self):
return clone
def in_function(function_name):
"""True if the caller was called from some function with
the supplied name, False otherwise."""
stack = inspect.stack()
try:
for elt in stack[2:]:
if elt[3] == function_name:
return True
return False
finally:
del stack
def check_kwargs(kwargs, fun):
"""Helper for making functions with kwargs. Checks whether the kwargs
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
Example::
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
# This raises a TypeError:
foo(w='bad kwarg')
"""
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for function %s()."
% (next(iter(kwargs)), fun.__name__)
)
def match_predicate(*args):
"""Utility function for making string matching predicates.
@@ -698,6 +764,11 @@ def pretty_seconds(seconds):
return pretty_seconds_formatter(seconds)(seconds)
class RequiredAttributeError(ValueError):
def __init__(self, message):
super().__init__(message)
class ObjectWrapper:
"""Base class that wraps an object. Derived classes can add new behavior
while staying undercover.
@@ -772,30 +843,6 @@ def __repr__(self):
return repr(self.instance)
def get_entry_points(*, group: str):
"""Wrapper for ``importlib.metadata.entry_points``
Args:
group: entry points to select
Returns:
EntryPoints for ``group`` or empty list if unsupported
"""
try:
import importlib.metadata # type: ignore # novermin
except ImportError:
return []
try:
return importlib.metadata.entry_points(group=group)
except TypeError:
# Prior to Python 3.10, entry_points accepted no parameters and always
# returned a dictionary of entry points, keyed by group. See
# https://docs.python.org/3/library/importlib.metadata.html#entry-points
return importlib.metadata.entry_points().get(group, [])
def load_module_from_file(module_name, module_path):
"""Loads a python module from the path of the corresponding file.
@@ -864,6 +911,25 @@ def uniq(sequence):
return uniq_list
def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args):
return func(*args)
return _wrapper
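To make the purpose of `star` above concrete: mapping APIs hand the worker a single item per call, so a tuple of arguments has to be unpacked before the real function runs. Below is a self-contained sketch using plain `map` (the same adapter is what makes `Pool.map`-style calls with multi-argument workers possible); the worker function is invented.

```python
def star(func):
    """Adapter that unpacks one tuple of arguments, mirroring star() above."""

    def _wrapper(args):
        return func(*args)

    return _wrapper


def scale(value, factor):
    return value * factor


work_items = [(1, 10), (2, 10), (3, 10)]    # one tuple per call
print(list(map(star(scale), work_items)))   # [10, 20, 30]
```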
class Devnull:
"""Null stream with less overhead than ``os.devnull``.
See https://stackoverflow.com/a/2929954.
"""
def write(self, *_):
pass
def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example::

View File

@@ -815,6 +815,10 @@ def __init__(self, path):
super().__init__(msg)
class LockLimitError(LockError):
"""Raised when exceed maximum attempts to acquire a lock."""
class LockTimeoutError(LockError):
"""Raised when an attempt to acquire a lock times out."""

View File

@@ -44,6 +44,10 @@ def is_debug(level=1):
return _debug >= level
def is_stacktrace():
return _stacktrace
def set_debug(level=0):
global _debug
assert level >= 0, "Debug level must be a positive value"
@@ -248,6 +252,37 @@ def die(message, *args, **kwargs) -> NoReturn:
sys.exit(1)
def get_number(prompt, **kwargs):
default = kwargs.get("default", None)
abort = kwargs.get("abort", None)
if default is not None and abort is not None:
prompt += " (default is %s, %s to abort) " % (default, abort)
elif default is not None:
prompt += " (default is %s) " % default
elif abort is not None:
prompt += " (%s to abort) " % abort
number = None
while number is None:
msg(prompt, newline=False)
ans = input()
if ans == str(abort):
return None
if ans:
try:
number = int(ans)
if number < 1:
msg("Please enter a valid number.")
number = None
except ValueError:
msg("Please enter a valid number.")
elif default is not None:
number = default
return number
def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get("default", None)

View File

@@ -17,6 +17,7 @@
import tarfile
import tempfile
import time
import traceback
import urllib.error
import urllib.parse
import urllib.request
@@ -110,6 +111,10 @@ def __init__(self, errors):
super().__init__(self.message)
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class BinaryCacheIndex:
"""
The BinaryCacheIndex tracks what specs are available on (usually remote)
@@ -536,6 +541,83 @@ def binary_index_location():
BINARY_INDEX: BinaryCacheIndex = llnl.util.lang.Singleton(BinaryCacheIndex) # type: ignore
class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super().__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super().__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super().__init__(msg)
class InvalidMetadataFile(spack.error.SpackError):
pass
class UnsignedPackageException(spack.error.SpackError):
"""
Raised if installation of unsigned package is attempted without
the use of ``--no-check-signature``.
"""
def compute_hash(data):
if isinstance(data, str):
data = data.encode("utf-8")
@@ -910,10 +992,15 @@ def url_read_method(url):
if entry.endswith("spec.json") or entry.endswith("spec.json.sig")
]
read_fn = url_read_method
except KeyError as inst:
msg = "No packages at {0}: {1}".format(cache_prefix, inst)
tty.warn(msg)
except Exception as err:
# If we got some kind of S3 (access denied or other connection error), the first non
# boto-specific class in the exception is Exception. Just print a warning and return
tty.warn(f"Encountered problem listing packages at {cache_prefix}: {err}")
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = "Encountered problem listing packages at {0}: {1}".format(cache_prefix, err)
tty.warn(msg)
return file_list, read_fn
@@ -960,10 +1047,11 @@ def generate_package_index(cache_prefix, concurrency=32):
"""
try:
file_list, read_fn = _spec_files_from_cache(cache_prefix)
except ListMirrorSpecsError as e:
raise GenerateIndexError(f"Unable to generate package index: {e}") from e
except ListMirrorSpecsError as err:
tty.error("Unable to generate package index, {0}".format(err))
return
tty.debug(f"Retrieving spec descriptor files from {cache_prefix} to build index")
tty.debug("Retrieving spec descriptor files from {0} to build index".format(cache_prefix))
tmpdir = tempfile.mkdtemp()
@@ -973,22 +1061,27 @@ def generate_package_index(cache_prefix, concurrency=32):
try:
_read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency)
except Exception as e:
raise GenerateIndexError(
f"Encountered problem pushing package index to {cache_prefix}: {e}"
) from e
except Exception as err:
msg = "Encountered problem pushing package index to {0}: {1}".format(cache_prefix, err)
tty.warn(msg)
tty.debug("\n" + traceback.format_exc())
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
shutil.rmtree(tmpdir)
def generate_key_index(key_prefix, tmpdir=None):
"""Create the key index page.
Creates (or replaces) the "index.json" page at the location given in key_prefix. This page
contains an entry for each key (.pub) under key_prefix.
Creates (or replaces) the "index.json" page at the location given in
key_prefix. This page contains an entry for each key (.pub) under
key_prefix.
"""
tty.debug(f"Retrieving key.pub files from {url_util.format(key_prefix)} to build key index")
tty.debug(
" ".join(
("Retrieving key.pub files from", url_util.format(key_prefix), "to build key index")
)
)
try:
fingerprints = (
@@ -996,8 +1089,17 @@ def generate_key_index(key_prefix, tmpdir=None):
for entry in web_util.list_url(key_prefix, recursive=False)
if entry.endswith(".pub")
)
except Exception as e:
raise CannotListKeys(f"Encountered problem listing keys at {key_prefix}: {e}") from e
except KeyError as inst:
msg = "No keys at {0}: {1}".format(key_prefix, inst)
tty.warn(msg)
return
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = "Encountered problem listing keys at {0}: {1}".format(key_prefix, err)
tty.warn(msg)
return
remove_tmpdir = False
@@ -1022,13 +1124,12 @@ def generate_key_index(key_prefix, tmpdir=None):
keep_original=False,
extra_args={"ContentType": "application/json"},
)
except Exception as e:
raise GenerateIndexError(
f"Encountered problem pushing key index to {key_prefix}: {e}"
) from e
except Exception as err:
msg = "Encountered problem pushing key index to {0}: {1}".format(key_prefix, err)
tty.warn(msg)
finally:
if remove_tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
shutil.rmtree(tmpdir)
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
@@ -1099,8 +1200,7 @@ def push_or_raise(spec: Spec, out_url: str, options: PushOptions):
used at the mirror (following <tarball_directory_name>).
This method raises :py:class:`NoOverwriteException` when ``force=False`` and the tarball or
spec.json file already exist in the buildcache. It raises :py:class:`PushToBuildCacheError`
when the tarball or spec.json file cannot be pushed to the buildcache.
spec.json file already exist in the buildcache.
"""
if not spec.concrete:
raise ValueError("spec must be concrete to build tarball")
@@ -1178,18 +1278,13 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
key = select_signing_key(options.key)
sign_specfile(key, options.force, specfile_path)
try:
# push tarball and signed spec json to remote mirror
web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(
signed_specfile_path if not options.unsigned else specfile_path,
remote_signed_specfile_path if not options.unsigned else remote_specfile_path,
keep_original=False,
)
except Exception as e:
raise PushToBuildCacheError(
f"Encountered problem pushing binary {remote_spackfile_path}: {e}"
) from e
# push tarball and signed spec json to remote mirror
web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(
signed_specfile_path if not options.unsigned else specfile_path,
remote_signed_specfile_path if not options.unsigned else remote_specfile_path,
keep_original=False,
)
# push the key to the build cache's _pgp directory so it can be
# imported
@@ -1201,6 +1296,8 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
if options.regenerate_index:
generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir)))
return None
class NotInstalledError(spack.error.SpackError):
"""Raised when a spec is not installed but picked to be packaged."""
@@ -1255,6 +1352,28 @@ def specs_to_be_packaged(
return [s for s in itertools.chain(roots, deps) if not s.external]
def push(spec: Spec, mirror_url: str, options: PushOptions):
"""Create and push binary package for a single spec to the specified
mirror url.
Args:
spec: Spec to package and push
mirror_url: Desired destination url for binary package
options:
Returns:
True if package was pushed, False otherwise.
"""
try:
push_or_raise(spec, mirror_url, options)
except NoOverwriteException as e:
warnings.warn(str(e))
return False
return True
def try_verify(specfile_path):
"""Utility function to attempt to verify a local file. Assumes the
file is a clearsigned signature file.
@@ -2587,96 +2706,3 @@ def conditional_fetch(self) -> FetchIndexResult:
raise FetchIndexError(f"Remote index {url_manifest} is invalid")
return FetchIndexResult(etag=None, hash=index_digest.digest, data=result, fresh=False)
class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super().__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super().__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super().__init__(msg)
class InvalidMetadataFile(spack.error.SpackError):
pass
class UnsignedPackageException(spack.error.SpackError):
"""
Raised if installation of unsigned package is attempted without
the use of ``--no-check-signature``.
"""
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class GenerateIndexError(spack.error.SpackError):
"""Raised when unable to generate key or package index for mirror"""
class CannotListKeys(GenerateIndexError):
"""Raised when unable to list keys when generating key index"""
class PushToBuildCacheError(spack.error.SpackError):
"""Raised when unable to push objects to binary mirror"""

View File

@@ -213,6 +213,9 @@ def _root_spec(spec_str: str) -> str:
platform = str(spack.platforms.host())
if platform == "darwin":
spec_str += " %apple-clang"
elif platform == "windows":
# TODO (johnwparent): Remove version constraint when clingo patch is up
spec_str += " %msvc@:19.37"
elif platform == "linux":
spec_str += " %gcc"
elif platform == "freebsd":

View File

@@ -147,7 +147,7 @@ def _add_compilers_if_missing() -> None:
mixed_toolchain=sys.platform == "darwin"
)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers)
spack.compilers.add_compilers_to_config(new_compilers, init_config=False)
@contextlib.contextmanager

View File

@@ -559,49 +559,12 @@ def ensure_patchelf_in_path_or_raise() -> spack.util.executable.Executable:
)
def ensure_winsdk_external_or_raise() -> None:
"""Ensure the Windows SDK + WGL are available on system
If both of these packages are found, the Spack user or bootstrap
configuration (depending on where Spack is running)
will be updated to include all versions and variants detected.
If either the WDK or WSDK are not found, this method will raise
a RuntimeError.
**NOTE:** This modifies the Spack config in the current scope,
either user or environment depending on the calling context.
This is different from all other current bootstrap dependency
checks.
"""
if set(["win-sdk", "wgl"]).issubset(spack.config.get("packages").keys()):
return
externals = spack.detection.by_path(["win-sdk", "wgl"])
if not set(["win-sdk", "wgl"]) == externals.keys():
missing_packages_lst = []
if "wgl" not in externals:
missing_packages_lst.append("wgl")
if "win-sdk" not in externals:
missing_packages_lst.append("win-sdk")
missing_packages = " & ".join(missing_packages_lst)
raise RuntimeError(
f"Unable to find the {missing_packages}, please install these packages\
via the Visual Studio installer\
before proceeding with Spack or provide the path to a non standard install via\
'spack external find --path'"
)
# wgl/sdk are not required for bootstrapping Spack, but
# are required for building anything non trivial
# add to user config so they can be used by subsequent Spack ops
spack.detection.update_configuration(externals, buildable=False)
def ensure_core_dependencies() -> None:
"""Ensure the presence of all the core dependencies."""
if sys.platform.lower() == "linux":
ensure_patchelf_in_path_or_raise()
if not IS_WINDOWS:
ensure_gpg_in_path_or_raise()
else:
ensure_winsdk_external_or_raise()
ensure_clingo_importable_or_raise()

View File

@@ -57,10 +57,8 @@
import spack.build_systems.meson
import spack.build_systems.python
import spack.builder
import spack.compilers
import spack.config
import spack.deptypes as dt
import spack.error
import spack.main
import spack.package_base
import spack.paths
@@ -68,7 +66,6 @@
import spack.repo
import spack.schema.environment
import spack.spec
import spack.stage
import spack.store
import spack.subprocess_context
import spack.user_environment
@@ -81,7 +78,7 @@
from spack.installer import InstallError
from spack.util.cpus import determine_number_of_jobs
from spack.util.environment import (
SYSTEM_DIR_CASE_ENTRY,
SYSTEM_DIRS,
EnvironmentModifications,
env_flag,
filter_system_paths,
@@ -104,13 +101,9 @@
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = "SPACK_ENV_PATH"
SPACK_MANAGED_DIRS = "SPACK_MANAGED_DIRS"
SPACK_INCLUDE_DIRS = "SPACK_INCLUDE_DIRS"
SPACK_LINK_DIRS = "SPACK_LINK_DIRS"
SPACK_RPATH_DIRS = "SPACK_RPATH_DIRS"
SPACK_STORE_INCLUDE_DIRS = "SPACK_STORE_INCLUDE_DIRS"
SPACK_STORE_LINK_DIRS = "SPACK_STORE_LINK_DIRS"
SPACK_STORE_RPATH_DIRS = "SPACK_STORE_RPATH_DIRS"
SPACK_RPATH_DEPS = "SPACK_RPATH_DEPS"
SPACK_LINK_DEPS = "SPACK_LINK_DEPS"
SPACK_PREFIX = "SPACK_PREFIX"
@@ -423,7 +416,7 @@ def set_compiler_environment_variables(pkg, env):
env.set("SPACK_COMPILER_SPEC", str(spec.compiler))
env.set("SPACK_SYSTEM_DIRS", SYSTEM_DIR_CASE_ENTRY)
env.set("SPACK_SYSTEM_DIRS", ":".join(SYSTEM_DIRS))
compiler.setup_custom_environment(pkg, env)
@@ -551,23 +544,9 @@ def update_compiler_args_for_dep(dep):
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
spack_managed_dirs: List[str] = [
spack.stage.get_stage_root(),
spack.store.STORE.db.root,
*(db.root for db in spack.store.STORE.db.upstream_dbs),
]
env.set(SPACK_MANAGED_DIRS, "|".join(f'"{p}/"*' for p in spack_managed_dirs))
is_spack_managed = lambda p: any(p.startswith(store) for store in spack_managed_dirs)
link_dirs_spack, link_dirs_system = stable_partition(link_dirs, is_spack_managed)
include_dirs_spack, include_dirs_system = stable_partition(include_dirs, is_spack_managed)
rpath_dirs_spack, rpath_dirs_system = stable_partition(rpath_dirs, is_spack_managed)
env.set(SPACK_LINK_DIRS, ":".join(link_dirs_system))
env.set(SPACK_INCLUDE_DIRS, ":".join(include_dirs_system))
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs_system))
env.set(SPACK_STORE_LINK_DIRS, ":".join(link_dirs_spack))
env.set(SPACK_STORE_INCLUDE_DIRS, ":".join(include_dirs_spack))
env.set(SPACK_STORE_RPATH_DIRS, ":".join(rpath_dirs_spack))
env.set(SPACK_LINK_DIRS, ":".join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ":".join(include_dirs))
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))
def set_package_py_globals(pkg, context: Context = Context.BUILD):
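A minimal sketch of the managed/system directory split used in the hunk above, assuming stable_partition keeps the input order and splits by a predicate; the store root and paths are invented for illustration.

from typing import Callable, Iterable, List, Tuple

def stable_partition_sketch(
    items: Iterable[str], predicate: Callable[[str], bool]
) -> Tuple[List[str], List[str]]:
    """Return (matching, non_matching), each preserving the original order."""
    matching: List[str] = []
    non_matching: List[str] = []
    for item in items:
        (matching if predicate(item) else non_matching).append(item)
    return matching, non_matching

# Hypothetical store root and link directories, only to show the split.
store_root = "/opt/spack/store"
link_dirs = [f"{store_root}/zlib-1.3/lib", "/usr/lib64", f"{store_root}/ncurses-6.4/lib"]
spack_link_dirs, system_link_dirs = stable_partition_sketch(
    link_dirs, lambda p: p.startswith(store_root)
)
# spack_link_dirs  -> ['/opt/spack/store/zlib-1.3/lib', '/opt/spack/store/ncurses-6.4/lib']
# system_link_dirs -> ['/usr/lib64']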
@@ -604,22 +583,10 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
# Put spack compiler paths in module scope. (Some packages use it
# in setup_run_environment etc, so don't put it context == build)
link_dir = spack.paths.build_env_path
pkg_compiler = None
try:
pkg_compiler = pkg.compiler
except spack.compilers.NoCompilerForSpecError as e:
tty.debug(f"cannot set 'spack_cc': {str(e)}")
if pkg_compiler is not None:
module.spack_cc = os.path.join(link_dir, pkg_compiler.link_paths["cc"])
module.spack_cxx = os.path.join(link_dir, pkg_compiler.link_paths["cxx"])
module.spack_f77 = os.path.join(link_dir, pkg_compiler.link_paths["f77"])
module.spack_fc = os.path.join(link_dir, pkg_compiler.link_paths["fc"])
else:
module.spack_cc = None
module.spack_cxx = None
module.spack_f77 = None
module.spack_fc = None
module.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
module.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
module.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
module.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])
# Useful directories within the prefix are encapsulated in
# a Prefix object.
@@ -822,7 +789,7 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
for mod in ["cray-mpich", "cray-libsci"]:
module("unload", mod)
if target and target.module_name:
if target.module_name:
load_module(target.module_name)
load_external_modules(pkg)

View File

@@ -434,6 +434,11 @@ def _do_patch_libtool(self):
r"crtendS\.o",
]:
x.filter(regex=(rehead + o), repl="")
elif self.pkg.compiler.name == "dpcpp":
# Hack to filter out spurious predep_objects when building with Intel dpcpp
# (see https://github.com/spack/spack/issues/32863):
x.filter(regex=r"^(predep_objects=.*)/tmp/conftest-[0-9A-Fa-f]+\.o", repl=r"\1")
x.filter(regex=r"^(predep_objects=.*)/tmp/a-[0-9A-Fa-f]+\.o", repl=r"\1")
elif self.pkg.compiler.name == "nag":
for tag in ["fc", "f77"]:
marker = markers[tag]
@@ -536,7 +541,7 @@ def autoreconf(self, pkg, spec, prefix):
if os.path.exists(self.configure_abs_path):
return
# Else try to regenerate it, which requires a few build dependencies
# Else try to regenerate it, which reuquires a few build dependencies
ensure_build_dependencies_or_raise(
spec=spec,
dependencies=["autoconf", "automake", "libtool"],

View File

@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections.abc
import os
import re
from typing import Tuple
import llnl.util.filesystem as fs
@@ -16,12 +15,6 @@
from .cmake import CMakeBuilder, CMakePackage
def spec_uses_toolchain(spec):
gcc_toolchain_regex = re.compile(".*gcc-toolchain.*")
using_toolchain = list(filter(gcc_toolchain_regex.match, spec.compiler_flags["cxxflags"]))
return using_toolchain
def cmake_cache_path(name, value, comment="", force=False):
"""Generate a string for a cmake cache variable"""
force_str = " FORCE" if force else ""
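For orientation, a hedged sketch (not the builder's exact code) of the kind of line a cmake_cache_path-style helper contributes to the generated initial cache file:

def cmake_cache_path_sketch(name: str, value: str, comment: str = "", force: bool = False) -> str:
    """Approximate a 'set(<name> "<value>" CACHE PATH "<comment>" [FORCE])' cache entry."""
    force_str = " FORCE" if force else ""
    return f'set({name} "{value}" CACHE PATH "{comment}"{force_str})\n'

# cmake_cache_path_sketch("HIP_ROOT_DIR", "/opt/rocm")
#   -> 'set(HIP_ROOT_DIR "/opt/rocm" CACHE PATH "")\n'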
@@ -220,7 +213,7 @@ def initconfig_mpi_entries(self):
else:
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC
if spec["cmake"].satisfies("@3.10:"):
if self.pkg.spec["cmake"].satisfies("@3.10:"):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec))
else:
entries.append(cmake_cache_path("MPIEXEC", mpiexec))
@@ -255,17 +248,12 @@ def initconfig_hardware_entries(self):
# Include the deprecated CUDA_TOOLKIT_ROOT_DIR for supporting BLT packages
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir))
# CUDA_FLAGS
cuda_flags = []
if not spec.satisfies("cuda_arch=none"):
cuda_archs = ";".join(spec.variants["cuda_arch"].value)
entries.append(cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", cuda_archs))
if spec_uses_toolchain(spec):
cuda_flags.append("-Xcompiler {}".format(spec_uses_toolchain(spec)[0]))
entries.append(cmake_cache_string("CMAKE_CUDA_FLAGS", " ".join(cuda_flags)))
archs = spec.variants["cuda_arch"].value
if archs[0] != "none":
arch_str = ";".join(archs)
entries.append(
cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", "{0}".format(arch_str))
)
if "+rocm" in spec:
entries.append("#------------------{0}".format("-" * 30))
@@ -274,6 +262,9 @@ def initconfig_hardware_entries(self):
# Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary
entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix)))
entries.append(
cmake_cache_path("HIP_CXX_COMPILER", "{0}".format(self.spec["hip"].hipcc))
)
llvm_bin = spec["llvm-amdgpu"].prefix.bin
llvm_prefix = spec["llvm-amdgpu"].prefix
# Some ROCm systems seem to point to /<path>/rocm-<ver>/ and
@@ -286,9 +277,11 @@ def initconfig_hardware_entries(self):
archs = self.spec.variants["amdgpu_target"].value
if archs[0] != "none":
arch_str = ";".join(archs)
entries.append(cmake_cache_string("CMAKE_HIP_ARCHITECTURES", arch_str))
entries.append(cmake_cache_string("AMDGPU_TARGETS", arch_str))
entries.append(cmake_cache_string("GPU_TARGETS", arch_str))
entries.append(
cmake_cache_string("CMAKE_HIP_ARCHITECTURES", "{0}".format(arch_str))
)
entries.append(cmake_cache_string("AMDGPU_TARGETS", "{0}".format(arch_str)))
entries.append(cmake_cache_string("GPU_TARGETS", "{0}".format(arch_str)))
return entries

View File

@@ -16,7 +16,7 @@
class CargoPackage(spack.package_base.PackageBase):
"""Specialized class for packages built using cargo."""
"""Specialized class for packages built using a Makefiles."""
#: This attribute is used in UI queries that need to know the build
#: system base class

View File

@@ -21,7 +21,7 @@
class MakefilePackage(spack.package_base.PackageBase):
"""Specialized class for packages built using Makefiles."""
"""Specialized class for packages built using a Makefiles."""
#: This attribute is used in UI queries that need to know the build
#: system base class

View File

@@ -14,7 +14,7 @@
from llnl.util.link_tree import LinkTree
from spack.build_environment import dso_suffix
from spack.directives import conflicts, license, variant
from spack.directives import conflicts, variant
from spack.package_base import InstallError
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable
@@ -26,7 +26,6 @@ class IntelOneApiPackage(Package):
"""Base class for Intel oneAPI packages."""
homepage = "https://software.intel.com/oneapi"
license("https://intel.ly/393CijO")
# oneAPI license does not allow mirroring outside of the
# organization (e.g. University/Company).

View File

@@ -4,15 +4,12 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
from typing import Iterable
from llnl.util.filesystem import filter_file, find
from llnl.util.lang import memoized
from llnl.util.filesystem import filter_file
import spack.builder
import spack.package_base
from spack.directives import build_system, extends
from spack.install_test import SkipTest, test_part
from spack.util.executable import Executable
from ._checks import BaseBuilder, execute_build_time_tests
@@ -31,58 +28,6 @@ class PerlPackage(spack.package_base.PackageBase):
extends("perl", when="build_system=perl")
@property
@memoized
def _platform_dir(self):
"""Name of platform-specific module subdirectory."""
perl = self.spec["perl"].command
options = "-E", "use Config; say $Config{archname}"
out = perl(*options, output=str.split, error=str.split)
return out.strip()
@property
def use_modules(self) -> Iterable[str]:
"""Names of the package's perl modules."""
module_files = find(self.prefix.lib, ["*.pm"], recursive=True)
# Drop the platform directory, if present
if self._platform_dir:
platform_dir = self._platform_dir + os.sep
module_files = [m.replace(platform_dir, "") for m in module_files]
# Drop the extension and library path
prefix = self.prefix.lib + os.sep
modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files]
# Drop the perl subdirectory as well
return ["::".join(m.split(os.sep)[1:]) for m in modules]
@property
def skip_modules(self) -> Iterable[str]:
"""Names of modules that should be skipped when running tests.
These are a subset of use_modules.
Returns:
List of strings of module names.
"""
return []
def test_use(self):
"""Test 'use module'"""
if not self.use_modules:
raise SkipTest("Test requires use_modules package property.")
perl = self.spec["perl"].command
for module in self.use_modules:
if module in self.skip_modules:
continue
with test_part(self, f"test_use-{module}", purpose=f"checking use of {module}"):
options = ["-we", f'use strict; use {module}; print("OK\n")']
out = perl(*options, output=str.split, error=str.split)
assert "OK" in out
@spack.builder.builder("perl")
class PerlBuilder(BaseBuilder):
@@ -107,7 +52,7 @@ class PerlBuilder(BaseBuilder):
phases = ("configure", "build", "install")
#: Names associated with package methods in the old build-system format
legacy_methods = ("configure_args", "check", "test_use")
legacy_methods = ("configure_args", "check")
#: Names associated with package attributes in the old build-system format
legacy_attributes = ()
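A short worked sketch of the path-to-module-name conversion performed by the use_modules property shown above; the prefix and file layout are hypothetical.

import os

def perl_module_names_sketch(prefix_lib: str, module_files, platform_dir: str = ""):
    """Turn installed *.pm paths into Perl module names, e.g. .../Foo/Bar.pm -> Foo::Bar."""
    if platform_dir:
        module_files = [m.replace(platform_dir + os.sep, "") for m in module_files]
    prefix = prefix_lib + os.sep
    modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files]
    # Drop the leading perl version subdirectory, as the property above does.
    return ["::".join(m.split(os.sep)[1:]) for m in modules]

# Hypothetical layout (POSIX separators): <prefix>/lib/perl5/Foo/Bar.pm
# perl_module_names_sketch("/opt/example/lib", ["/opt/example/lib/perl5/Foo/Bar.pm"])
#   -> ["Foo::Bar"]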

View File

@@ -27,7 +27,7 @@
import spack.package_base
import spack.spec
import spack.store
from spack.directives import build_system, depends_on, extends
from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import test_part
from spack.spec import Spec
@@ -56,6 +56,8 @@ def _flatten_dict(dictionary: Mapping[str, object]) -> Iterable[str]:
class PythonExtension(spack.package_base.PackageBase):
maintainers("adamjstewart")
@property
def import_modules(self) -> Iterable[str]:
"""Names of modules that the Python package provides.

View File

@@ -75,8 +75,6 @@
# does not like its directory structure.
#
import os
import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.package_base import PackageBase
@@ -156,32 +154,6 @@ def hip_flags(amdgpu_target):
archs = ",".join(amdgpu_target)
return "--amdgpu-target={0}".format(archs)
# ASAN
@staticmethod
def asan_on(env, llvm_path):
env.set("CC", llvm_path + "/bin/clang")
env.set("CXX", llvm_path + "/bin/clang++")
env.set("ASAN_OPTIONS", "detect_leaks=0")
for root, dirs, files in os.walk(llvm_path):
if "libclang_rt.asan-x86_64.so" in files:
asan_lib_path = root
env.prepend_path("LD_LIBRARY_PATH", asan_lib_path)
SET_DWARF_VERSION_4 = ""
try:
# This will throw an error if imported on a non-Linux platform.
import distro
distname = distro.id()
except ImportError:
distname = "unknown"
if "rhel" in distname or "sles" in distname:
SET_DWARF_VERSION_4 = "-gdwarf-5"
env.set("CFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
env.set("CXXFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
env.set("LDFLAGS", "-Wl,--enable-new-dtags -fuse-ld=lld -fsanitize=address -g -Wl,")
# HIP version vs Architecture
# TODO: add a bunch of lines like:

View File

@@ -9,8 +9,6 @@
import inspect
from typing import List, Optional, Tuple
from llnl.util import lang
import spack.build_environment
#: Builder classes, as registered by the "builder" decorator
@@ -233,27 +231,24 @@ def __new__(mcs, name, bases, attr_dict):
for temporary_stage in (_RUN_BEFORE, _RUN_AFTER):
staged_callbacks = temporary_stage.callbacks
# Here we have an adapter from an old-style package. This means there is no
# hierarchy of builders, and every callback that had to be combined between
# *Package and *Builder has been combined already by _PackageAdapterMeta
if name == "Adapter":
# We don't have callbacks in this class, move on
if not staged_callbacks:
continue
# If we are here we have callbacks. To get a complete list, we accumulate all the
# callbacks from base classes, we deduplicate them, then prepend what we have
# registered here.
# If we are here we have callbacks. To get a complete list, get first what
# was attached to parent classes, then prepend what we have registered here.
#
# The order should be:
# 1. Callbacks are registered in order within the same class
# 2. Callbacks defined in derived classes precede those defined in base
# classes
callbacks_from_base = []
for base in bases:
current_callbacks = getattr(base, temporary_stage.attribute_name, None)
if not current_callbacks:
continue
callbacks_from_base.extend(current_callbacks)
callbacks_from_base = list(lang.dedupe(callbacks_from_base))
callbacks_from_base = getattr(base, temporary_stage.attribute_name, None)
if callbacks_from_base:
break
else:
callbacks_from_base = []
# Set the callbacks in this class and flush the temporary stage
attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base
del temporary_stage.callbacks[:]
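For reference, a tiny order-preserving deduplication sketch, written generically rather than with llnl.util.lang.dedupe, to show the semantics the accumulated callback list above depends on:

def dedupe_sketch(sequence):
    """Yield items in their original order, keeping only the first occurrence of each."""
    seen = set()
    for item in sequence:
        if item not in seen:
            seen.add(item)
            yield item

# list(dedupe_sketch(["a", "b", "a", "c"])) -> ["a", "b", "c"]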

File diff suppressed because it is too large

View File

@@ -334,7 +334,8 @@ def display_specs(specs, args=None, **kwargs):
variants (bool): Show variants with specs
indent (int): indent each line this much
groups (bool): display specs grouped by arch/compiler (default True)
decorator (typing.Callable): function to call to decorate specs
decorators (dict): dictionary mapping specs to decorators
header_callback (typing.Callable): called at start of arch/compiler groups
all_headers (bool): show headers even when arch/compiler aren't defined
output (typing.IO): A file object to write to. Default is ``sys.stdout``
@@ -383,13 +384,15 @@ def get_arg(name, default=None):
vfmt = "{variants}" if variants else ""
format_string = nfmt + "{@version}" + ffmt + vfmt
transform = {"package": decorator, "fullpackage": decorator}
def fmt(s, depth=0):
"""Formatter function for all output specs"""
string = ""
if hashes:
string += gray_hash(s, hlen) + " "
string += depth * " "
string += decorator(s, s.cformat(format_string))
string += s.cformat(format_string, transform=transform)
return string
def format_list(specs):
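To make the decorator argument concrete, a hedged sketch of a callable that display_specs could be handed: it receives a spec and its already-formatted text and returns the (possibly highlighted) string; the markup mirrors the @*{...} style used elsewhere in this diff.

def highlight_roots_decorator(roots):
    """Build a decorator(spec, formatted) -> str that marks root specs; illustrative only."""
    root_hashes = {root.dag_hash() for root in roots}

    def decorator(spec, formatted):
        if spec.dag_hash() in root_hashes:
            # Spack-style bold markup; real code would pass this through color.colorize.
            return "@*{%s}" % formatted
        return formatted

    return decorator

# usage sketch:
#   cmd.display_specs(results, args, decorator=highlight_roots_decorator(env.roots()))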
@@ -448,7 +451,7 @@ def filter_loaded_specs(specs):
return [x for x in specs if x.dag_hash() in hashes]
def print_how_many_pkgs(specs, pkg_type="", suffix=""):
def print_how_many_pkgs(specs, pkg_type=""):
"""Given a list of specs, this will print a message about how many
specs are in that list.
@@ -459,7 +462,7 @@ def print_how_many_pkgs(specs, pkg_type="", suffix=""):
category, e.g. if pkg_type is "installed" then the message
would be "3 installed packages"
"""
tty.msg("%s" % llnl.string.plural(len(specs), pkg_type + " package") + suffix)
tty.msg("%s" % llnl.string.plural(len(specs), pkg_type + " package"))
def spack_is_git_repo():

View File

@@ -275,37 +275,23 @@ def setup_parser(subparser: argparse.ArgumentParser):
# Sync buildcache entries from one mirror to another
sync = subparsers.add_parser("sync", help=sync_fn.__doc__)
sync_manifest_source = sync.add_argument_group(
"Manifest Source",
"Specify a list of build cache objects to sync using manifest file(s)."
'This option takes the place of the "source mirror" for synchronization'
'and optionally takes a "destination mirror" ',
sync.add_argument(
"--manifest-glob", help="a quoted glob pattern identifying copy manifest files"
)
sync_manifest_source.add_argument(
"--manifest-glob", help="a quoted glob pattern identifying CI rebuild manifest files"
)
sync_source_mirror = sync.add_argument_group(
"Named Source",
"Specify a single registered source mirror to synchronize from. This option requires"
"the specification of a destination mirror.",
)
sync_source_mirror.add_argument(
sync.add_argument(
"src_mirror",
metavar="source mirror",
nargs="?",
type=arguments.mirror_name_or_url,
nargs="?",
help="source mirror name, path, or URL",
)
sync.add_argument(
"dest_mirror",
metavar="destination mirror",
nargs="?",
type=arguments.mirror_name_or_url,
nargs="?",
help="destination mirror name, path, or URL",
)
sync.set_defaults(func=sync_fn)
# Update buildcache index without copying any additional packages
@@ -1084,17 +1070,7 @@ def sync_fn(args):
requires an active environment in order to know which specs to sync
"""
if args.manifest_glob:
# Passing the args.src_mirror here because it is not possible to
# have the destination be required when specifying a named source
# mirror and optional for the --manifest-glob argument. In the case
# of manifest glob sync, the source mirror positional argument is the
# destination mirror if it is specified. If there are two mirrors
# specified, the second is ignored and the first is the override
# destination.
if args.dest_mirror:
tty.warn(f"Ignoring unused arguemnt: {args.dest_mirror.name}")
manifest_copy(glob.glob(args.manifest_glob), args.src_mirror)
manifest_copy(glob.glob(args.manifest_glob))
return 0
if args.src_mirror is None or args.dest_mirror is None:
@@ -1145,7 +1121,7 @@ def sync_fn(args):
shutil.rmtree(tmpdir)
def manifest_copy(manifest_file_list, dest_mirror=None):
def manifest_copy(manifest_file_list):
"""Read manifest files containing information about specific specs to copy
from source to destination, remove duplicates since any binary package for
a given hash should be the same as any other, and copy all files specified
@@ -1159,17 +1135,10 @@ def manifest_copy(manifest_file_list, dest_mirror=None):
# Last duplicate hash wins
deduped_manifest[spec_hash] = copy_list
build_cache_dir = bindist.build_cache_relative_path()
for spec_hash, copy_list in deduped_manifest.items():
for copy_file in copy_list:
dest = copy_file["dest"]
if dest_mirror:
src_relative_path = os.path.join(
build_cache_dir, copy_file["src"].rsplit(build_cache_dir, 1)[1].lstrip("/")
)
dest = url_util.join(dest_mirror.push_url, src_relative_path)
tty.debug("copying {0} to {1}".format(copy_file["src"], dest))
copy_buildcache_file(copy_file["src"], dest)
tty.debug("copying {0} to {1}".format(copy_file["src"], copy_file["dest"]))
copy_buildcache_file(copy_file["src"], copy_file["dest"])
def update_index(mirror: spack.mirror.Mirror, update_keys=False):
@@ -1196,18 +1165,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
url, bindist.build_cache_relative_path(), bindist.build_cache_keys_relative_path()
)
try:
bindist.generate_key_index(keys_url)
except bindist.CannotListKeys as e:
# Do not error out if listing keys went wrong. This usually means that the _gpg path
# does not exist. TODO: distinguish between this and other errors.
tty.warn(f"did not update the key index: {e}")
bindist.generate_key_index(keys_url)
def update_index_fn(args):
"""update a buildcache index"""
return update_index(args.mirror, update_keys=args.keys)
update_index(args.mirror, update_keys=args.keys)
def buildcache(parser, args):
return args.func(args)
if args.func:
args.func(args)

View File

@@ -183,7 +183,7 @@ def checksum(parser, args):
print()
if args.add_to_package:
add_versions_to_package(pkg, version_lines, args.batch)
add_versions_to_package(pkg, version_lines)
def print_checksum_status(pkg: PackageBase, version_hashes: dict):
@@ -229,7 +229,7 @@ def print_checksum_status(pkg: PackageBase, version_hashes: dict):
tty.die("Invalid checksums found.")
def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool):
def add_versions_to_package(pkg: PackageBase, version_lines: str):
"""
Add checksummed versions to a package's instructions and open a user's
editor so they may double check the work of the function.
@@ -282,5 +282,5 @@ def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool
tty.msg(f"Added {num_versions_added} new versions to {pkg.name}")
tty.msg(f"Open {filename} to review the additions.")
if sys.stdout.isatty() and not is_batch:
if sys.stdout.isatty():
editor(filename)

View File

@@ -14,7 +14,6 @@
import spack.binary_distribution as bindist
import spack.ci as spack_ci
import spack.cmd
import spack.cmd.buildcache as buildcache
import spack.config as cfg
import spack.environment as ev
@@ -33,7 +32,6 @@
SPACK_COMMAND = "spack"
MAKE_COMMAND = "make"
INSTALL_FAIL_CODE = 1
FAILED_CREATE_BUILDCACHE_CODE = 100
def deindent(desc):
@@ -707,9 +705,11 @@ def ci_rebuild(args):
cdash_handler.report_skipped(job_spec, reports_dir, reason=msg)
cdash_handler.copy_test_results(reports_dir, job_test_dir)
# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure
# outside of the pipeline environment.
if install_exit_code == 0:
# If the install succeeded, push it to one or more mirrors. Failure to push to any mirror
# will result in a non-zero exit code. Pushing is best-effort.
mirror_urls = [buildcache_mirror_url]
# TODO: Remove this block in Spack 0.23
@@ -721,12 +721,13 @@ def ci_rebuild(args):
destination_mirror_urls=mirror_urls,
sign_binaries=spack_ci.can_sign_binaries(),
):
if not result.success:
install_exit_code = FAILED_CREATE_BUILDCACHE_CODE
(tty.msg if result.success else tty.error)(
f'{"Pushed" if result.success else "Failed to push"} '
f'{job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when())} '
f"to {result.url}"
msg = tty.msg if result.success else tty.warn
msg(
"{} {} to {}".format(
"Pushed" if result.success else "Failed to push",
job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when()),
result.url,
)
)
# If this is a develop pipeline, check if the spec that we just built is
@@ -747,22 +748,22 @@ def ci_rebuild(args):
tty.warn(msg.format(broken_spec_path, err))
else:
# If the install did not succeed, print out some instructions on how to reproduce this
# build failure outside of the pipeline environment.
tty.debug("spack install exited non-zero, will not create buildcache")
api_root_url = os.environ.get("CI_API_V4_URL")
ci_project_id = os.environ.get("CI_PROJECT_ID")
ci_job_id = os.environ.get("CI_JOB_ID")
repro_job_url = f"{api_root_url}/projects/{ci_project_id}/jobs/{ci_job_id}/artifacts"
repro_job_url = "{0}/projects/{1}/jobs/{2}/artifacts".format(
api_root_url, ci_project_id, ci_job_id
)
# Control characters cause this to be printed in blue so it stands out
print(
f"""
reproduce_msg = """
\033[34mTo reproduce this build locally, run:
spack ci reproduce-build {repro_job_url} [--working-dir <dir>] [--autostart]
spack ci reproduce-build {0} [--working-dir <dir>] [--autostart]
If this project does not have public pipelines, you will need to first:
@@ -770,9 +771,12 @@ def ci_rebuild(args):
... then follow the printed instructions.\033[0;0m
"""
""".format(
repro_job_url
)
print(reproduce_msg)
rebuild_timer.stop()
try:
with open("install_timers.json", "w") as timelog:

View File

@@ -570,14 +570,6 @@ def add_concretizer_args(subparser):
default=None,
help="reuse installed dependencies only",
)
subgroup.add_argument(
"--deprecated",
action=ConfigSetAction,
dest="config:deprecated",
const=True,
default=None,
help="allow concretizer to select deprecated versions",
)
def add_connection_args(subparser, add_help):

View File

@@ -89,7 +89,7 @@ def compiler_find(args):
paths, scope=None, mixed_toolchain=args.mixed_toolchain
)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope)
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope, init_config=False)
n = len(new_compilers)
s = "s" if n > 1 else ""

View File

@@ -19,7 +19,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["jobs", "no_checksum", "spec"])
arguments.add_common_arguments(subparser, ["jobs"])
subparser.add_argument(
"-d",
"--source-path",
@@ -34,6 +34,7 @@ def setup_parser(subparser):
dest="ignore_deps",
help="do not try to install dependencies of requested packages",
)
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"--keep-prefix",
action="store_true",
@@ -62,6 +63,7 @@ def setup_parser(subparser):
choices=["root", "all"],
help="run tests on only root packages or all packages",
)
arguments.add_common_arguments(subparser, ["spec"])
stop_group = subparser.add_mutually_exclusive_group()
stop_group.add_argument(
@@ -123,6 +125,9 @@ def dev_build(self, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
tests = False
if args.test == "all":
tests = True

View File

@@ -9,7 +9,6 @@
import shutil
import sys
import tempfile
from pathlib import Path
from typing import Optional
import llnl.string as string
@@ -45,7 +44,6 @@
"deactivate",
"create",
["remove", "rm"],
["rename", "mv"],
["list", "ls"],
["status", "st"],
"loads",
@@ -474,82 +472,11 @@ def env_remove(args):
tty.msg(f"Successfully removed environment '{bad_env_name}'")
#
# env rename
#
def env_rename_setup_parser(subparser):
"""rename an existing environment"""
subparser.add_argument(
"mv_from", metavar="from", help="name (or path) of existing environment"
)
subparser.add_argument(
"mv_to", metavar="to", help="new name (or path) for existing environment"
)
subparser.add_argument(
"-d",
"--dir",
action="store_true",
help="the specified arguments correspond to directory paths",
)
subparser.add_argument(
"-f", "--force", action="store_true", help="allow overwriting of an existing environment"
)
def env_rename(args):
"""Rename an environment.
This renames a managed environment or moves an anonymous environment.
"""
# Directory option has been specified
if args.dir:
if not ev.is_env_dir(args.mv_from):
tty.die("The specified path does not correspond to a valid spack environment")
from_path = Path(args.mv_from)
if not args.force:
if ev.is_env_dir(args.mv_to):
tty.die(
"The new path corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
if Path(args.mv_to).exists():
tty.die("The new path already exists; specify the --force flag to overwrite it.")
to_path = Path(args.mv_to)
# Name option being used
elif ev.exists(args.mv_from):
from_path = ev.environment.environment_dir_from_name(args.mv_from)
if not args.force and ev.exists(args.mv_to):
tty.die(
"The new name corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
to_path = ev.environment.root(args.mv_to)
# Neither
else:
tty.die("The specified name does not correspond to a managed spack environment")
# Guard against renaming from or to an active environment
active_env = ev.active_environment()
if active_env:
from_env = ev.Environment(from_path)
if from_env.path == active_env.path:
tty.die("Cannot rename active environment")
if to_path == active_env.path:
tty.die(f"{args.mv_to} is an active environment")
shutil.rmtree(to_path, ignore_errors=True)
fs.rename(from_path, to_path)
tty.msg(f"Successfully renamed environment {args.mv_from} to {args.mv_to}")
#
# env list
#
def env_list_setup_parser(subparser):
"""list managed environments"""
"""list available environments"""
def env_list(args):

View File

@@ -18,7 +18,6 @@
import spack.cray_manifest as cray_manifest
import spack.detection
import spack.error
import spack.repo
import spack.util.environment
from spack.cmd.common import arguments
@@ -153,9 +152,9 @@ def external_find(args):
def packages_to_search_for(
*, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]]
):
result = list(
{pkg for tag in tags for pkg in spack.repo.PATH.packages_with_tags(tag, full=True)}
)
result = []
for current_tag in tags:
result.extend(spack.repo.PATH.packages_with_tags(current_tag, full=True))
if names:
# Match both fully qualified and unqualified

View File

@@ -18,7 +18,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"-m",
"--missing",
@@ -28,7 +28,7 @@ def setup_parser(subparser):
subparser.add_argument(
"-D", "--dependencies", action="store_true", help="also fetch all dependencies"
)
arguments.add_concretizer_args(subparser)
arguments.add_common_arguments(subparser, ["specs"])
subparser.epilog = (
"With an active environment, the specs "
"parameter can be omitted. In this case all (uninstalled"
@@ -40,6 +40,9 @@ def fetch(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.specs:
specs = spack.cmd.parse_specs(args.specs, concretize=True)
else:

View File

@@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import copy
import sys
import llnl.util.lang
@@ -13,7 +14,6 @@
import spack.cmd as cmd
import spack.environment as ev
import spack.repo
import spack.store
from spack.cmd.common import arguments
from spack.database import InstallStatuses
@@ -69,12 +69,6 @@ def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["long", "very_long", "tags", "namespaces"])
subparser.add_argument(
"-r",
"--only-roots",
action="store_true",
help="don't show full list of installed specs in an environment",
)
subparser.add_argument(
"-c",
"--show-concretized",
@@ -146,12 +140,6 @@ def setup_parser(subparser):
subparser.add_argument(
"--only-deprecated", action="store_true", help="show only deprecated packages"
)
subparser.add_argument(
"--install-tree",
action="store",
default="all",
help="Install trees to query: 'all' (default), 'local', 'upstream', upstream name or path",
)
subparser.add_argument("--start-date", help="earliest date of installation [YYYY-MM-DD]")
subparser.add_argument("--end-date", help="latest date of installation [YYYY-MM-DD]")
@@ -180,12 +168,6 @@ def query_arguments(args):
q_args = {"installed": installed, "known": known, "explicit": explicit}
install_tree = args.install_tree
upstreams = spack.config.get("upstreams", {})
if install_tree in upstreams.keys():
install_tree = upstreams[install_tree]["install_tree"]
q_args["install_tree"] = install_tree
# Time window of installation
for attribute in ("start_date", "end_date"):
date = getattr(args, attribute)
@@ -195,22 +177,26 @@ def query_arguments(args):
return q_args
def make_env_decorator(env):
def setup_env(env):
"""Create a function for decorating specs when in an environment."""
roots = set(env.roots())
removed = set(env.removed_specs())
def strip_build(seq):
return set(s.copy(deps=("link", "run")) for s in seq)
added = set(strip_build(env.added_specs()))
roots = set(strip_build(env.roots()))
removed = set(strip_build(env.removed_specs()))
def decorator(spec, fmt):
# add +/-/* to show added/removed/root specs
if any(spec.dag_hash() == r.dag_hash() for r in roots):
return color.colorize(f"@*{{{fmt}}}")
return color.colorize("@*{%s}" % fmt)
elif spec in removed:
return color.colorize(f"@K{{{fmt}}}")
return color.colorize("@K{%s}" % fmt)
else:
return fmt
return "%s" % fmt
return decorator
return decorator, added, roots, removed
def display_env(env, args, decorator, results):
@@ -225,51 +211,28 @@ def display_env(env, args, decorator, results):
"""
tty.msg("In environment %s" % env.name)
num_roots = len(env.user_specs) or "No"
tty.msg(f"{num_roots} root specs")
if not env.user_specs:
tty.msg("No root specs")
else:
tty.msg("Root specs")
concrete_specs = {
root: concrete_root
for root, concrete_root in zip(env.concretized_user_specs, env.concrete_roots())
}
# Root specs cannot be displayed with prefixes, since those are not
# set for abstract specs. Same for hashes
root_args = copy.copy(args)
root_args.paths = False
def root_decorator(spec, string):
"""Decorate root specs with their install status if needed"""
concrete = concrete_specs.get(spec)
if concrete:
status = color.colorize(concrete.install_status().value)
hash = concrete.dag_hash()
else:
status = color.colorize(spack.spec.InstallStatus.absent.value)
hash = "-" * 32
# TODO: status has two extra spaces on the end of it, but fixing this and other spec
# TODO: space format idiosyncrasies is complicated. Fix this eventually
status = status[:-2]
if args.long or args.very_long:
hash = color.colorize(f"@K{{{hash[: 7 if args.long else None]}}}")
return f"{status} {hash} {string}"
else:
return f"{status} {string}"
with spack.store.STORE.db.read_transaction():
# Roots are displayed with variants, etc. so that we can see
# specifically what the user asked for.
cmd.display_specs(
env.user_specs,
args,
# these are overrides of CLI args
paths=False,
long=False,
very_long=False,
# these enforce details in the root specs to show what the user asked for
root_args,
decorator=lambda s, f: color.colorize("@*{%s}" % f),
namespaces=True,
show_flags=True,
show_full_compiler=True,
decorator=root_decorator,
variants=True,
)
print()
print()
if args.show_concretized:
tty.msg("Concretized roots")
@@ -279,7 +242,7 @@ def root_decorator(spec, string):
# Display a header for the installed packages section IF there are installed
# packages. If there aren't any, we'll just end up printing "0 installed packages"
# later.
if results and not args.only_roots:
if results:
tty.msg("Installed packages")
@@ -288,10 +251,9 @@ def find(parser, args):
results = args.specs(**q_args)
env = ev.active_environment()
if not env and args.only_roots:
tty.die("-r / --only-roots requires an active environment")
decorator = make_env_decorator(env) if env else lambda s, f: f
decorator = lambda s, f: f
if env:
decorator, _, roots, _ = setup_env(env)
# use groups by default except with format.
if args.groups is None:
@@ -318,12 +280,9 @@ def find(parser, args):
if env:
display_env(env, args, decorator, results)
count_suffix = " (not shown)"
if not args.only_roots:
cmd.display_specs(results, args, decorator=decorator, all_headers=True)
count_suffix = ""
cmd.display_specs(results, args, decorator=decorator, all_headers=True)
# print number of installed packages last (as the list may be long)
if sys.stdout.isatty() and args.groups:
pkg_type = "loaded" if args.loaded else "installed"
spack.cmd.print_how_many_pkgs(results, pkg_type, suffix=count_suffix)
spack.cmd.print_how_many_pkgs(results, pkg_type)

View File

@@ -176,7 +176,7 @@ def setup_parser(subparser):
dest="install_source",
help="install source files in prefix",
)
arguments.add_common_arguments(subparser, ["no_checksum"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument(
"-v",
"--verbose",
@@ -326,6 +326,9 @@ def install(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.log_file and not args.log_format:
msg = "the '--log-format' must be specified when using '--log-file'"
tty.die(msg)
@@ -420,9 +423,10 @@ def install_with_active_env(env: ev.Environment, args, install_kwargs, reporter_
with reporter_factory(specs_to_install):
env.install_specs(specs_to_install, **install_kwargs)
finally:
if env.views:
with env.write_transaction():
env.write(regenerate=True)
# TODO: this is doing way too much to trigger
# views and modules to be generated.
with env.write_transaction():
env.write(regenerate=True)
def concrete_specs_from_cli(args, install_kwargs):

View File

@@ -5,6 +5,8 @@
import sys
import llnl.util.tty as tty
import spack.cmd
import spack.cmd.find
import spack.environment as ev
@@ -68,6 +70,16 @@ def setup_parser(subparser):
help="load the first match if multiple packages match the spec",
)
subparser.add_argument(
"--only",
default="package,dependencies",
dest="things_to_load",
choices=["package", "dependencies"],
help="select whether to load the package and its dependencies\n\n"
"the default is to load the package and all dependencies. alternatively, "
"one can decide to load only the package or only the dependencies",
)
subparser.add_argument(
"--list",
action="store_true",
@@ -98,6 +110,11 @@ def load(parser, args):
)
return 1
if args.things_to_load != "package,dependencies":
tty.warn(
"The `--only` flag in spack load is deprecated and will be removed in Spack v0.22"
)
with spack.store.STORE.db.read_transaction():
env_mod = uenv.environment_modifications_for_specs(*specs)
for spec in specs:

View File

@@ -53,7 +53,6 @@ def setup_parser(subparser):
"-S", "--stages", action="store_true", help="top level stage directory"
)
directories.add_argument(
"-c",
"--source-dir",
action="store_true",
help="source directory for a spec (requires it to be staged first)",

View File

@@ -28,7 +28,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command")
@@ -72,7 +72,6 @@ def setup_parser(subparser):
" retrieve all versions of each package",
)
arguments.add_common_arguments(create_parser, ["specs"])
arguments.add_concretizer_args(create_parser)
# Destroy
destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__)
@@ -108,11 +107,6 @@ def setup_parser(subparser):
"and source use `--type binary --type source` (default)"
),
)
add_parser.add_argument(
"--autopush",
action="store_true",
help=("set mirror to push automatically after installation"),
)
add_parser_signed = add_parser.add_mutually_exclusive_group(required=False)
add_parser_signed.add_argument(
"--unsigned",
@@ -180,21 +174,6 @@ def setup_parser(subparser):
),
)
set_parser.add_argument("--url", help="url of mirror directory from 'spack mirror create'")
set_parser_autopush = set_parser.add_mutually_exclusive_group(required=False)
set_parser_autopush.add_argument(
"--autopush",
help="set mirror to push automatically after installation",
action="store_true",
default=None,
dest="autopush",
)
set_parser_autopush.add_argument(
"--no-autopush",
help="set mirror to not push automatically after installation",
action="store_false",
default=None,
dest="autopush",
)
set_parser_unsigned = set_parser.add_mutually_exclusive_group(required=False)
set_parser_unsigned.add_argument(
"--unsigned",
@@ -238,7 +217,6 @@ def mirror_add(args):
or args.type
or args.oci_username
or args.oci_password
or args.autopush
or args.signed is not None
):
connection = {"url": args.url}
@@ -255,8 +233,6 @@ def mirror_add(args):
if args.type:
connection["binary"] = "binary" in args.type
connection["source"] = "source" in args.type
if args.autopush:
connection["autopush"] = args.autopush
if args.signed is not None:
connection["signed"] = args.signed
mirror = spack.mirror.Mirror(connection, name=args.name)
@@ -293,8 +269,6 @@ def _configure_mirror(args):
changes["access_pair"] = [args.oci_username, args.oci_password]
if getattr(args, "signed", None) is not None:
changes["signed"] = args.signed
if getattr(args, "autopush", None) is not None:
changes["autopush"] = args.autopush
# argparse cannot distinguish between --binary and --no-binary when same dest :(
# notice that set-url does not have these args, so getattr
@@ -575,4 +549,7 @@ def mirror(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
action[args.mirror_command](args)

View File

@@ -19,7 +19,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
arguments.add_concretizer_args(subparser)
@@ -33,6 +33,9 @@ def patch(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
specs = spack.cmd.parse_specs(args.specs, concretize=False)
for spec in specs:
_patch(spack.cmd.matching_spec_from_env(spec).package)

View File

@@ -116,38 +116,39 @@ def ipython_interpreter(args):
def python_interpreter(args):
"""A python interpreter is the default interpreter"""
# Fake a main python shell by setting __name__ to __main__.
console = code.InteractiveConsole({"__name__": "__main__", "spack": spack})
if "PYTHONSTARTUP" in os.environ:
startup_file = os.environ["PYTHONSTARTUP"]
if os.path.isfile(startup_file):
with open(startup_file) as startup:
console.runsource(startup.read(), startup_file, "exec")
if args.python_args and not args.python_command:
if args.python_command:
propagate_exceptions_from(console)
console.runsource(args.python_command)
elif args.python_args:
propagate_exceptions_from(console)
sys.argv = args.python_args
runpy.run_path(args.python_args[0], run_name="__main__")
with open(args.python_args[0]) as file:
console.runsource(file.read(), args.python_args[0], "exec")
else:
# Fake a main python shell by setting __name__ to __main__.
console = code.InteractiveConsole({"__name__": "__main__", "spack": spack})
if "PYTHONSTARTUP" in os.environ:
startup_file = os.environ["PYTHONSTARTUP"]
if os.path.isfile(startup_file):
with open(startup_file) as startup:
console.runsource(startup.read(), startup_file, "exec")
if args.python_command:
propagate_exceptions_from(console)
console.runsource(args.python_command)
else:
# Provides readline support, allowing user to use arrow keys
console.push("import readline")
# Provide tabcompletion
console.push("from rlcompleter import Completer")
console.push("readline.set_completer(Completer(locals()).complete)")
console.push('readline.parse_and_bind("tab: complete")')
# Provides readline support, allowing user to use arrow keys
console.push("import readline")
# Provide tabcompletion
console.push("from rlcompleter import Completer")
console.push("readline.set_completer(Completer(locals()).complete)")
console.push('readline.parse_and_bind("tab: complete")')
console.interact(
"Spack version %s\nPython %s, %s %s"
% (
spack.spack_version,
platform.python_version(),
platform.system(),
platform.machine(),
)
console.interact(
"Spack version %s\nPython %s, %s %s"
% (
spack.spack_version,
platform.python_version(),
platform.system(),
platform.machine(),
)
)
def propagate_exceptions_from(console):

View File

@@ -91,6 +91,7 @@ def setup_parser(subparser):
def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat()
opt, _, _ = min(result.answers)
if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels)

View File

@@ -22,7 +22,7 @@
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
subparser.add_argument(
"-p", "--path", dest="path", help="path to stage package, does not add to spack tree"
)
@@ -33,6 +33,9 @@ def stage(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if not args.specs:
env = ev.active_environment()
if not env:

View File

@@ -228,7 +228,7 @@ def create_reporter(args, specs_to_test, test_suite):
def test_list(args):
"""list installed packages with available tests"""
tagged = spack.repo.PATH.packages_with_tags(*args.tag) if args.tag else set()
tagged = set(spack.repo.PATH.packages_with_tags(*args.tag)) if args.tag else set()
def has_test_and_tags(pkg_class):
tests = spack.install_test.test_functions(pkg_class)

View File

@@ -34,13 +34,6 @@ def setup_parser(subparser):
default=False,
help="show full pytest help, with advanced options",
)
subparser.add_argument(
"-n",
"--numprocesses",
type=int,
default=1,
help="run tests in parallel up to this wide, default 1 for sequential",
)
# extra spack arguments to list tests
list_group = subparser.add_argument_group("listing tests")
@@ -236,16 +229,6 @@ def unit_test(parser, args, unknown_args):
if args.extension:
pytest_root = spack.extensions.load_extension(args.extension)
if args.numprocesses is not None and args.numprocesses > 1:
pytest_args.extend(
[
"--dist",
"loadfile",
"--tx",
f"{args.numprocesses}*popen//python=spack-tmpconfig spack python",
]
)
# pytest.ini lives in the root of the spack repository.
with llnl.util.filesystem.working_dir(pytest_root):
if args.list:

View File

@@ -334,40 +334,6 @@ def __init__(
# used for version checks for API, e.g. C++11 flag
self._real_version = None
def __eq__(self, other):
return (
self.cc == other.cc
and self.cxx == other.cxx
and self.fc == other.fc
and self.f77 == other.f77
and self.spec == other.spec
and self.operating_system == other.operating_system
and self.target == other.target
and self.flags == other.flags
and self.modules == other.modules
and self.environment == other.environment
and self.extra_rpaths == other.extra_rpaths
and self.enable_implicit_rpaths == other.enable_implicit_rpaths
)
def __hash__(self):
return hash(
(
self.cc,
self.cxx,
self.fc,
self.f77,
self.spec,
self.operating_system,
self.target,
str(self.flags),
str(self.modules),
str(self.environment),
str(self.extra_rpaths),
self.enable_implicit_rpaths,
)
)
def verify_executables(self):
"""Raise an error if any of the compiler executables is not valid.
@@ -423,7 +389,8 @@ def implicit_rpaths(self):
# Put CXX first since it has the most linking issues
# And because it has flags that affect linking
link_dirs = self._get_compiler_link_paths()
exe_paths = [x for x in [self.cxx, self.cc, self.fc, self.f77] if x]
link_dirs = self._get_compiler_link_paths(exe_paths)
all_required_libs = list(self.required_libs) + Compiler._all_compiler_rpath_libraries
return list(paths_containing_libs(link_dirs, all_required_libs))
@@ -436,33 +403,43 @@ def required_libs(self):
# By default every compiler returns the empty list
return []
def _get_compiler_link_paths(self):
cc = self.cc if self.cc else self.cxx
if not cc or not self.verbose_flag:
# Cannot determine implicit link paths without a compiler / verbose flag
def _get_compiler_link_paths(self, paths):
first_compiler = next((c for c in paths if c), None)
if not first_compiler:
return []
if not self.verbose_flag:
# In this case there is no mechanism to learn what link directories
# are used by the compiler
return []
# What flag types apply to first_compiler, in what order
if cc == self.cc:
flags = ["cflags", "cppflags", "ldflags"]
flags = ["cppflags", "ldflags"]
if first_compiler == self.cc:
flags = ["cflags"] + flags
elif first_compiler == self.cxx:
flags = ["cxxflags"] + flags
else:
flags = ["cxxflags", "cppflags", "ldflags"]
flags.append("fflags")
try:
tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
fout = os.path.join(tmpdir, "output")
fin = os.path.join(tmpdir, "main.c")
with open(fin, "w") as csource:
with open(fin, "w+") as csource:
csource.write(
"int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }\n"
"int main(int argc, char* argv[]) { " "(void)argc; (void)argv; return 0; }\n"
)
cc_exe = spack.util.executable.Executable(cc)
compiler_exe = spack.util.executable.Executable(first_compiler)
for flag_type in flags:
cc_exe.add_default_arg(*self.flags.get(flag_type, []))
for flag in self.flags.get(flag_type, []):
compiler_exe.add_default_arg(flag)
output = ""
with self.compiler_environment():
output = cc_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
output = str(
compiler_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
) # str for py2
return _parse_non_system_link_dirs(output)
except spack.util.executable.ProcessError as pe:
tty.debug("ProcessError: Command exited with non-zero status: " + pe.long_message)

View File

@@ -10,7 +10,6 @@
import itertools
import multiprocessing.pool
import os
import warnings
from typing import Dict, List, Optional, Tuple
import archspec.cpu
@@ -110,128 +109,29 @@ def _to_dict(compiler):
return {"compiler": d}
def get_compiler_config(
configuration: "spack.config.Configuration",
*,
scope: Optional[str] = None,
init_config: bool = False,
) -> List[Dict]:
def get_compiler_config(scope=None, init_config=True):
"""Return the compiler configuration for the specified architecture."""
config = configuration.get("compilers", scope=scope) or []
config = spack.config.get("compilers", scope=scope) or []
if config or not init_config:
return config
merged_config = configuration.get("compilers")
merged_config = spack.config.get("compilers")
if merged_config:
# Config is empty for this scope
# Do not init config because there is a non-empty scope
return config
_init_compiler_config(configuration, scope=scope)
config = configuration.get("compilers", scope=scope)
_init_compiler_config(scope=scope)
config = spack.config.get("compilers", scope=scope)
return config
def get_compiler_config_from_packages(
configuration: "spack.config.Configuration", *, scope: Optional[str] = None
) -> List[Dict]:
"""Return the compiler configuration from packages.yaml"""
config = configuration.get("packages", scope=scope)
if not config:
return []
packages = []
compiler_package_names = supported_compilers() + list(package_name_to_compiler_name.keys())
for name, entry in config.items():
if name not in compiler_package_names:
continue
externals_config = entry.get("externals", None)
if not externals_config:
continue
packages.extend(_compiler_config_from_package_config(externals_config))
return packages
def _compiler_config_from_package_config(config):
compilers = []
for entry in config:
compiler = _compiler_config_from_external(entry)
if compiler:
compilers.append(compiler)
return compilers
def _compiler_config_from_external(config):
spec = spack.spec.parse_with_version_concrete(config["spec"])
# use str(spec.versions) to allow `@x.y.z` instead of `@=x.y.z`
compiler_spec = spack.spec.CompilerSpec(
package_name_to_compiler_name.get(spec.name, spec.name), spec.version
)
extra_attributes = config.get("extra_attributes", {})
prefix = config.get("prefix", None)
compiler_class = class_for_compiler_name(compiler_spec.name)
paths = extra_attributes.get("paths", {})
compiler_langs = ["cc", "cxx", "fc", "f77"]
for lang in compiler_langs:
if paths.setdefault(lang, None):
continue
if not prefix:
continue
# Check for files that satisfy the naming scheme for this compiler
bindir = os.path.join(prefix, "bin")
for f, regex in itertools.product(os.listdir(bindir), compiler_class.search_regexps(lang)):
if regex.match(f):
paths[lang] = os.path.join(bindir, f)
if all(v is None for v in paths.values()):
return None
if not spec.architecture:
host_platform = spack.platforms.host()
operating_system = host_platform.operating_system("default_os")
target = host_platform.target("default_target").microarchitecture
else:
target = spec.target
if not target:
host_platform = spack.platforms.host()
target = host_platform.target("default_target").microarchitecture
operating_system = spec.os
if not operating_system:
host_platform = spack.platforms.host()
operating_system = host_platform.operating_system("default_os")
compiler_entry = {
"compiler": {
"spec": str(compiler_spec),
"paths": paths,
"flags": extra_attributes.get("flags", {}),
"operating_system": str(operating_system),
"target": str(target.family),
"modules": config.get("modules", []),
"environment": extra_attributes.get("environment", {}),
"extra_rpaths": extra_attributes.get("extra_rpaths", []),
"implicit_rpaths": extra_attributes.get("implicit_rpaths", None),
}
}
return compiler_entry
def _init_compiler_config(
configuration: "spack.config.Configuration", *, scope: Optional[str]
) -> None:
def _init_compiler_config(*, scope):
"""Compiler search used when Spack has no compilers."""
compilers = find_compilers()
compilers_dict = []
for compiler in compilers:
compilers_dict.append(_to_dict(compiler))
configuration.set("compilers", compilers_dict, scope=scope)
spack.config.set("compilers", compilers_dict, scope=scope)
def compiler_config_files():
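To illustrate what get_compiler_config_from_packages consumes, a sketch of a single external compiler entry as it might appear in a packages.yaml externals list, expressed as the equivalent Python data; every value is a placeholder rather than anything detected by the code above.

example_external_entry = {
    "spec": "gcc@12.3.0",  # hypothetical external compiler spec
    "prefix": "/usr",  # searched for cc/cxx/fc/f77 binaries when paths are missing
    "modules": [],
    "extra_attributes": {
        "paths": {
            "cc": "/usr/bin/gcc",
            "cxx": "/usr/bin/g++",
            "fc": "/usr/bin/gfortran",
            "f77": "/usr/bin/gfortran",
        },
        "flags": {},
        "environment": {},
        "extra_rpaths": [],
    },
}
# _compiler_config_from_external(example_external_entry) would turn this into a
# {"compiler": {...}} dict, filling in the host operating system and target family.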
@@ -242,22 +142,17 @@ def compiler_config_files():
compiler_config = config.get("compilers", scope=name)
if compiler_config:
config_files.append(config.get_config_filename(name, "compilers"))
compiler_config_from_packages = get_compiler_config_from_packages(config, scope=name)
if compiler_config_from_packages:
config_files.append(config.get_config_filename(name, "packages"))
return config_files
def add_compilers_to_config(compilers, scope=None):
def add_compilers_to_config(compilers, scope=None, init_config=True):
"""Add compilers to the config for the specified architecture.
Arguments:
compilers: a list of Compiler objects.
scope: configuration scope to modify.
"""
compiler_config = get_compiler_config(
configuration=spack.config.CONFIG, scope=scope, init_config=False
)
compiler_config = get_compiler_config(scope, init_config)
for compiler in compilers:
if not compiler.cc:
tty.debug(f"{compiler.spec} does not have a C compiler")
@@ -289,9 +184,6 @@ def remove_compiler_from_config(compiler_spec, scope=None):
for current_scope in candidate_scopes:
removal_happened |= _remove_compiler_from_scope(compiler_spec, scope=current_scope)
msg = "`spack compiler remove` will not remove compilers defined in packages.yaml"
msg += "\nTo remove these compilers, either edit the config or use `spack external remove`"
tty.debug(msg)
return removal_happened
@@ -306,9 +198,7 @@ def _remove_compiler_from_scope(compiler_spec, scope):
True if one or more compiler entries were actually removed, False otherwise
"""
assert scope is not None, "a specific scope is needed when calling this function"
compiler_config = get_compiler_config(
configuration=spack.config.CONFIG, scope=scope, init_config=False
)
compiler_config = get_compiler_config(scope)
filtered_compiler_config = [
compiler_entry
for compiler_entry in compiler_config
@@ -323,36 +213,22 @@ def _remove_compiler_from_scope(compiler_spec, scope):
# We need to preserve the YAML type for comments, hence we are copying the
# items in the list that has just been retrieved
compiler_config[:] = filtered_compiler_config
spack.config.CONFIG.set("compilers", compiler_config, scope=scope)
spack.config.set("compilers", compiler_config, scope=scope)
return True
def all_compilers_config(
configuration: "spack.config.Configuration",
*,
scope: Optional[str] = None,
init_config: bool = True,
) -> List["spack.compiler.Compiler"]:
def all_compilers_config(scope=None, init_config=True):
"""Return a set of specs for all the compiler versions currently
available to build with. These are instances of CompilerSpec.
"""
from_packages_yaml = get_compiler_config_from_packages(configuration, scope=scope)
if from_packages_yaml:
init_config = False
from_compilers_yaml = get_compiler_config(configuration, scope=scope, init_config=init_config)
result = from_compilers_yaml + from_packages_yaml
# Dedupe entries by the compiler they represent
# If the entry is invalid, treat it as unique for deduplication
key = lambda c: _compiler_from_config_entry(c["compiler"] or id(c))
return list(llnl.util.lang.dedupe(result, key=key))
return get_compiler_config(scope, init_config)
def all_compiler_specs(scope=None, init_config=True):
# Return compiler specs from the merged config.
return [
spack.spec.parse_with_version_concrete(s["compiler"]["spec"], compiler=True)
for s in all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config)
for s in all_compilers_config(scope, init_config)
]
@@ -512,20 +388,11 @@ def find_specs_by_arch(compiler_spec, arch_spec, scope=None, init_config=True):
def all_compilers(scope=None, init_config=True):
return all_compilers_from(
configuration=spack.config.CONFIG, scope=scope, init_config=init_config
)
def all_compilers_from(configuration, scope=None, init_config=True):
compilers = []
for items in all_compilers_config(
configuration=configuration, scope=scope, init_config=init_config
):
config = get_compiler_config(scope, init_config=init_config)
compilers = list()
for items in config:
items = items["compiler"]
compiler = _compiler_from_config_entry(items) # can be None in error case
if compiler:
compilers.append(compiler)
compilers.append(_compiler_from_config_entry(items))
return compilers
@@ -536,7 +403,10 @@ def compilers_for_spec(
"""This gets all compilers that satisfy the supplied CompilerSpec.
Returns an empty list if none are found.
"""
config = all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config)
if use_cache:
config = all_compilers_config(scope, init_config)
else:
config = get_compiler_config(scope, init_config)
matches = set(find(compiler_spec, scope, init_config))
compilers = []
@@ -546,7 +416,7 @@ def compilers_for_spec(
def compilers_for_arch(arch_spec, scope=None):
config = all_compilers_config(spack.config.CONFIG, scope=scope)
config = all_compilers_config(scope)
return list(get_compilers(config, arch_spec=arch_spec))
@@ -632,10 +502,7 @@ def _compiler_from_config_entry(items):
compiler = _compiler_cache.get(config_id, None)
if compiler is None:
try:
compiler = compiler_from_dict(items)
except UnknownCompilerError as e:
warnings.warn(e.message)
compiler = compiler_from_dict(items)
_compiler_cache[config_id] = compiler
return compiler
@@ -688,9 +555,7 @@ def get_compilers(config, cspec=None, arch_spec=None):
raise ValueError(msg)
continue
compiler = _compiler_from_config_entry(items)
if compiler:
compilers.append(compiler)
compilers.append(_compiler_from_config_entry(items))
return compilers
@@ -718,7 +583,9 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
scope_to_compilers = {}
for scope in config.scopes:
compilers = compilers_for_spec(compiler_spec, arch_spec=arch_spec, scope=scope)
compilers = compilers_for_spec(
compiler_spec, arch_spec=arch_spec, scope=scope, use_cache=False
)
if compilers:
scope_to_compilers[scope] = compilers

View File

@@ -0,0 +1,34 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import spack.compilers.oneapi
class Dpcpp(spack.compilers.oneapi.Oneapi):
"""This is the same as the oneAPI compiler but uses dpcpp instead of
icpx (for DPC++ source files). It explicitly refers to dpcpp, so that
CMake test files which check the compiler name (e.g. CMAKE_CXX_COMPILER)
detect it as dpcpp.
Ideally we could switch out icpx for dpcpp where needed in the oneAPI
compiler definition, but two things are needed for that: (a) a way to
tell the compiler that it should be using dpcpp and (b) a way to
customize the link_paths
See also: https://www.intel.com/content/www/us/en/develop/documentation/oneapi-dpcpp-cpp-compiler-dev-guide-and-reference/top/compiler-setup/using-the-command-line/invoking-the-compiler.html
"""
# Subclasses use possible names of C++ compiler
cxx_names = ["dpcpp"]
# Named wrapper links within build_env_path
link_paths = {
"cc": os.path.join("oneapi", "icx"),
"cxx": os.path.join("oneapi", "dpcpp"),
"f77": os.path.join("oneapi", "ifx"),
"fc": os.path.join("oneapi", "ifx"),
}

View File

@@ -8,9 +8,7 @@
import subprocess
import sys
import tempfile
from typing import Dict, List
import archspec.cpu
from typing import Dict, List, Set
import spack.compiler
import spack.operating_systems.windows_os
@@ -20,7 +18,15 @@
from spack.error import SpackError
from spack.version import Version, VersionRange
FC_PATH: Dict[str, str] = dict()
avail_fc_version: Set[str] = set()
fc_path: Dict[str, str] = dict()
fortran_mapping = {
"2021.3.0": "19.29.30133",
"2021.2.1": "19.28.29913",
"2021.2.0": "19.28.29334",
"2021.1.0": "19.28.29333",
}
class CmdCall:
@@ -107,13 +113,15 @@ def command_str(self):
return f"{script} {self.arch} {self.sdk_ver} {self.vcvars_ver}"
def get_valid_fortran_pth():
"""Assign maximum available fortran compiler version"""
# TODO (johnwparent): validate compatibility w/ try compiler
# functionality when added
def get_valid_fortran_pth(comp_ver):
cl_ver = str(comp_ver)
sort_fn = lambda fc_ver: Version(fc_ver)
sort_fc_ver = sorted(list(FC_PATH.keys()), key=sort_fn)
return FC_PATH[sort_fc_ver[-1]] if sort_fc_ver else None
sort_fc_ver = sorted(list(avail_fc_version), key=sort_fn)
for ver in sort_fc_ver:
if ver in fortran_mapping:
if Version(cl_ver) <= Version(fortran_mapping[ver]):
return fc_path[ver]
return None
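
For reference, the lookup above pairs each ifx release with the newest MSVC (cl) toolset it supports and returns the first Fortran path whose paired cl version is at least the cl version being configured. A rough sketch of that selection logic, with hypothetical install paths and plain tuples instead of Spack's Version class:

fortran_mapping = {
    "2021.3.0": "19.29.30133",
    "2021.2.1": "19.28.29913",
}
# hypothetical install locations keyed by ifx version
fc_path = {
    "2021.3.0": r"C:\oneAPI\compiler\2021.3.0\windows\bin\ifx.exe",
    "2021.2.1": r"C:\oneAPI\compiler\2021.2.1\windows\bin\ifx.exe",
}


def pick_fortran(cl_ver, available):
    as_tuple = lambda v: tuple(int(p) for p in v.split("."))
    for ver in sorted(available, key=as_tuple):
        if ver in fortran_mapping and as_tuple(cl_ver) <= as_tuple(fortran_mapping[ver]):
            return fc_path[ver]
    return None


print(pick_fortran("19.28.29913", fc_path))  # -> the 2021.2.1 path above
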
class Msvc(Compiler):
@@ -157,9 +165,11 @@ def __init__(self, *args, **kwargs):
# This positional argument "paths" is later parsed and process by the base class
# via the call to `super` later in this method
paths = args[3]
latest_fc = get_valid_fortran_pth()
new_pth = [pth if pth else latest_fc for pth in paths[2:]]
paths[2:] = new_pth
# This positional argument "cspec" is also parsed and handled by the base class
# constructor
cspec = args[0]
new_pth = [pth if pth else get_valid_fortran_pth(cspec.version) for pth in paths]
paths[:] = new_pth
# Initialize, deferring to base class but then adding the vcvarsallfile
# file based on compiler executable path.
super().__init__(*args, **kwargs)
@@ -171,14 +181,11 @@ def __init__(self, *args, **kwargs):
# and stores their path, but their respective VCVARS
# file must be invoked before usage.
env_cmds = []
compiler_root = os.path.join(os.path.dirname(self.cc), "../../../../../..")
compiler_root = os.path.join(self.cc, "../../../../../../..")
vcvars_script_path = os.path.join(compiler_root, "Auxiliary", "Build", "vcvars64.bat")
# get current platform architecture and format for vcvars argument
arch = spack.platforms.real_host().default.lower()
arch = arch.replace("-", "_")
if str(archspec.cpu.host().family) == "x86_64":
arch = "amd64"
self.vcvars_call = VCVarsInvocation(vcvars_script_path, arch, self.msvc_version)
env_cmds.append(self.vcvars_call)
# Below is a check for a valid fortran path
@@ -186,34 +193,11 @@ def __init__(self, *args, **kwargs):
# paths[2] refers to the fc path and is a generic check
# for a fortran compiler
if paths[2]:
def get_oneapi_root(pth: str):
"""From within a prefix known to be a oneAPI path
determine the oneAPI root path from arbitrary point
under root
Args:
pth: path prefixed within oneAPI root
"""
if not pth:
return ""
while os.path.basename(pth) and os.path.basename(pth) != "oneAPI":
pth = os.path.dirname(pth)
return pth
# If this is found, it sets all the vars
oneapi_root = get_oneapi_root(self.fc)
if not oneapi_root:
raise RuntimeError(f"Non-oneAPI Fortran compiler {self.fc} assigned to MSVC")
oneapi_root = os.getenv("ONEAPI_ROOT")
oneapi_root_setvars = os.path.join(oneapi_root, "setvars.bat")
# some oneAPI exes return a version more precise than their
# install paths specify, so we determine path from
# the install path rather than the fc executable itself
numver = r"\d+\.\d+(?:\.\d+)?"
pattern = f"((?:{numver})|(?:latest))"
version_from_path = re.search(pattern, self.fc).group(1)
oneapi_version_setvars = os.path.join(
oneapi_root, "compiler", version_from_path, "env", "vars.bat"
oneapi_root, "compiler", str(self.ifx_version), "env", "vars.bat"
)
# order matters here, the specific version env must be invoked first,
# otherwise it will be ignored if the root setvars sets up the oneapi
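
The version_from_path extraction above pulls the versioned install directory (or "latest") out of the ifx path so the matching env/vars.bat can be located. A quick illustration using the same regex and a hypothetical install path (the real path comes from self.fc):

import re

numver = r"\d+\.\d+(?:\.\d+)?"
pattern = f"((?:{numver})|(?:latest))"

fc = r"C:\Program Files (x86)\Intel\oneAPI\compiler\2023.2.1\windows\bin\ifx.exe"
version_from_path = re.search(pattern, fc).group(1)
print(version_from_path)  # -> "2023.2.1"
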
@@ -325,19 +309,23 @@ def setup_custom_environment(self, pkg, env):
@classmethod
def fc_version(cls, fc):
# We're using Intel for the Fortran compilers, which are only available
# when ONEAPI_ROOT points at a meaningful oneAPI installation
if not sys.platform == "win32":
return "unknown"
fc_ver = cls.default_version(fc)
FC_PATH[fc_ver] = fc
try:
sps = spack.operating_systems.windows_os.WindowsOs().compiler_search_paths
except AttributeError:
raise SpackError(
"Windows compiler search paths not established, "
"please report this behavior to github.com/spack/spack"
)
clp = spack.util.executable.which_string("cl", path=sps)
return cls.default_version(clp) if clp else fc_ver
avail_fc_version.add(fc_ver)
fc_path[fc_ver] = fc
if os.getenv("ONEAPI_ROOT"):
try:
sps = spack.operating_systems.windows_os.WindowsOs.compiler_search_paths
except AttributeError:
raise SpackError("Windows compiler search paths not established")
clp = spack.util.executable.which_string("cl", path=sps)
ver = cls.default_version(clp)
else:
ver = fc_ver
return ver
@classmethod
def f77_version(cls, f77):

View File

@@ -749,6 +749,7 @@ def _concretize_specs_together_new(*abstract_specs, **kwargs):
result = solver.solve(
abstract_specs, tests=kwargs.get("tests", False), allow_deprecated=allow_deprecated
)
result.raise_if_unsat()
return [s.copy() for s in result.specs]

View File

@@ -107,7 +107,7 @@
#: metavar to use for commands that accept scopes
#: this is shorter and more readable than listing all choices
SCOPES_METAVAR = "{defaults,system,site,user,command_line}[/PLATFORM] or env:ENVIRONMENT"
SCOPES_METAVAR = "{defaults,system,site,user}[/PLATFORM] or env:ENVIRONMENT"
#: Base name for the (internal) overrides scope.
_OVERRIDES_BASE_NAME = "overrides-"
@@ -764,31 +764,6 @@ def _add_platform_scope(
cfg.push_scope(scope_type(plat_name, plat_path))
def config_paths_from_entry_points() -> List[Tuple[str, str]]:
"""Load configuration paths from entry points
A python package can register entry point metadata so that Spack can find
its configuration by adding the following to the project's pyproject.toml:
.. code-block:: toml
[project.entry-points."spack.config"]
baz = "baz:get_spack_config_path"
The function ``get_spack_config_path`` returns the path to the package's
Spack configuration scope.
"""
config_paths: List[Tuple[str, str]] = []
for entry_point in lang.get_entry_points(group="spack.config"):
hook = entry_point.load()
if callable(hook):
config_path = hook()
if config_path and os.path.exists(config_path):
config_paths.append(("plugin-%s" % entry_point.name, str(config_path)))
return config_paths
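
For illustration, the hook referenced by that entry-point docstring could be as small as the following; the module name ``baz`` and the config directory are hypothetical:

# baz/__init__.py  (hypothetical plugin package)
import os


def get_spack_config_path():
    """Return the directory that holds this package's Spack config scope."""
    return os.path.join(os.path.dirname(__file__), "spack_config")
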
def _add_command_line_scopes(
cfg: Union[Configuration, lang.Singleton], command_line_scopes: List[str]
) -> None:
@@ -841,9 +816,6 @@ def create() -> Configuration:
# No site-level configs should be checked into spack by default.
configuration_paths.append(("site", os.path.join(spack.paths.etc_path)))
# Python packages can register configuration scopes via entry_points
configuration_paths.extend(config_paths_from_entry_points())
# User configuration can override both spack defaults and site config
# This is disabled if user asks for no local configuration.
if not disable_local_config:
@@ -1562,9 +1534,8 @@ def ensure_latest_format_fn(section: str) -> Callable[[YamlConfigDict], bool]:
def use_configuration(
*scopes_or_paths: Union[ConfigScope, str]
) -> Generator[Configuration, None, None]:
"""Use the configuration scopes passed as arguments within the context manager.
This function invalidates caches, and is therefore very slow.
"""Use the configuration scopes passed as arguments within the
context manager.
Args:
*scopes_or_paths: scope objects or paths to be used

View File

@@ -19,6 +19,9 @@
},
"os_package_manager": "dnf",
"build": "spack/fedora38",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/fedora:38"
}
@@ -30,6 +33,9 @@
},
"os_package_manager": "dnf",
"build": "spack/fedora37",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/fedora:37"
}
@@ -41,6 +47,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/rockylinux9",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/rockylinux:9"
}
@@ -52,6 +61,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/rockylinux8",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/rockylinux:8"
}
@@ -63,6 +75,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/almalinux9",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "quay.io/almalinuxorg/almalinux:9"
}
@@ -74,6 +89,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/almalinux8",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "quay.io/almalinuxorg/almalinux:8"
}
@@ -87,6 +105,9 @@
"build": "spack/centos-stream",
"final": {
"image": "quay.io/centos/centos:stream"
},
"build_tags": {
"develop": "latest"
}
},
"centos:7": {
@@ -94,7 +115,10 @@
"template": "container/centos_7.dockerfile"
},
"os_package_manager": "yum",
"build": "spack/centos7"
"build": "spack/centos7",
"build_tags": {
"develop": "latest"
}
},
"opensuse/leap:15": {
"bootstrap": {
@@ -102,6 +126,9 @@
},
"os_package_manager": "zypper",
"build": "spack/leap15",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "opensuse/leap:latest"
}
@@ -121,13 +148,19 @@
"template": "container/ubuntu_2204.dockerfile"
},
"os_package_manager": "apt",
"build": "spack/ubuntu-jammy"
"build": "spack/ubuntu-jammy",
"build_tags": {
"develop": "latest"
}
},
"ubuntu:20.04": {
"bootstrap": {
"template": "container/ubuntu_2004.dockerfile"
},
"build": "spack/ubuntu-focal",
"build_tags": {
"develop": "latest"
},
"os_package_manager": "apt"
},
"ubuntu:18.04": {
@@ -135,7 +168,10 @@
"template": "container/ubuntu_1804.dockerfile"
},
"os_package_manager": "apt",
"build": "spack/ubuntu-bionic"
"build": "spack/ubuntu-bionic",
"build_tags": {
"develop": "latest"
}
}
},
"os_package_managers": {

View File

@@ -50,7 +50,10 @@ def build_info(image, spack_version):
if not build_image:
return None, None
return build_image, spack_version
# Translate version from git to docker if necessary
build_tag = image_data["build_tags"].get(spack_version, spack_version)
return build_image, build_tag
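
The ``build_tags`` table added to images.json above is what lets this lookup translate a Spack ref such as ``develop`` into the Docker tag actually published for the build image. A trimmed-down sketch of the translation, assuming an ``image_data`` dict shaped like the images.json entries:

image_data = {
    "build": "spack/ubuntu-jammy",
    "build_tags": {"develop": "latest"},
}


def build_tag_for(image_data, spack_version):
    # fall back to the version string itself when no translation is registered
    return image_data["build_tags"].get(spack_version, spack_version)


print(build_tag_for(image_data, "develop"))  # -> "latest"
print(build_tag_for(image_data, "0.21.2"))   # -> "0.21.2"
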
def os_package_manager_for(image):

View File

@@ -227,7 +227,7 @@ def read(path, apply_updates):
if apply_updates and compilers:
for compiler in compilers:
try:
spack.compilers.add_compilers_to_config([compiler])
spack.compilers.add_compilers_to_config([compiler], init_config=False)
except Exception:
warnings.warn(
f"Could not add compiler {str(compiler.spec)}: "

Some files were not shown because too many files have changed in this diff