Compare commits


1 Commit

Author:  Greg Becker
SHA1:    719d31682f
Message: speed up relocation using memoized offsets
         refactor to do scanning in a single pass
         parallelize new relocate method with threadpool
         relocate_by_offsets can recompute if offsets not memoized
Date:    2022-06-03 12:19:01 +02:00
1161 changed files with 7293 additions and 12544 deletions

View File

@@ -12,7 +12,6 @@ on:
# built-in repository or documentation
- 'var/spack/repos/builtin/**'
- '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
- '!var/spack/repos/builtin/packages/clingo/**'
- '!var/spack/repos/builtin/packages/python/**'
- '!var/spack/repos/builtin/packages/re2c/**'
- 'lib/spack/docs/**'
@@ -20,10 +19,6 @@ on:
# nightly at 2:16 AM
- cron: '16 2 * * *'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
fedora-clingo-sources:
@@ -180,11 +175,10 @@ jobs:
tree ~/.spack/bootstrap/store/
macos-clingo-binaries:
runs-on: ${{ matrix.macos-version }}
runs-on: macos-latest
strategy:
matrix:
python-version: ['3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
macos-version: ['macos-10.15', 'macos-11', 'macos-12']
if: github.repository == 'spack/spack'
steps:
- name: Install dependencies
@@ -192,7 +186,7 @@ jobs:
brew install tree
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: ${{ matrix.python-version }}
- name: Bootstrap clingo
@@ -211,7 +205,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: ${{ matrix.python-version }}
- name: Setup repo

View File

@@ -19,10 +19,6 @@ on:
release:
types: [published]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
deploy-images:
runs-on: ubuntu-latest

View File

@@ -16,10 +16,6 @@ on:
- '.github/workflows/macos_python.yml'
# TODO: run if we touch any of the recipes involved in this
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
@@ -30,7 +26,7 @@ jobs:
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
- name: spack install
@@ -46,7 +42,7 @@ jobs:
timeout-minutes: 700
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
- name: spack install
@@ -60,7 +56,7 @@ jobs:
runs-on: macos-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: 3.9
- name: spack install

View File

@@ -9,11 +9,6 @@ on:
branches:
- develop
- releases/**
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
jobs:
# Validate that the code can be run on all the Python versions
# supported by Spack
@@ -21,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install Python Packages
@@ -39,7 +34,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install Python packages
@@ -114,7 +109,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -179,7 +174,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install System packages
@@ -245,7 +240,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install System packages
@@ -294,7 +289,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -337,7 +332,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # @v2
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08 # @v2
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6 # @v2
with:
python-version: '3.10'
- name: Install Python packages
@@ -350,7 +345,7 @@ jobs:
coverage run $(which spack) audit packages
coverage combine
coverage xml
- name: Package audits (without coverage)
- name: Package audits (wwithout coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh

View File

@@ -9,11 +9,6 @@ on:
branches:
- develop
- releases/**
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_number }}
cancel-in-progress: true
defaults:
run:
shell:
@@ -23,7 +18,7 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python Packages
@@ -41,7 +36,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
@@ -63,7 +58,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
@@ -83,7 +78,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
@@ -103,7 +98,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
@@ -128,7 +123,7 @@ jobs:
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
with:
fetch-depth: 0
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages
@@ -159,7 +154,7 @@ jobs:
run:
shell: pwsh
steps:
- uses: actions/setup-python@c4e89fac7e8767b327bbad6cb4d859eda999cf08
- uses: actions/setup-python@98f2ad02fd48d057ee3b4d4f66525b231c3e52b6
with:
python-version: 3.9
- name: Install Python packages

View File

@@ -62,12 +62,11 @@ on these ideas for each distinct build system that Spack supports:
build_systems/bundlepackage
build_systems/cudapackage
build_systems/custompackage
build_systems/inteloneapipackage
build_systems/intelpackage
build_systems/multiplepackage
build_systems/rocmpackage
build_systems/sourceforgepackage
build_systems/custompackage
build_systems/multiplepackage
For reference, the :py:mod:`Build System API docs <spack.build_systems>`
provide a list of build systems and methods/attributes that can be

View File

@@ -84,8 +84,8 @@ build ``hdf5`` with Intel oneAPI MPI do::
spack install hdf5 +mpi ^intel-oneapi-mpi
Using Externally Installed oneAPI Tools
=======================================
Using an Externally Installed oneAPI
====================================
Spack can also use oneAPI tools that are manually installed with
`Intel Installers`_. The procedures for configuring Spack to use
@@ -110,7 +110,7 @@ Another option is to manually add the configuration to
Libraries
---------
If you want Spack to use oneMKL that you have installed without Spack in
If you want Spack to use MKL that you have installed without Spack in
the default location, then add the following to
``~/.spack/packages.yaml``, adjusting the version as appropriate::
@@ -139,7 +139,7 @@ You can also use Spack-installed libraries. For example::
spack load intel-oneapi-mkl
Will update your environment CPATH, LIBRARY_PATH, and other
environment variables for building an application with oneMKL.
environment variables for building an application with MKL.
More information
================

View File

@@ -15,9 +15,6 @@ IntelPackage
Intel packages in Spack
^^^^^^^^^^^^^^^^^^^^^^^^
This is an earlier version of Intel software development tools and has
now been replaced by Intel oneAPI Toolkits.
Spack can install and use several software development products offered by Intel.
Some of these are available under no-cost terms, others require a paid license.
All share the same basic steps for configuration, installation, and, where

View File

@@ -48,9 +48,8 @@ important to understand.
**build backend**
Libraries used to define how to build a wheel. Examples
include `setuptools <https://setuptools.pypa.io/>`__,
`flit <https://flit.readthedocs.io/>`_,
`poetry <https://python-poetry.org/>`_, and
`hatchling <https://hatch.pypa.io/latest/>`_.
`flit <https://flit.readthedocs.io/>`_, and
`poetry <https://python-poetry.org/>`_.
^^^^^^^^^^^
Downloading
@@ -327,33 +326,6 @@ for specifying the version requirements. Note that ``~=`` works
differently in poetry than in setuptools and flit for versions that
start with a zero.
"""""""""
hatchling
"""""""""
If the ``pyproject.toml`` lists ``hatchling.build`` as the
``build-backend``, it uses the hatchling build system. Look for
dependencies under the following keys:
* ``requires-python``
This specifies the version of Python that is required
* ``project.dependencies``
These packages are required for building and installation. You can
add them with ``type=('build', 'run')``.
* ``project.optional-dependencies``
This section includes keys with lists of optional dependencies
needed to enable those features. You should add a variant that
optionally adds these dependencies. This variant should be ``False``
by default.
See https://hatch.pypa.io/latest/config/dependency/ for more
information.
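As a hypothetical sketch (the package name, version, checksum placeholder, and
dependency specs below are invented for illustration), these keys might map onto
a Spack recipe as follows:

.. code-block:: python

   class PyExample(PythonPackage):
       """Hypothetical package that builds with hatchling."""

       version("1.0.0", sha256="...")  # placeholder checksum

       # requires-python
       depends_on("python@3.8:", type=("build", "run"))

       # the build backend named in pyproject.toml
       depends_on("py-hatchling", type="build")

       # project.dependencies
       depends_on("py-requests", type=("build", "run"))

       # project.optional-dependencies, off by default per the guidance above
       variant("docs", default=False, description="Build documentation")
       depends_on("py-sphinx", when="+docs", type=("build", "run"))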
""""""
wheels
""""""
@@ -694,4 +666,3 @@ For more information on build backend tools, see:
* setuptools: https://setuptools.pypa.io/
* flit: https://flit.readthedocs.io/
* poetry: https://python-poetry.org/
* hatchling: https://hatch.pypa.io/latest/

View File

@@ -1,55 +0,0 @@
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _sourceforgepackage:
------------------
SourceforgePackage
------------------
``SourceforgePackage`` is a
`mixin-class <https://en.wikipedia.org/wiki/Mixin>`_. It automatically
sets the URL based on a list of Sourceforge mirrors listed in
`sourceforge_mirror_path`, which defaults to a half dozen known mirrors.
Refer to the package source
(`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/sourceforge.py>`__) for the current list of mirrors used by Spack.
^^^^^^^
Methods
^^^^^^^
This package provides a method for populating mirror URLs.
**urls**
This method returns a list of possible URLs for package source.
It is decorated with `property` so its results are treated as
a package attribute.
Refer to
`<https://spack.readthedocs.io/en/latest/packaging_guide.html#mirrors-of-the-main-url>`__
for information on how Spack uses the `urls` attribute during
fetching.
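A minimal sketch of what such a property could look like (the mirror hostnames
below are illustrative, not Spack's actual list):

.. code-block:: python

   @property
   def urls(self):
       # Prepend each known mirror to the package-defined archive path
       mirrors = [
           "https://downloads.sourceforge.net/project/",
           "https://freefr.dl.sourceforge.net/project/",
       ]
       return [mirror + self.sourceforge_mirror_path for mirror in mirrors]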
^^^^^
Usage
^^^^^
This helper package can be added to your package by adding it as a base
class of your package and defining the relative location of an archive
file for one version of your software.
.. code-block:: python
:emphasize-lines: 1,3
class MyPackage(AutotoolsPackage, SourceforgePackage):
...
sourceforge_mirror_path = "my-package/mypackage.1.0.0.tar.gz"
...
Over 40 packages use the ``SourceforgePackage`` mix-in as of
July 2022, so there are multiple packages to choose from if you want
to see a real example.

View File

@@ -109,10 +109,9 @@ Spack Images on Docker Hub
--------------------------
Docker images with Spack preinstalled and ready to be used are
built when a release is tagged, or nightly on ``develop``. The images
are then pushed both to `Docker Hub <https://hub.docker.com/u/spack>`_
and to `GitHub Container Registry <https://github.com/orgs/spack/packages?repo_name=spack>`_.
The operating systems currently supported are summarized in the table below:
built on `Docker Hub <https://hub.docker.com/u/spack>`_
at every push to ``develop`` or to a release branch. The operating systems
currently supported are summarized in the table below:
.. _containers-supported-os:
@@ -122,31 +121,22 @@ The OS that are currently supported are summarized in the table below:
* - Operating System
- Base Image
- Spack Image
* - Ubuntu 16.04
- ``ubuntu:16.04``
- ``spack/ubuntu-xenial``
* - Ubuntu 18.04
- ``ubuntu:18.04``
- ``spack/ubuntu-bionic``
* - Ubuntu 20.04
- ``ubuntu:20.04``
- ``spack/ubuntu-focal``
* - Ubuntu 22.04
- ``ubuntu:22.04``
- ``spack/ubuntu-jammy``
* - CentOS 7
- ``centos:7``
- ``spack/centos7``
* - CentOS Stream
- ``quay.io/centos/centos:stream``
- ``spack/centos-stream``
* - openSUSE Leap
- ``opensuse/leap``
- ``spack/leap15``
* - Amazon Linux 2
- ``amazonlinux:2``
- ``spack/amazon-linux``
All the images are tagged with the corresponding release of Spack:
.. image:: images/ghcr_spack.png
.. image:: dockerhub_spack.png
with the exception of the ``latest`` tag that points to the HEAD
of the ``develop`` branch. These images are available for anyone

View File

@@ -107,6 +107,7 @@ with a high level view of Spack's directory structure:
llnl/ <- some general-use libraries
spack/ <- spack module; contains Python code
analyzers/ <- modules to run analysis on installed packages
build_systems/ <- modules for different build systems
cmd/ <- each file in here is a spack subcommand
compilers/ <- compiler description files
@@ -241,6 +242,22 @@ Unit tests
Implements Spack's test suite. Add a module and put its name in
the test suite in ``__init__.py`` to add more unit tests.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Research and Monitoring Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:mod:`spack.monitor`
Contains :class:`~spack.monitor.SpackMonitorClient`. This is accessed from
the ``spack install`` and ``spack analyze`` commands to send build and
package metadata up to a `Spack Monitor
<https://github.com/spack/spack-monitor>`_ server.
:mod:`spack.analyzers`
A module folder with a :class:`~spack.analyzers.analyzer_base.AnalyzerBase`
that provides base functions to run, save, and (optionally) upload analysis
results to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.
^^^^^^^^^^^^^
Other Modules
@@ -284,6 +301,240 @@ Most spack commands look something like this:
The information in Package files is used at all stages in this
process.
Conceptually, packages are overloaded. They contain:
-------------
Stage objects
-------------
.. _writing-analyzers:
-----------------
Writing analyzers
-----------------
To write an analyzer, you should add a new python file to the
analyzers module directory at ``lib/spack/spack/analyzers`` .
Your analyzer should be a subclass of the :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`. For example, if you want
to add an analyzer class ``Myanalyzer``, you would write it to
``spack/analyzers/myanalyzer.py`` and import and
use the base as follows:
.. code-block:: python
from .analyzer_base import AnalyzerBase
class Myanalyzer(AnalyzerBase):
Note that the class name is your module file name with the first letter
capitalized. You can look at other analyzers in
the analyzers directory for examples. The guide here covers the basic functions needed.
^^^^^^^^^^^^^^^^^^^^^^^^^
Analyzer Output Directory
^^^^^^^^^^^^^^^^^^^^^^^^^
By default, when you run ``spack analyze run``, an analyzer output directory will
be created in your Spack user directory under ``$HOME``. We write output here
because the install directory might not always be writable.
.. code-block:: console
~/.spack/
analyzers
Result files will be written here, organized in subfolders in the same structure
as the package, with each analyzer owning its own subfolder. For example:
.. code-block:: console
$ tree ~/.spack/analyzers/
/home/spackuser/.spack/analyzers/
└── linux-ubuntu20.04-skylake
└── gcc-9.3.0
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
├── environment_variables
│   └── spack-analyzer-environment-variables.json
├── install_files
│   └── spack-analyzer-install-files.json
└── libabigail
└── lib
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
Notice that for the libabigail analyzer, since results are generated per object,
we honor the object's folder in case there are equivalently named files in
different folders. The result files are typically written as json so they can be easily read and uploaded in a future interaction with a monitor.
^^^^^^^^^^^^^^^^^
Analyzer Metadata
^^^^^^^^^^^^^^^^^
Your analyzer is required to have the class attributes ``name``, ``outfile``,
and ``description``. These are printed to the user when they use the subcommand
``spack analyze list-analyzers``. Here is an example.
As we mentioned above, note that this analyzer would live in a module named
``libabigail.py`` in the analyzers folder so that the class can be discovered.
.. code-block:: python
class Libabigail(AnalyzerBase):
name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"
This means that the name and output file should be unique for your analyzer.
Note that "all" cannot be the name of an analyzer, as this key is used to indicate
that the user wants to run all analyzers.
.. _analyzer_run_function:
^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer run Function
^^^^^^^^^^^^^^^^^^^^^^^^
The core of an analyzer is its ``run()`` function, which should accept no
arguments. You can assume your analyzer has the package spec of interest at ``self.spec``
and it's up to the run function to generate whatever analysis data you need,
and then return the result keyed by the analyzer name. The result data
should be a list of objects, each with a name, ``analyzer_name``, ``install_file``,
and one of ``value`` or ``binary_value``. The install file should be a relative
path, not an absolute path. For example, let's say we extract a metric called
``metric`` for ``bin/wget`` using our analyzer ``thebest-analyzer``.
We might have data that looks like this:
.. code-block:: python
result = {"name": "metric", "analyzer_name": "thebest-analyzer", "value": "1", "install_file": "bin/wget"}
We'd then return it as follows. Note that the key is the analyzer name, ``self.name``.
.. code-block:: python
return {self.name: result}
This will save the complete result to the analyzer metadata folder, as described
previously. If you want support for adding a different kind of metadata (e.g.,
not associated with an install file) then the monitor server would need to be updated
to support this first.
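Putting these pieces together, a minimal ``run()`` might look like the
following sketch (the analyzer name and the hard-coded metric are invented
for illustration):

.. code-block:: python

   from .analyzer_base import AnalyzerBase

   class Thebest(AnalyzerBase):

       name = "thebest-analyzer"
       outfile = "spack-analyzer-thebest.json"
       description = "illustrative analyzer that records one fake metric"

       def run(self):
           # A real analyzer would derive this from files under
           # self.spec.prefix; the value here is hard-coded.
           result = {"name": "metric",
                     "analyzer_name": self.name,
                     "value": "1",
                     "install_file": "bin/wget"}
           return {self.name: result}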
^^^^^^^^^^^^^^^^^^^^^^^^^
An analyzer init Function
^^^^^^^^^^^^^^^^^^^^^^^^^
If you don't need any extra dependencies or checks, you can skip defining an analyzer
init function, as the base class will handle it. Typically, it will accept
a spec, and an optional output directory (if the user does not want the default
metadata folder for analyzer results). The analyzer init function should call
its parent init, and then do any extra checks or validation required for it to
work. For example:
.. code-block:: python
def __init__(self, spec, dirname=None):
super(Myanalyzer, self).__init__(spec, dirname)
# install extra dependencies, do extra preparation and checks here
At the end of the init, you will have available to you:
- **self.spec**: the spec object
- **self.dirname**: an optional directory name the user has provided at init for saving results
- **self.output_dir**: the analyzer metadata directory, where we save by default
- **self.meta_dir**: the path to the package metadata directory (.spack) if you need it
And can proceed to write your analyzer.
^^^^^^^^^^^^^^^^^^^^^^^
Saving Analyzer Results
^^^^^^^^^^^^^^^^^^^^^^^
The analyzer will have ``save_result`` called with the result object generated by
``run()`` to save it to the filesystem and, if the user has added the ``--monitor``
flag, to upload it to a monitor server. If your result follows an accepted result
format and you don't need to parse it further, you don't need to add this
function to your class. However, if your result data is large or otherwise
needs additional parsing, you can define it. If you define the function, it
is useful to know about the ``output_dir`` property, which you can join
with your output file relative path of choice:
.. code-block:: python
outfile = os.path.join(self.output_dir, "my-output-file.txt")
The directory will be provided by the ``output_dir`` property but it won't exist,
so you should create it:
.. code-block:: python
# Create the output directory
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
If you are generating results that match to specific files in the package
install directory, you should try to maintain those paths in the case that
there are equivalently named files in different directories that would
overwrite one another. As an example of an analyzer with a custom save,
the Libabigail analyzer saves ``*.xml`` files to the analyzer metadata
folder in ``run()``, as they are either binaries, or as xml (text) would
usually be too big to pass in one request. For this reason, the files
are saved during ``run()`` and the filenames added to the result object,
and then when the result object is passed back into ``save_result()``,
we skip saving to the filesystem, and instead read the file and send
each one (separately) to the monitor:
.. code-block:: python
def save_result(self, result, monitor=None, overwrite=False):
"""ABI results are saved to individual files, so each one needs to be
read and uploaded. Result here should be the lookup generated in run(),
the key is the analyzer name, and each value is the result file.
We currently upload the entire xml as text because libabigail can't
easily read gzipped xml, but this will be updated when it can.
"""
if not monitor:
return
name = self.spec.package.name
for obj, filename in result.get(self.name, {}).items():
# Don't include the prefix
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
# We've already saved the results to file during run
content = spack.monitor.read_file(filename)
# A result needs an analyzer, value or binary_value, and name
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
monitor.send_analyze_metadata(self.spec.package, {"libabigail": [data]})
Notice that this function, if you define it, requires a result object (generated by
``run()``), a monitor (if you want to send), and a boolean ``overwrite`` used to
check whether a result exists first, and to avoid writing to it if it exists and
overwrite is False. Also notice that since we already saved these files to the
analyzer metadata folder, we return early if a monitor isn't defined, because this
function serves to send results to the monitor. If you haven't saved anything to the
analyzer metadata folder yet, you might want to do that here. You should also use
``tty.info`` to give the user a message of "Writing result to $DIRNAME."
.. _writing-commands:
@@ -448,6 +699,23 @@ with a hook, and this is the purpose of this particular hook. Akin to
``on_phase_success`` we require the same variables - the package that failed,
the name of the phase, and the log file where we might find errors.
"""""""""""""""""""""""""""""""""
``on_analyzer_save(pkg, result)``
"""""""""""""""""""""""""""""""""
After an analyzer has saved some result for a package, this hook is called,
and it provides the package that we just ran the analysis for, along with
the loaded result. Typically, a result is structured to have the name
of the analyzer as key, and the result object that is defined in detail in
:ref:`analyzer_run_function`.
.. code-block:: python
def on_analyzer_save(pkg, result):
"""given a package and a result...
"""
print('Do something extra with a package analysis result here')
^^^^^^^^^^^^^^^^^^^^^^
Adding a New Hook Type

Binary file not shown.


View File

@@ -545,8 +545,8 @@ environment and have a single view of it in the filesystem.
The ``concretizer:unify`` config option was introduced in Spack 0.18 to
replace the ``concretization`` property. For reference,
``concretization: together`` is replaced by ``concretizer:unify:true``,
and ``concretization: separately`` is replaced by ``concretizer:unify:false``.
``concretization: separately`` is replaced by ``concretizer:unify:true``,
and ``concretization: together`` is replaced by ``concretizer:unify:false``.
.. admonition:: Re-concretization of user specs
@@ -799,7 +799,7 @@ directories.
select: [^mpi]
exclude: ['%pgi@18.5']
projections:
all: '{name}/{version}-{compiler.name}'
all: {name}/{version}-{compiler.name}
link: all
link_type: symlink
@@ -1013,7 +1013,7 @@ The following advanced example shows how generated targets can be used in a
SPACK ?= spack
.PHONY: all clean env
.PHONY: all clean fetch env
all: env
@@ -1022,6 +1022,9 @@ The following advanced example shows how generated targets can be used in a
env.mk: spack.lock
$(SPACK) -e . env depfile -o $@ --make-target-prefix spack
fetch: spack/fetch
$(info Environment fetched!)
env: spack/env
$(info Environment installed!)
@@ -1034,10 +1037,10 @@ The following advanced example shows how generated targets can be used in a
endif
When ``make`` is invoked, it first "remakes" the missing include ``env.mk``
from its rule, which triggers concretization. When done, the generated target
``spack/env`` is available. In the above example, the ``env`` target uses this generated
target as a prerequisite, meaning that it can make use of the installed packages in
its commands.
from its rule, which triggers concretization. When done, the generated targets
``spack/fetch`` and ``spack/env`` are available. In the above
example, the ``env`` target uses the latter as a prerequisite, meaning
that it can make use of the installed packages in its commands.
As it is typically undesirable to remake ``env.mk`` as part of ``make clean``,
the include is conditional.
@@ -1045,6 +1048,7 @@ the include is conditional.
.. note::
When including generated ``Makefile``\s, it is important to use
the ``--make-target-prefix`` flag and use the non-phony target
``<target-prefix>/env`` as prerequisite, instead of the phony target
``<target-prefix>/all``.
the ``--make-target-prefix`` flag and use the non-phony targets
``<target-prefix>/env`` and ``<target-prefix>/fetch`` as
prerequisites, instead of the phony targets ``<target-prefix>/all``
and ``<target-prefix>/fetch-all`` respectively.

Binary file not shown.


View File

@@ -1070,32 +1070,13 @@ Commits
Submodules
You can supply ``submodules=True`` to cause Spack to fetch submodules
recursively along with the repository at fetch time.
recursively along with the repository at fetch time. For more information
about git submodules see the manpage of git: ``man git-submodule``.
.. code-block:: python
version('1.0.1', tag='v1.0.1', submodules=True)
If a package needs more fine-grained control over submodules, define
``submodules`` to be a callable function that takes the package instance as
its only argument. The function should return a list of submodules to be fetched.
.. code-block:: python
def submodules(package):
submodules = []
if "+variant-1" in package.spec:
submodules.append("submodule_for_variant_1")
if "+variant-2" in package.spec:
submodules.append("submodule_for_variant_2")
return submodules
class MyPackage(Package):
version("0.1.0", submodules=submodules)
For more information about git submodules see the manpage of git: ``man
git-submodule``.
.. _github-fetch:
@@ -2794,256 +2775,6 @@ Suppose a user invokes ``spack install`` like this:
Spack will fail with a constraint violation, because the version of
MPICH requested is too low for the ``mpi`` requirement in ``foo``.
.. _custom-attributes:
------------------
Custom attributes
------------------
Often a package will need to provide attributes for dependents to query
various details about what it provides. While any number of custom defined
attributes can be implemented by a package, the four specific attributes
described below are always available on every package with default
implementations and the ability to customize with alternate implementations
in the case of virtual packages provided:
=========== =========================================== =====================
Attribute   Purpose                                     Default
=========== =========================================== =====================
``home``    The installation path for the package       ``spec.prefix``
``command`` An executable command for the package       | ``spec.name`` found in
                                                        | ``.home.bin``
``headers`` A list of headers provided by the package   | All headers searched
                                                        | recursively in
                                                        | ``.home.include``
``libs``    A list of libraries provided by the package | ``lib{spec.name}`` searched
                                                        | recursively in ``.home``
                                                        | starting with ``lib``,
                                                        | ``lib64``, then the rest
                                                        | of ``.home``
=========== =========================================== =====================
Each of these can be customized by implementing the relevant attribute
as a ``@property`` in the package's class:
.. code-block:: python
:linenos:
class Foo(Package):
...
@property
def libs(self):
# The library provided by Foo is libMyFoo.so
return find_libraries('libMyFoo', root=self.home, recursive=True)
A package may also provide a custom implementation of each attribute
for the virtual packages it provides by implementing the
``virtualpackagename_attributename`` property in the package's class.
The implementation used is the first one found from:
#. Specialized virtual: ``Package.virtualpackagename_attributename``
#. Generic package: ``Package.attributename``
#. Default
The use of customized attributes is demonstrated in the next example.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Example: Customized attributes for virtual packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider a package ``foo`` that can optionally provide two virtual
packages ``bar`` and ``baz``. When both are enabled the installation tree
appears as follows:
.. code-block:: console
include/foo.h
include/bar/bar.h
lib64/libFoo.so
lib64/libFooBar.so
baz/include/baz/baz.h
baz/lib/libFooBaz.so
The install tree shows that ``foo`` is providing the header ``include/foo.h``
and library ``lib64/libFoo.so`` in its install prefix. The virtual
package ``bar`` is providing ``include/bar/bar.h`` and library
``lib64/libFooBar.so``, also in ``foo``'s install prefix. The ``baz``
package, however, is provided in the ``baz`` subdirectory of ``foo``'s
prefix with the ``include/baz/baz.h`` header and ``lib/libFooBaz.so``
library. Such a package could implement the optional attributes as
follows:
.. code-block:: python
:linenos:
class Foo(Package):
...
variant('bar', default=False, description='Enable the Foo implementation of bar')
variant('baz', default=False, description='Enable the Foo implementation of baz')
...
provides('bar', when='+bar')
provides('baz', when='+baz')
....
# Just the foo headers
@property
def headers(self):
return find_headers('foo', root=self.home.include, recursive=False)
# Just the foo libraries
@property
def libs(self):
return find_libraries('libFoo', root=self.home, recursive=True)
# The header provided by the bar virtual package
@property
def bar_headers(self):
return find_headers('bar/bar.h', root=self.home.include, recursive=False)
# The library provided by the bar virtual package
@property
def bar_libs(self):
return find_libraries('libFooBar', root=self.home, recursive=True)
# The baz virtual package home
@property
def baz_home(self):
return self.prefix.baz
# The header provided by the baz virtual package
@property
def baz_headers(self):
return find_headers('baz/baz', root=self.baz_home.include, recursive=False)
# The library provided by the baz virtual package
@property
def baz_libs(self):
return find_libraries('libFooBaz', root=self.baz_home, recursive=True)
Now consider another package, ``foo-app``, depending on all three:
.. code-block:: python
:linenos:
class FooApp(CMakePackage):
...
depends_on('foo')
depends_on('bar')
depends_on('baz')
The resulting spec objects for its dependencies show the result of
the above attribute implementations:
.. code-block:: python
# The core headers and libraries of the foo package
>>> spec['foo']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['foo'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# home defaults to the package install prefix without an explicit implementation
>>> spec['foo'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# foo headers from the foo prefix
>>> spec['foo'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include/foo.h',
])
# foo include directories from the foo prefix
>>> spec['foo'].headers.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include']
# foo libraries from the foo prefix
>>> spec['foo'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64/libFoo.so',
])
# foo library directories from the foo prefix
>>> spec['foo'].libs.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64']
.. code-block:: python
# The virtual bar package in the same prefix as foo
# bar resolves to the foo package
>>> spec['bar']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['bar'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# home defaults to the foo prefix without either a Foo.bar_home
# or Foo.home implementation
>>> spec['bar'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# bar header in the foo prefix
>>> spec['bar'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include/bar/bar.h'
])
# bar include dirs from the foo prefix
>>> spec['bar'].headers.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/include']
# bar library from the foo prefix
>>> spec['bar'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64/libFooBar.so'
])
# bar library directories from the foo prefix
>>> spec['bar'].libs.directories
['/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64']
.. code-block:: python
# The virtual baz package in a subdirectory of foo's prefix
# baz resolves to the foo package
>>> spec['baz']
foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell
>>> spec['baz'].prefix
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6'
# baz_home implementation provides the subdirectory inside the foo prefix
>>> spec['baz'].home
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz'
# baz headers in the baz subdirectory of the foo prefix
>>> spec['baz'].headers
HeaderList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/include/baz/baz.h'
])
# baz include directories in the baz subdirectory of the foo prefix
>>> spec['baz'].headers.directories
[
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/include'
]
# baz libraries in the baz subdirectory of the foo prefix
>>> spec['baz'].libs
LibraryList([
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib/libFooBaz.so'
])
# baz library directories in the baz subdirectory of the foo prefix
>>> spec['baz'].libs.directories
[
'/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib'
]
.. _abstract-and-concrete:
-------------------------
@@ -5745,24 +5476,6 @@ Version Lists
Spack packages should list supported versions with the newest first.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using ``home`` vs ``prefix``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``home`` and ``prefix`` are both attributes that can be queried on a
package's dependencies, often when passing configure arguments pointing to the
location of a dependency. The difference is that while ``prefix`` is the
location on disk where a concrete package resides, ``home`` is the `logical`
location at which a package resides, which may differ from ``prefix`` in
the case of virtual packages or other special circumstances. For most use
cases inside a package, its dependency locations can be accessed via either
``self.spec['foo'].home`` or ``self.spec['foo'].prefix``. Specific packages
that should be consumed by dependents via ``.home`` instead of ``.prefix``
should be noted in their respective documentation.
See :ref:`custom-attributes` for more details and an example implementing
a custom ``home`` attribute.
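For instance, a dependent package might point a configure flag at a
dependency's logical location (a sketch; the ``--with-foo`` flag is invented):

.. code-block:: python

   def configure_args(self):
       # 'home' resolves to the logical location, which for a virtual
       # provider may differ from the concrete install prefix
       return ["--with-foo={0}".format(self.spec["foo"].home)]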
---------------------------
Packaging workflow commands
---------------------------

View File

@@ -7,7 +7,7 @@ bash, , , Compiler wrappers
tar, , , Extract/create archives
gzip, , , Compress/Decompress archives
unzip, , , Compress/Decompress archives
bzip2, , , Compress/Decompress archives
bzip, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
@@ -15,4 +15,4 @@ gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories
hg, , Optional, Manage Software Repositories
Python header files, , Optional (e.g. ``python3-dev`` on Debian), Bootstrapping from sources
Python header files, , Optional (e.g. ``python3-dev`` on Debian), Bootstrapping from sources

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.4 (commit b8eea9df2b4204ff27d204452cd46f5199a0b423)
* Version: 0.1.4 (commit 53fc4ac91e9b4c5e4079f15772503a80bece72ad)
argparse
--------

View File

@@ -85,21 +85,21 @@
"intel": [
{
"versions": ":",
"name": "x86-64",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"oneapi": [
{
"versions": ":",
"name": "x86-64",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": ":",
"name": "x86-64",
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
@@ -143,20 +143,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v2",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -214,20 +200,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v3",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -290,20 +262,6 @@
"name": "x86-64",
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
}
],
"oneapi": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": "2021.2.0:",
"name": "x86-64-v4",
"flags": "-march={name} -mtune=generic"
}
]
}
},
@@ -344,19 +302,22 @@
"intel": [
{
"versions": "16.0:",
"flags": "-march={name} -mtune={name}"
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"oneapi": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
],
"dpcpp": [
{
"versions": ":",
"flags": "-march={name} -mtune={name}"
"name": "pentium4",
"flags": "-march={name} -mtune=generic"
}
]
}

View File

@@ -308,68 +308,6 @@ def change_sed_delimiter(old_delim, new_delim, *filenames):
filter_file(double_quoted, '"%s"' % repl, f)
@contextmanager
def exploding_archive_catch(stage):
# Check for an exploding tarball, i.e. one that doesn't expand to
# a single directory. If the tarball *didn't* explode, move its
# contents to the staging source directory & remove the container
# directory. If the tarball did explode, just rename the tarball
# directory to the staging source directory.
#
# NOTE: The tar program on Mac OS X will encode HFS metadata in
# hidden files, which can end up *alongside* a single top-level
# directory. We initially ignore presence of hidden files to
# accommodate these "semi-exploding" tarballs but ensure the files
# are copied to the source directory.
# Expand all tarballs in their own directory to contain
# exploding tarballs.
tarball_container = os.path.join(stage.path,
"spack-expanded-archive")
mkdirp(tarball_container)
orig_dir = os.getcwd()
os.chdir(tarball_container)
try:
yield
# catch an exploding archive on successful extraction
os.chdir(orig_dir)
exploding_archive_handler(tarball_container, stage)
except Exception as e:
# return current directory context to previous on failure
os.chdir(orig_dir)
raise e
@system_path_filter
def exploding_archive_handler(tarball_container, stage):
"""
Args:
tarball_container: where the archive was expanded to
stage: Stage object referencing filesystem location
where archive is being expanded
"""
files = os.listdir(tarball_container)
non_hidden = [f for f in files if not f.startswith('.')]
if len(non_hidden) == 1:
src = os.path.join(tarball_container, non_hidden[0])
if os.path.isdir(src):
stage.srcdir = non_hidden[0]
shutil.move(src, stage.source_path)
if len(files) > 1:
files.remove(non_hidden[0])
for f in files:
src = os.path.join(tarball_container, f)
dest = os.path.join(stage.path, f)
shutil.move(src, dest)
os.rmdir(tarball_container)
else:
# This is a non-directory entry (e.g., a patch file) so simply
# rename the tarball container to be the source path.
shutil.move(tarball_container, stage.source_path)
else:
shutil.move(tarball_container, stage.source_path)
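For context, a sketch of how this pair was intended to be used follows; the
``stage`` and ``archive_file`` names and the ``which`` helper are illustrative,
not part of this module:

with exploding_archive_catch(stage):
    # extraction runs inside the catch; on success the handler
    # normalizes "exploded" vs single-directory archive layouts
    untar = which("tar")  # e.g., spack.util.executable.which
    untar("-xf", archive_file)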
@system_path_filter(arg_slice=slice(1))
def get_owner_uid(path, err_msg=None):
if not os.path.exists(path):

View File

@@ -1072,15 +1072,3 @@ def __exit__(self, exc_type, exc_value, tb):
# Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
return True
class classproperty(object):
"""Non-data descriptor to evaluate a class-level property. The function that performs
the evaluation is injected at creation time and take an instance (could be None) and
an owner (i.e. the class that originated the instance)
"""
def __init__(self, callback):
self.callback = callback
def __get__(self, instance, owner):
return self.callback(owner)
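For context, a short usage sketch (the ``Example`` class is invented for
illustration):

class Example(object):
    @classproperty
    def summary(cls):
        # 'cls' is the owner class, even when accessed on an instance
        return "summary for %s" % cls.__name__

assert Example.summary == "summary for Example"
assert Example().summary == "summary for Example"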

View File

@@ -0,0 +1,42 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This package contains code for creating analyzers to extract Application
Binary Interface (ABI) information, along with simple analyses that just load
existing metadata.
"""
from __future__ import absolute_import
import llnl.util.tty as tty
import spack.paths
import spack.util.classes
mod_path = spack.paths.analyzers_path
analyzers = spack.util.classes.list_classes("spack.analyzers", mod_path)
# The base analyzer does not have a name, and cannot do dict comprehension
analyzer_types = {}
for a in analyzers:
if not hasattr(a, "name"):
continue
analyzer_types[a.name] = a
def list_all():
"""A helper function to list all analyzers and their descriptions
"""
for name, analyzer in analyzer_types.items():
print("%-25s: %-35s" % (name, analyzer.description))
def get_analyzer(name):
"""Courtesy function to retrieve an analyzer, and exit on error if it
does not exist.
"""
if name in analyzer_types:
return analyzer_types[name]
tty.die("Analyzer %s does not exist" % name)

View File

@@ -0,0 +1,116 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""An analyzer base provides basic functions to run the analysis, save results,
and (optionally) interact with a Spack Monitor
"""
import os
import llnl.util.tty as tty
import spack.config
import spack.hooks
import spack.monitor
import spack.util.path
def get_analyzer_dir(spec, analyzer_dir=None):
"""
Given a spec, return the directory to save analyzer results.
We create the directory if it does not exist. We also check that the
spec has an associated package. An analyzer cannot be run if the spec isn't
associated with a package. If the user provides a custom analyzer_dir,
we use it over checking the config and the default at ~/.spack/analyzers
"""
# An analyzer cannot be run if the spec isn't associated with a package
if not hasattr(spec, "package") or not spec.package:
tty.die("A spec can only be analyzed with an associated package.")
# The top level directory is in the user home, or a custom location
if not analyzer_dir:
analyzer_dir = spack.util.path.canonicalize_path(
spack.config.get('config:analyzers_dir', '~/.spack/analyzers'))
# We follow the same convention as the spec install (this could be better)
package_prefix = os.sep.join(spec.package.prefix.split('/')[-3:])
meta_dir = os.path.join(analyzer_dir, package_prefix)
return meta_dir
class AnalyzerBase(object):
def __init__(self, spec, dirname=None):
"""
Verify that the analyzer has correct metadata.
An Analyzer is intended to run on one spec install, so the spec
with its associated package is required on init. The child analyzer
class should define an init function that calls this init via ``super()``, and
also check that the analyzer has all the dependencies it
needs. If an analyzer subclass does not have dependencies, it does not
need to define an init. An Analyzer should not be allowed to proceed
if one or more dependencies are missing. The dirname, if defined,
is an optional directory name to save to (instead of the default meta
spack directory).
"""
self.spec = spec
self.dirname = dirname
self.meta_dir = os.path.dirname(spec.package.install_log_path)
for required in ["name", "outfile", "description"]:
if not hasattr(self, required):
tty.die("Please add a %s attribute on the analyzer." % required)
def run(self):
"""
Given a spec with an installed package, run the analyzer on it.
"""
raise NotImplementedError
@property
def output_dir(self):
"""
The full path to the output directory.
This includes the nested analyzer directory structure. This function
does not create anything.
"""
if not hasattr(self, "_output_dir"):
output_dir = get_analyzer_dir(self.spec, self.dirname)
self._output_dir = os.path.join(output_dir, self.name)
return self._output_dir
def save_result(self, result, overwrite=False):
"""
Save a result to the associated spack monitor, if defined.
This function is on the level of the analyzer because it might be
the case that the result is large (appropriate for a single request)
or that the data is organized differently (e.g., more than one
request per result). If an analyzer subclass needs to override
this function with a custom save, that is appropriate to do (see abi).
"""
# We maintain the structure in json with the analyzer as key so
# that in the future, we could upload to a monitor server
if result[self.name]:
outfile = os.path.join(self.output_dir, self.outfile)
# Only try to create the results directory if we have a result
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
# Don't overwrite an existing result if overwrite is False
if os.path.exists(outfile) and not overwrite:
tty.info("%s exists and overwrite is False, skipping." % outfile)
else:
tty.info("Writing result to %s" % outfile)
spack.monitor.write_json(result[self.name], outfile)
# This hook runs after a save result
spack.hooks.on_analyzer_save(self.spec.package, result)

View File

@@ -0,0 +1,33 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""A configargs analyzer is a class of analyzer that typically just uploads
already existing metadata about config args from a package spec install
directory."""
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class ConfigArgs(AnalyzerBase):
name = "config_args"
outfile = "spack-analyzer-config-args.json"
description = "config args loaded from spack-configure-args.txt"
def run(self):
"""
Load the configure args and save them as json.
The run function will find the spack-configure-args.txt file in the
package install directory, and read it into a json structure that has
the name of the analyzer as the key.
"""
config_file = os.path.join(self.meta_dir, "spack-configure-args.txt")
return {self.name: spack.monitor.read_file(config_file)}

View File

@@ -0,0 +1,54 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""An environment analyzer will read and parse the environment variables
file in the installed package directory, generating a json file that has
an index of key, value pairs for environment variables."""
import os
import llnl.util.tty as tty
from spack.util.environment import EnvironmentModifications
from .analyzer_base import AnalyzerBase
class EnvironmentVariables(AnalyzerBase):
name = "environment_variables"
outfile = "spack-analyzer-environment-variables.json"
description = "environment variables parsed from spack-build-env.txt"
def run(self):
"""
Load, parse, and save spack-build-env.txt to analyzers.
Read in the spack-build-env.txt file from the package install
directory and parse the environment variables into key value pairs.
The result should have the key for the analyzer, the name.
"""
env_file = os.path.join(self.meta_dir, "spack-build-env.txt")
return {self.name: self._read_environment_file(env_file)}
def _read_environment_file(self, filename):
"""
Read and parse the environment file.
Given an environment file, we want to read it, split by semicolons
and new lines, and then parse down to the subset of SPACK_* variables.
We assume that all spack prefix variables are not secrets, and unlike
the install_manifest.json, we don't (at least to start) parse the values
to remove path prefixes specific to user systems.
"""
if not os.path.exists(filename):
tty.warn("No environment file available")
return
mods = EnvironmentModifications.from_sourcing_file(filename)
env = {}
mods.apply_modifications(env)
return env

View File

@@ -0,0 +1,31 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The install files json file (install_manifest.json) already exists in
the package install folder, so this analyzer simply moves it to the user
analyzer folder for further processing."""
import os
import spack.monitor
from .analyzer_base import AnalyzerBase
class InstallFiles(AnalyzerBase):
name = "install_files"
outfile = "spack-analyzer-install-files.json"
description = "install file listing read from install_manifest.json"
def run(self):
"""
Load in the install_manifest.json and save to analyzers.
We write it out to the analyzers folder, with key as the analyzer name.
"""
manifest_file = os.path.join(self.meta_dir, "install_manifest.json")
return {self.name: spack.monitor.read_json(manifest_file)}

View File

@@ -0,0 +1,114 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import llnl.util.tty as tty
import spack
import spack.binary_distribution
import spack.bootstrap
import spack.error
import spack.hooks
import spack.monitor
import spack.package_base
import spack.repo
import spack.util.executable
from .analyzer_base import AnalyzerBase
class Libabigail(AnalyzerBase):
name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"
def __init__(self, spec, dirname=None):
"""
init for an analyzer ensures we have all needed dependencies.
For the libabigail analyzer, this means Libabigail.
Since the output for libabigail is one file per object, we communicate
with the monitor multiple times.
"""
super(Libabigail, self).__init__(spec, dirname)
# This doesn't seem to work to import on the module level
tty.debug("Preparing to use Libabigail, will install if missing.")
with spack.bootstrap.ensure_bootstrap_configuration():
# libabigail won't install lib/bin/share without docs
spec = spack.spec.Spec("libabigail+docs")
spack.bootstrap.ensure_executables_in_path_or_raise(
["abidw"], abstract_spec=spec
)
self.abidw = spack.util.executable.which('abidw')
def run(self):
"""
Run libabigail, and save results to filename.
This run function differs in that we write as we generate and then
return a dict with the analyzer name as the key, and the value of a
dict of results, where the key is the object name, and the value is
the output file written to.
"""
manifest = spack.binary_distribution.get_buildfile_manifest(self.spec)
# This result will store a path to each file
result = {}
# Generate an output file for each binary or object
for obj in manifest.get("binary_to_relocate_fullpath", []):
# We want to preserve the path in the install directory in case
# a library has an equivalently named lib or executable, for example
outdir = os.path.dirname(obj.replace(self.spec.package.prefix,
'').strip(os.path.sep))
outfile = "spack-analyzer-libabigail-%s.xml" % os.path.basename(obj)
outfile = os.path.join(self.output_dir, outdir, outfile)
outdir = os.path.dirname(outfile)
# Create the output directory
if not os.path.exists(outdir):
os.makedirs(outdir)
# Sometimes libabigail segfaults and dumps core
try:
self.abidw(obj, "--out-file", outfile)
result[obj] = outfile
tty.info("Writing result to %s" % outfile)
except spack.error.SpackError:
tty.warn("Issue running abidw for %s" % obj)
return {self.name: result}
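# For example (hypothetical paths), an object installed at
#   <prefix>/lib/libfoo.so
# is written to
#   <output_dir>/lib/spack-analyzer-libabigail-libfoo.so.xml
# so equally named objects in different subdirectories do not collide.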
def save_result(self, result, overwrite=False):
"""
Read saved ABI results and upload to monitor server.
ABI results are saved to individual files, so each one needs to be
read and uploaded. Result here should be the lookup generated in run(),
where the key is the analyzer name and each value is a result file.
We currently upload the entire xml as text because libabigail can't
easily read gzipped xml, but this will be updated when it can.
"""
if not spack.monitor.cli:
return
name = self.spec.package.name
for obj, filename in result.get(self.name, {}).items():
# Don't include the prefix
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
# We've already saved the results to file during run
content = spack.monitor.read_file(filename)
# A result needs an analyzer, value or binary_value, and name
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
spack.hooks.on_analyzer_save(self.spec.package, {"libabigail": [data]})

View File

@@ -281,15 +281,15 @@ def _check_build_test_callbacks(pkgs, error_cls):
"""Ensure stand-alone test method is not included in build-time callbacks"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
test_callbacks = pkg_cls.build_time_test_callbacks
pkg = spack.repo.get(pkg_name)
test_callbacks = pkg.build_time_test_callbacks
if test_callbacks and 'test' in test_callbacks:
msg = ('{0} package contains "test" method in '
'build_time_test_callbacks')
instr = ('Remove "test" from: [{0}]'
.format(', '.join(test_callbacks)))
errors.append(error_cls(msg.format(pkg_name), [instr]))
errors.append(error_cls(msg.format(pkg.name), [instr]))
return errors
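# For example, a package hypothetically declaring
#   build_time_test_callbacks = ['check', 'test']
# yields the instruction: Remove "test" from: [check, test]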
@@ -298,14 +298,13 @@ def _check_build_test_callbacks(pkgs, error_cls):
def _check_patch_urls(pkgs, error_cls):
"""Ensure that patches fetched from GitHub have stable sha256 hashes."""
github_patch_url_re = (
r"^https?://(?:patch-diff\.)?github(?:usercontent)?\.com/"
".+/.+/(?:commit|pull)/[a-fA-F0-9]*.(?:patch|diff)"
r"^https?://github\.com/.+/.+/(?:commit|pull)/[a-fA-F0-9]*.(?:patch|diff)"
)
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
for condition, patches in pkg_cls.patches.items():
pkg = spack.repo.get(pkg_name)
for condition, patches in pkg.patches.items():
for patch in patches:
if not isinstance(patch, spack.patch.UrlPatch):
continue
@@ -317,7 +316,7 @@ def _check_patch_urls(pkgs, error_cls):
if not patch.url.endswith(full_index_arg):
errors.append(error_cls(
"patch URL in package {0} must end with {1}".format(
pkg_cls.name, full_index_arg,
pkg.name, full_index_arg,
),
[patch.url],
))
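# A compliant URL then looks like (hypothetical repository and commit,
# assuming full_index_arg is '?full_index=1'):
#   https://github.com/org/repo/commit/0123abcd.patch?full_index=1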
@@ -331,21 +330,21 @@ def _linting_package_file(pkgs, error_cls):
"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
# Does the homepage have http, and if so, does https work?
if pkg_cls.homepage.startswith('http://'):
https = re.sub("http", "https", pkg_cls.homepage, 1)
if pkg.homepage.startswith('http://'):
https = re.sub("http", "https", pkg.homepage, 1)
try:
response = urlopen(https)
except Exception as e:
msg = 'Error with attempting https for "{0}": '
errors.append(error_cls(msg.format(pkg_cls.name), [str(e)]))
errors.append(error_cls(msg.format(pkg.name), [str(e)]))
continue
if response.getcode() == 200:
msg = 'Package "{0}" uses http but has a valid https endpoint.'
errors.append(msg.format(pkg_cls.name))
errors.append(msg.format(pkg.name))
return llnl.util.lang.dedupe(errors)
@@ -355,10 +354,10 @@ def _unknown_variants_in_directives(pkgs, error_cls):
"""Report unknown or wrong variants in directives for this package"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
# Check "conflicts" directive
for conflict, triggers in pkg_cls.conflicts.items():
for conflict, triggers in pkg.conflicts.items():
for trigger, _ in triggers:
vrn = spack.spec.Spec(conflict)
try:
@@ -371,34 +370,34 @@ def _unknown_variants_in_directives(pkgs, error_cls):
# When os and target constraints can be created independently of
# the platform, TODO change this back to add an error.
errors.extend(_analyze_variants_in_directive(
pkg_cls, spack.spec.Spec(trigger),
pkg, spack.spec.Spec(trigger),
directive='conflicts', error_cls=error_cls
))
errors.extend(_analyze_variants_in_directive(
pkg_cls, vrn, directive='conflicts', error_cls=error_cls
pkg, vrn, directive='conflicts', error_cls=error_cls
))
# Check "depends_on" directive
for _, triggers in pkg_cls.dependencies.items():
for _, triggers in pkg.dependencies.items():
triggers = list(triggers)
for trigger in list(triggers):
vrn = spack.spec.Spec(trigger)
errors.extend(_analyze_variants_in_directive(
pkg_cls, vrn, directive='depends_on', error_cls=error_cls
pkg, vrn, directive='depends_on', error_cls=error_cls
))
# Check "patch" directive
for _, triggers in pkg_cls.provided.items():
for _, triggers in pkg.provided.items():
triggers = [spack.spec.Spec(x) for x in triggers]
for vrn in triggers:
errors.extend(_analyze_variants_in_directive(
pkg_cls, vrn, directive='patch', error_cls=error_cls
pkg, vrn, directive='patch', error_cls=error_cls
))
# Check "resource" directive
for vrn in pkg_cls.resources:
for vrn in pkg.resources:
errors.extend(_analyze_variants_in_directive(
pkg_cls, vrn, directive='resource', error_cls=error_cls
pkg, vrn, directive='resource', error_cls=error_cls
))
return llnl.util.lang.dedupe(errors)
@@ -409,15 +408,15 @@ def _unknown_variants_in_dependencies(pkgs, error_cls):
"""Report unknown dependencies and wrong variants for dependencies"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
for dependency_name, dependency_data in pkg_cls.dependencies.items():
for dependency_name, dependency_data in pkg.dependencies.items():
# No need to analyze virtual packages
if spack.repo.path.is_virtual(dependency_name):
continue
try:
dependency_pkg_cls = spack.repo.path.get_pkg_class(dependency_name)
dependency_pkg = spack.repo.get(dependency_name)
except spack.repo.UnknownPackageError:
# This dependency is completely missing, so report
# and continue the analysis
@@ -433,8 +432,8 @@ def _unknown_variants_in_dependencies(pkgs, error_cls):
dependency_variants = dependency_edge.spec.variants
for name, value in dependency_variants.items():
try:
v, _ = dependency_pkg_cls.variants[name]
v.validate_or_raise(value, pkg_cls=dependency_pkg_cls)
v, _ = dependency_pkg.variants[name]
v.validate_or_raise(value, pkg=dependency_pkg)
except Exception as e:
summary = (pkg_name + ": wrong variant used for a "
"dependency in a 'depends_on' directive")
@@ -456,10 +455,10 @@ def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls
"""Report if version constraints used in directives are not satisfiable"""
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
filename = spack.repo.path.filename_for_package_name(pkg_name)
dependencies_to_check = []
for dependency_name, dependency_data in pkg_cls.dependencies.items():
for dependency_name, dependency_data in pkg.dependencies.items():
# Skip virtual dependencies for the time being; a check on
# their versions can be added later
if spack.repo.path.is_virtual(dependency_name):
@@ -470,19 +469,19 @@ def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls
)
for s in dependencies_to_check:
dependency_pkg_cls = None
dependency_pkg = None
try:
dependency_pkg_cls = spack.repo.path.get_pkg_class(s.name)
dependency_pkg = spack.repo.get(s.name)
assert any(
v.satisfies(s.versions) for v in list(dependency_pkg_cls.versions)
v.satisfies(s.versions) for v in list(dependency_pkg.versions)
)
except Exception:
summary = ("{0}: dependency on {1} cannot be satisfied "
"by known versions of {1.name}").format(pkg_name, s)
details = ['happening in ' + filename]
if dependency_pkg_cls is not None:
if dependency_pkg is not None:
details.append('known versions of {0.name} are {1}'.format(
s, ', '.join([str(x) for x in dependency_pkg_cls.versions])
s, ', '.join([str(x) for x in dependency_pkg.versions])
))
errors.append(error_cls(summary=summary, details=details))
@@ -500,7 +499,7 @@ def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
for name, v in constraint.variants.items():
try:
variant, _ = pkg.variants[name]
variant.validate_or_raise(v, pkg_cls=pkg)
variant.validate_or_raise(v, pkg=pkg)
except variant_exceptions as e:
summary = pkg.name + ': wrong variant in "{0}" directive'
summary = summary.format(directive)

View File

@@ -624,10 +624,14 @@ def get_buildfile_manifest(spec):
"""
data = {"text_to_relocate": [], "binary_to_relocate": [],
"link_to_relocate": [], "other": [],
"binary_to_relocate_fullpath": []}
"binary_to_relocate_fullpath": [], "offsets": {}}
blacklist = (".spack", "man")
# Get all the paths we will want to relocate in binaries
paths_to_relocate = [s.prefix for s in spec.traverse(root=True)]
paths_to_relocate.append(spack.store.layout.root)
# Do this during tarball creation to save time when the tarball is unpacked.
# Used by make_package_relative to determine binaries to change.
for root, dirs, files in os.walk(spec.prefix, topdown=True):
@@ -662,6 +666,11 @@ def get_buildfile_manifest(spec):
(m_subtype in ('x-mach-binary')
and sys.platform == 'darwin') or
(not filename.endswith('.o'))):
# Last path to relocate is the layout root, which is a substring
# of the others
indices = relocate.compute_indices(path_name, paths_to_relocate)
data['offsets'][rel_path_name] = indices
data['binary_to_relocate'].append(rel_path_name)
data['binary_to_relocate_fullpath'].append(path_name)
added = True
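# A minimal sketch (an assumption, not Spack's actual implementation) of
# how such offsets could be computed: record every byte offset at which
# any of the prefixes occurs in the binary, so later relocation can patch
# those positions without rescanning the whole file:
def compute_indices_sketch(path, prefixes):
    with open(path, 'rb') as f:
        data = f.read()
    offsets = {}
    for prefix in prefixes:
        needle = prefix.encode('utf-8')
        pos = data.find(needle)
        while pos != -1:
            offsets[pos] = prefix
            pos = data.find(needle, pos + 1)
    return offsets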
@@ -700,6 +709,7 @@ def write_buildinfo_file(spec, workdir, rel=False):
buildinfo['relocate_binaries'] = manifest['binary_to_relocate']
buildinfo['relocate_links'] = manifest['link_to_relocate']
buildinfo['prefix_to_hash'] = prefix_to_hash
buildinfo['offsets'] = manifest['offsets']
filename = buildinfo_file_name(workdir)
with open(filename, 'w') as outfile:
outfile.write(syaml.dump(buildinfo, default_flow_style=True))
@@ -1473,11 +1483,25 @@ def is_backup_file(file):
# If we are not installing back to the same install tree do the relocation
if old_prefix != new_prefix:
files_to_relocate = [os.path.join(workdir, filename)
for filename in buildinfo.get('relocate_binaries')
]
# Relocate links to the new install prefix
links = [link for link in buildinfo.get('relocate_links', [])]
relocate.relocate_links(
links, old_layout_root, old_prefix, new_prefix
)
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names, prefix_to_prefix_text)
# If the buildcache was not created with relativized rpaths
# do the relocation of path in binaries
# do the relocation of rpaths in binaries
# TODO: Is this necessary? How are null-terminated strings handled
# in the rpath header?
files_to_relocate = [
os.path.join(workdir, filename)
for filename in buildinfo.get('relocate_binaries')
]
platform = spack.platforms.by_name(spec.platform)
if 'macho' in platform.binary_formats:
relocate.relocate_macho_binaries(files_to_relocate,
@@ -1493,25 +1517,11 @@ def is_backup_file(file):
prefix_to_prefix_bin, rel,
old_prefix,
new_prefix)
# Relocate links to the new install prefix
links = [link for link in buildinfo.get('relocate_links', [])]
relocate.relocate_links(
links, old_layout_root, old_prefix, new_prefix
)
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names, prefix_to_prefix_text)
paths_to_relocate = [old_prefix, old_layout_root]
paths_to_relocate.extend(prefix_to_hash.keys())
files_to_relocate = list(filter(
lambda pathname: not relocate.file_is_relocatable(
pathname, paths_to_relocate=paths_to_relocate),
map(lambda filename: os.path.join(workdir, filename),
buildinfo['relocate_binaries'])))
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate, prefix_to_prefix_bin)
# If offsets is None, we will recompute offsets when needed
offsets = buildinfo.get('offsets', None)
relocate.relocate_text_bin(
files_to_relocate, prefix_to_prefix_bin, offsets, workdir)
# If we are installing back to the same location
# relocate the sbang location if the spack directory changed

View File

@@ -80,41 +80,32 @@ def _try_import_from_store(module, query_spec, query_info=None):
for candidate_spec in installed_specs:
pkg = candidate_spec['python'].package
module_paths = [
module_paths = {
os.path.join(candidate_spec.prefix, pkg.purelib),
os.path.join(candidate_spec.prefix, pkg.platlib),
] # type: list[str]
path_before = list(sys.path)
# NOTE: try module_paths first and last; trying them last allows an existing
# version in the path to be picked up and used (possibly depending on
# something in the store), while trying them first allows the bootstrap
# version to work when an incompatible version is in sys.path
orders = [
module_paths + sys.path,
sys.path + module_paths,
]
for path in orders:
sys.path = path
try:
_fix_ext_suffix(candidate_spec)
if _python_import(module):
msg = ('[BOOTSTRAP MODULE {0}] The installed spec "{1}/{2}" '
'provides the "{0}" Python module').format(
module, query_spec, candidate_spec.dag_hash()
)
tty.debug(msg)
if query_info is not None:
query_info['spec'] = candidate_spec
return True
except Exception as e:
msg = ('unexpected error while trying to import module '
'"{0}" from spec "{1}" [error="{2}"]')
tty.warn(msg.format(module, candidate_spec, str(e)))
else:
msg = "Spec {0} did not provide module {1}"
tty.warn(msg.format(candidate_spec, module))
}
sys.path.extend(module_paths)
sys.path = path_before
try:
_fix_ext_suffix(candidate_spec)
if _python_import(module):
msg = ('[BOOTSTRAP MODULE {0}] The installed spec "{1}/{2}" '
'provides the "{0}" Python module').format(
module, query_spec, candidate_spec.dag_hash()
)
tty.debug(msg)
if query_info is not None:
query_info['spec'] = candidate_spec
return True
except Exception as e:
msg = ('unexpected error while trying to import module '
'"{0}" from spec "{1}" [error="{2}"]')
tty.warn(msg.format(module, candidate_spec, str(e)))
else:
msg = "Spec {0} did not provide module {1}"
tty.warn(msg.format(candidate_spec, module))
sys.path = sys.path[:-3]
return False
@@ -465,10 +456,9 @@ def _make_bootstrapper(conf):
return _bootstrap_methods[btype](conf)
def source_is_enabled_or_raise(conf):
"""Raise ValueError if the source is not enabled for bootstrapping"""
def _validate_source_is_trusted(conf):
trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
if not trusted.get(name, False):
if name not in trusted:
raise ValueError('source is not trusted')
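# With a bootstrap config such as (hypothetical):
#   bootstrap:
#     trusted:
#       github-actions: true
# a source named 'github-actions' passes, while any name not marked
# trusted raises ValueError.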
@@ -539,7 +529,7 @@ def ensure_module_importable_or_raise(module, abstract_spec=None):
for current_config in bootstrapping_sources():
with h.forward(current_config['name']):
source_is_enabled_or_raise(current_config)
_validate_source_is_trusted(current_config)
b = _make_bootstrapper(current_config)
if b.try_import(module, abstract_spec):
@@ -581,7 +571,7 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
for current_config in bootstrapping_sources():
with h.forward(current_config['name']):
source_is_enabled_or_raise(current_config)
_validate_source_is_trusted(current_config)
b = _make_bootstrapper(current_config)
if b.try_search_path(executables, abstract_spec):
@@ -652,10 +642,10 @@ def _add_compilers_if_missing():
def _add_externals_if_missing():
search_list = [
# clingo
spack.repo.path.get_pkg_class('cmake'),
spack.repo.path.get_pkg_class('bison'),
spack.repo.path.get('cmake'),
spack.repo.path.get('bison'),
# GnuPG
spack.repo.path.get_pkg_class('gawk')
spack.repo.path.get('gawk')
]
detected_packages = spack.detection.by_executable(search_list)
spack.detection.update_configuration(detected_packages, scope='bootstrap')

View File

@@ -1259,14 +1259,6 @@ def install(self, spec, prefix):
for f in glob.glob('%s/intel*log' % tmpdir):
install(f, dst)
@run_after('install')
def validate_install(self):
# Sometimes the installer exits with an error but doesn't pass a
# non-zero exit code to spack. Check for the existence of a 'bin'
# directory to catch this error condition.
if not os.path.exists(self.prefix.bin):
raise InstallError('The installer has failed to install anything.')
@run_after('install')
def configure_rpath(self):
if '+rpath' not in self.spec:

View File

@@ -45,16 +45,18 @@ def component_dir(self):
raise NotImplementedError
@property
def component_prefix(self):
def component_path(self):
"""Path to component <prefix>/<component>/<version>."""
return self.prefix.join(join_path(self.component_dir, self.spec.version))
return join_path(self.prefix, self.component_dir, str(self.spec.version))
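# For example (hypothetical paths), a component with
# component_dir == 'mkl', version 2022.0.2 and prefix
# /opt/spack/intel-oneapi-mkl resolves to
#   /opt/spack/intel-oneapi-mkl/mkl/2022.0.2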
def install(self, spec, prefix):
self.install_component(basename(self.url_for_version(spec.version)))
def install_component(self, installer_path):
def install(self, spec, prefix, installer_path=None):
"""Shared install method for all oneapi packages."""
# intel-oneapi-compilers overrides the installer_path when
# installing fortran, which comes from a spack resource
if installer_path is None:
installer_path = basename(self.url_for_version(spec.version))
if platform.system() == 'Linux':
# Intel installer assumes and enforces that all components
# are installed into a single prefix. Spack wants to
@@ -75,7 +77,7 @@ def install_component(self, installer_path):
bash = Executable('bash')
# The installer writes files in ~/intel; set HOME so they go to the prefix
bash.add_default_env('HOME', self.prefix)
bash.add_default_env('HOME', prefix)
# Installer checks $XDG_RUNTIME_DIR/.bootstrapper_lock_file as well
bash.add_default_env('XDG_RUNTIME_DIR',
join_path(self.stage.path, 'runtime'))
@@ -83,13 +85,13 @@ def install_component(self, installer_path):
bash(installer_path,
'-s', '-a', '-s', '--action', 'install',
'--eula', 'accept',
'--install-dir', self.prefix)
'--install-dir', prefix)
if getpass.getuser() == 'root':
shutil.rmtree('/var/intel/installercache', ignore_errors=True)
# Some installers have a bug and do not return an error code when failing
if not isdir(join_path(self.prefix, self.component_dir)):
if not isdir(join_path(prefix, self.component_dir)):
raise RuntimeError('install failed')
def setup_run_environment(self, env):
@@ -102,7 +104,7 @@ def setup_run_environment(self, env):
$ source {prefix}/{component}/{version}/env/vars.sh
"""
env.extend(EnvironmentModifications.from_sourcing_file(
join_path(self.component_prefix, 'env', 'vars.sh')))
join_path(self.component_path, 'env', 'vars.sh')))
class IntelOneApiLibraryPackage(IntelOneApiPackage):
@@ -116,12 +118,12 @@ class IntelOneApiLibraryPackage(IntelOneApiPackage):
@property
def headers(self):
include_path = join_path(self.component_prefix, 'include')
include_path = join_path(self.component_path, 'include')
return find_headers('*', include_path, recursive=True)
@property
def libs(self):
lib_path = join_path(self.component_prefix, 'lib', 'intel64')
lib_path = join_path(self.component_path, 'lib', 'intel64')
lib_path = lib_path if isdir(lib_path) else dirname(lib_path)
return find_libraries('*', root=lib_path, shared=True, recursive=True)

View File

@@ -12,17 +12,14 @@
from llnl.util.filesystem import (
filter_file,
find,
find_all_headers,
find_libraries,
is_nonsymlink_exe_with_shebang,
path_contains_subdirectory,
same_path,
working_dir,
)
from llnl.util.lang import classproperty, match_predicate
from llnl.util.lang import match_predicate
from spack.directives import depends_on, extends
from spack.error import NoHeadersError, NoLibrariesError
from spack.package_base import PackageBase, run_after
@@ -77,21 +74,24 @@ def _std_args(cls):
'--no-index',
]
@classproperty
def homepage(cls):
if cls.pypi:
name = cls.pypi.split('/')[0]
@property
def homepage(self):
if self.pypi:
name = self.pypi.split('/')[0]
return 'https://pypi.org/project/' + name + '/'
@classproperty
def url(cls):
if cls.pypi:
return 'https://files.pythonhosted.org/packages/source/' + cls.pypi[0] + '/' + cls.pypi
@property
def url(self):
if self.pypi:
return (
'https://files.pythonhosted.org/packages/source/'
+ self.pypi[0] + '/' + self.pypi
)
@classproperty
def list_url(cls):
if cls.pypi:
name = cls.pypi.split('/')[0]
@property
def list_url(self):
if self.pypi:
name = self.pypi.split('/')[0]
return 'https://pypi.org/simple/' + name + '/'
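# Worked example, assuming a package with pypi = 'flake8/flake8-3.8.2.tar.gz':
#   homepage -> https://pypi.org/project/flake8/
#   url      -> https://files.pythonhosted.org/packages/source/f/flake8/flake8-3.8.2.tar.gz
#   list_url -> https://pypi.org/simple/flake8/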
@property
@@ -178,37 +178,6 @@ def install(self, spec, prefix):
with working_dir(self.build_directory):
pip(*args)
@property
def headers(self):
"""Discover header files in platlib."""
# Headers may be in either location
include = inspect.getmodule(self).include
platlib = inspect.getmodule(self).platlib
headers = find_all_headers(include) + find_all_headers(platlib)
if headers:
return headers
msg = 'Unable to locate {} headers in {} or {}'
raise NoHeadersError(msg.format(self.spec.name, include, platlib))
@property
def libs(self):
"""Discover libraries in platlib."""
# Remove py- prefix in package name
library = 'lib' + self.spec.name[3:].replace('-', '?')
root = inspect.getmodule(self).platlib
for shared in [True, False]:
libs = find_libraries(library, root, shared=shared, recursive=True)
if libs:
return libs
msg = 'Unable to recursively locate {} libraries in {}'
raise NoLibrariesError(msg.format(self.spec.name, root))
# Testing
def test(self):

View File

@@ -2,11 +2,11 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
from typing import Optional
import llnl.util.lang as lang
from spack.directives import extends
from spack.package_base import PackageBase, run_after
@@ -42,27 +42,27 @@ class RPackage(PackageBase):
extends('r')
@lang.classproperty
def homepage(cls):
if cls.cran:
return 'https://cloud.r-project.org/package=' + cls.cran
elif cls.bioc:
return 'https://bioconductor.org/packages/' + cls.bioc
@property
def homepage(self):
if self.cran:
return 'https://cloud.r-project.org/package=' + self.cran
elif self.bioc:
return 'https://bioconductor.org/packages/' + self.bioc
@lang.classproperty
def url(cls):
if cls.cran:
@property
def url(self):
if self.cran:
return (
'https://cloud.r-project.org/src/contrib/'
+ cls.cran + '_' + str(list(cls.versions)[0]) + '.tar.gz'
+ self.cran + '_' + str(list(self.versions)[0]) + '.tar.gz'
)
@lang.classproperty
def list_url(cls):
if cls.cran:
@property
def list_url(self):
if self.cran:
return (
'https://cloud.r-project.org/src/contrib/Archive/'
+ cls.cran + '/'
+ self.cran + '/'
)
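# Worked example, assuming a package with cran = 'caret' whose first
# listed version is 6.0-86:
#   homepage -> https://cloud.r-project.org/package=caret
#   url      -> https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz
#   list_url -> https://cloud.r-project.org/src/contrib/Archive/caret/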
@property

View File

@@ -5,7 +5,6 @@
import os
from typing import Optional
import llnl.util.lang as lang
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir
@@ -42,10 +41,10 @@ class RacketPackage(PackageBase):
name = None # type: Optional[str]
parallel = True
@lang.classproperty
def homepage(cls):
if cls.pkgs:
return 'https://pkgs.racket-lang.org/package/{0}'.format(cls.name)
@property
def homepage(self):
if self.pkgs:
return 'https://pkgs.racket-lang.org/package/{0}'.format(self.name)
@property
def build_directory(self):

View File

@@ -90,10 +90,9 @@ class ROCmPackage(PackageBase):
# https://llvm.org/docs/AMDGPUUsage.html
# Possible architectures
amdgpu_targets = (
'gfx701', 'gfx801', 'gfx802', 'gfx803', 'gfx900', 'gfx900:xnack-',
'gfx906', 'gfx908', 'gfx90a',
'gfx906:xnack-', 'gfx908:xnack-', 'gfx90a:xnack-', 'gfx90a:xnack+',
'gfx1010', 'gfx1011', 'gfx1012', 'gfx1030', 'gfx1031',
'gfx701', 'gfx801', 'gfx802', 'gfx803',
'gfx900', 'gfx906', 'gfx908', 'gfx90a', 'gfx1010',
'gfx1011', 'gfx1012'
)
variant('rocm', default=False, description='Enable ROCm support')

View File

@@ -771,13 +771,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
mirrors_to_check = {
'override': remote_mirror_override
}
# If we have a remote override and we want to generate the pipeline
# using --check-index-only, then the override mirror needs to be added
# to the configured mirrors when bindist.update() is run, or else we
# won't fetch its index and include it in our local cache.
spack.mirror.add(
'ci_pr_mirror', remote_mirror_override, cfg.default_modify_scope())
else:
spack.mirror.add(
'ci_pr_mirror', remote_mirror_override, cfg.default_modify_scope())
pipeline_artifacts_dir = artifacts_root
if not pipeline_artifacts_dir:
@@ -823,7 +819,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
user_artifacts_dir, ci_project_dir)
# Speed up staging by first fetching binary indices from all mirrors
# (including the override mirror we may have just added above).
# (including the per-PR mirror we may have just added above).
try:
bindist.binary_index.update()
except bindist.FetchCacheError as e:
@@ -857,7 +853,8 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
finally:
# Clean up remote mirror override if enabled
if remote_mirror_override:
spack.mirror.remove('ci_pr_mirror', cfg.default_modify_scope())
if spack_pipeline_type != 'spack_protected_branch':
spack.mirror.remove('ci_pr_mirror', cfg.default_modify_scope())
all_job_names = []
output_object = {}
@@ -1628,9 +1625,8 @@ def copy_stage_logs_to_artifacts(job_spec, job_log_dir):
job_log_dir (str): Path into which build log should be copied
"""
try:
pkg_cls = spack.repo.path.get_pkg_class(job_spec.name)
job_pkg = pkg_cls(job_spec)
tty.debug('job package: {0.fullname}'.format(job_pkg))
job_pkg = spack.repo.get(job_spec)
tty.debug('job package: {0}'.format(job_pkg))
stage_dir = job_pkg.stage.path
tty.debug('stage dir: {0}'.format(stage_dir))
build_out_src = os.path.join(stage_dir, 'spack-build-out.txt')

View File

@@ -8,10 +8,7 @@
import argparse
import os
import re
import shlex
import sys
from textwrap import dedent
from typing import List, Tuple
import ruamel.yaml as yaml
import six
@@ -150,58 +147,6 @@ def get_command(cmd_name):
return getattr(get_module(cmd_name), pname)
class _UnquotedFlags(object):
"""Use a heuristic in `.extract()` to detect whether the user is trying to set
multiple flags like the docker ENV attribute allows (e.g. 'cflags=-Os -pipe').
If the heuristic finds a match (which can be checked with `__bool__()`), a warning
message explaining how to quote multiple flags correctly can be generated with
`.report()`.
"""
flags_arg_pattern = re.compile(
r'^({0})=([^\'"].*)$'.format(
'|'.join(spack.spec.FlagMap.valid_compiler_flags()),
))
def __init__(self, all_unquoted_flag_pairs):
# type: (List[Tuple[re.Match, str]]) -> None
self._flag_pairs = all_unquoted_flag_pairs
def __bool__(self):
# type: () -> bool
return bool(self._flag_pairs)
@classmethod
def extract(cls, sargs):
# type: (str) -> _UnquotedFlags
all_unquoted_flag_pairs = [] # type: List[Tuple[re.Match, str]]
prev_flags_arg = None
for arg in shlex.split(sargs):
if prev_flags_arg is not None:
all_unquoted_flag_pairs.append((prev_flags_arg, arg))
prev_flags_arg = cls.flags_arg_pattern.match(arg)
return cls(all_unquoted_flag_pairs)
def report(self):
# type: () -> str
single_errors = [
'({0}) {1} {2} => {3}'.format(
i + 1, match.group(0), next_arg,
'{0}="{1} {2}"'.format(match.group(1), match.group(2), next_arg),
)
for i, (match, next_arg) in enumerate(self._flag_pairs)
]
return dedent("""\
Some compiler or linker flags were provided without quoting their arguments,
which now causes spack to try to parse the *next* argument as a spec component
such as a variant instead of an additional compiler or linker flag. If the
intent was to set multiple flags, try quoting them together as described below.
Possible flag quotation errors (with the correctly-quoted version after the =>):
{0}""").format('\n'.join(single_errors))
def parse_specs(args, **kwargs):
"""Convenience function for parsing arguments from specs. Handles common
exceptions and dies if there are errors.
@@ -212,28 +157,15 @@ def parse_specs(args, **kwargs):
sargs = args
if not isinstance(args, six.string_types):
sargs = ' '.join(args)
unquoted_flags = _UnquotedFlags.extract(sargs)
sargs = ' '.join(spack.util.string.quote(args))
specs = spack.spec.parse(sargs)
for spec in specs:
if concretize:
spec.concretize(tests=tests) # implies normalize
elif normalize:
spec.normalize(tests=tests)
try:
specs = spack.spec.parse(sargs)
for spec in specs:
if concretize:
spec.concretize(tests=tests) # implies normalize
elif normalize:
spec.normalize(tests=tests)
return specs
except spack.error.SpecError as e:
msg = e.message
if e.long_message:
msg += e.long_message
if unquoted_flags:
msg += '\n\n'
msg += unquoted_flags.report()
raise spack.error.SpackError(msg)
return specs
def matching_spec_from_env(spec):

View File

@@ -0,0 +1,116 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import llnl.util.tty as tty
import spack.analyzers
import spack.build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.fetch_strategy
import spack.monitor
import spack.paths
import spack.report
description = "run analyzers on installed packages"
section = "analysis"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='analyze_command')
sp.add_parser('list-analyzers',
description="list available analyzers",
help="show list of analyzers that are available to run.")
# This adds the monitor group to the subparser
spack.monitor.get_monitor_group(subparser)
# Run Parser
run_parser = sp.add_parser('run', description="run an analyzer",
help="provide the name of the analyzer to run.")
run_parser.add_argument(
'--overwrite', action='store_true',
help="re-analyze even if the output file already exists.")
run_parser.add_argument(
'-p', '--path', default=None,
dest='path',
help="write output to a different directory than ~/.spack/analyzers")
run_parser.add_argument(
'-a', '--analyzers', default=None,
dest="analyzers", action="append",
help="add an analyzer (defaults to all available)")
arguments.add_common_arguments(run_parser, ['spec'])
def analyze_spec(spec, analyzers=None, outdir=None, monitor=None, overwrite=False):
"""
Do an analysis for a spec, optionally adding monitoring.
We also allow the user to specify a custom output directory, e.g.:
    analyze_spec(spec, args.analyzers, args.outdir, monitor)
Args:
spec (spack.spec.Spec): spec object of installed package
analyzers (list): list of analyzer (keys) to run
monitor (spack.monitor.SpackMonitorClient): a monitor client
overwrite (bool): overwrite result if already exists
"""
analyzers = analyzers or list(spack.analyzers.analyzer_types.keys())
# Load the build environment from the spec install directory, and send
# the spec to the monitor if it's not known
if monitor:
monitor.load_build_environment(spec)
monitor.new_configuration([spec])
for name in analyzers:
# Instantiate the analyzer with the spec and outdir
analyzer = spack.analyzers.get_analyzer(name)(spec, outdir)
# Run the analyzer to get a json result - results are returned as
# a dictionary with a key corresponding to the analyzer type, so
# we can just update the data
result = analyzer.run()
# Send the result. We do them separately because:
# 1. each analyzer might have differently organized output
# 2. the size of a result can be large
analyzer.save_result(result, overwrite)
def analyze(parser, args, **kwargs):
# If the user wants to list analyzers, do so and exit
if args.analyze_command == "list-analyzers":
spack.analyzers.list_all()
sys.exit(0)
# handle active environment, if any
env = ev.active_environment()
# Get and disambiguate the spec (we should only have one)
specs = spack.cmd.parse_specs(args.spec)
if not specs:
tty.die("You must provide one or more specs to analyze.")
spec = spack.cmd.disambiguate_spec(specs[0], env)
# The user wants to monitor builds using github.com/spack/spack-monitor
# It is instantiated once here, and then available at spack.monitor.cli
monitor = None
if args.use_monitor:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
)
# Run the analysis
analyze_spec(spec, args.analyzers, args.path, monitor, args.overwrite)
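# Example invocations (hypothetical spec name):
#   $ spack analyze list-analyzers
#   $ spack analyze run -a install_files hdf5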

View File

@@ -99,8 +99,8 @@ def blame(parser, args):
blame_file = path
if not blame_file:
pkg_cls = spack.repo.path.get_pkg_class(args.package_or_file)
blame_file = pkg_cls.module.__file__.rstrip('c') # .pyc -> .py
pkg = spack.repo.get(args.package_or_file)
blame_file = pkg.module.__file__.rstrip('c') # .pyc -> .py
# get git blame for the package
with working_dir(spack.paths.prefix):

View File

@@ -379,9 +379,7 @@ def _remove(args):
def _mirror(args):
mirror_dir = spack.util.path.canonicalize_path(
os.path.join(args.root_dir, LOCAL_MIRROR_DIR)
)
mirror_dir = os.path.join(args.root_dir, LOCAL_MIRROR_DIR)
# TODO: Here we are adding gnuconfig manually, but this can be fixed
# TODO: as soon as we have an option to add to a mirror all the possible

View File

@@ -12,12 +12,11 @@
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.repo
import spack.spec
import spack.stage
import spack.util.crypto
from spack.package_base import preferred_version
from spack.util.naming import valid_fully_qualified_module_name
from spack.version import VersionBase, ver
from spack.version import Version, ver
description = "checksum available versions of a package"
section = "packaging"
@@ -55,8 +54,7 @@ def checksum(parser, args):
tty.die("`spack checksum` accepts package names, not URLs.")
# Get the package we're going to generate checksums for
pkg_cls = spack.repo.path.get_pkg_class(args.package)
pkg = pkg_cls(spack.spec.Spec(args.package))
pkg = spack.repo.get(args.package)
url_dict = {}
versions = args.versions
@@ -67,7 +65,7 @@ def checksum(parser, args):
remote_versions = None
for version in versions:
version = ver(version)
if not isinstance(version, VersionBase):
if not isinstance(version, Version):
tty.die("Cannot generate checksums for version lists or "
"version ranges. Use unambiguous versions.")
url = pkg.find_valid_url_for_version(version)

View File

@@ -58,21 +58,6 @@ def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['specs'])
def remove_python_cache():
for directory in [lib_path, var_path]:
for root, dirs, files in os.walk(directory):
for f in files:
if f.endswith('.pyc') or f.endswith('.pyo'):
fname = os.path.join(root, f)
tty.debug('Removing {0}'.format(fname))
os.remove(fname)
for d in dirs:
if d == '__pycache__':
dname = os.path.join(root, d)
tty.debug('Removing {0}'.format(dname))
shutil.rmtree(dname)
def clean(parser, args):
# If nothing was set, activate the default
if not any([args.specs, args.stage, args.downloads, args.failures,
@@ -85,7 +70,8 @@ def clean(parser, args):
for spec in specs:
msg = 'Cleaning build stage [{0}]'
tty.msg(msg.format(spec.short_spec))
spec.package.do_clean()
package = spack.repo.get(spec)
package.do_clean()
if args.stage:
tty.msg('Removing all temporary build stages')
@@ -109,7 +95,18 @@ def clean(parser, args):
if args.python_cache:
tty.msg('Removing python cache files')
remove_python_cache()
for directory in [lib_path, var_path]:
for root, dirs, files in os.walk(directory):
for f in files:
if f.endswith('.pyc') or f.endswith('.pyo'):
fname = os.path.join(root, f)
tty.debug('Removing {0}'.format(fname))
os.remove(fname)
for d in dirs:
if d == '__pycache__':
dname = os.path.join(root, d)
tty.debug('Removing {0}'.format(dname))
shutil.rmtree(dname)
if args.bootstrap:
bootstrap_prefix = spack.util.path.canonicalize_path(

View File

@@ -9,6 +9,7 @@
import spack.container
import spack.container.images
import spack.monitor
description = ("creates recipes to build images for different"
" container runtimes")
@@ -17,6 +18,7 @@
def setup_parser(subparser):
monitor_group = spack.monitor.get_monitor_group(subparser) # noqa
subparser.add_argument(
'--list-os', action='store_true', default=False,
help='list all the OS that can be used in the bootstrap phase and exit'
@@ -44,5 +46,14 @@ def containerize(parser, args):
raise ValueError(msg.format(config_file))
config = spack.container.validate(config_file)
# If we have a monitor request, add monitor metadata to config
if args.use_monitor:
config['spack']['monitor'] = {
"host": args.monitor_host,
"keep_going": args.monitor_keep_going,
"prefix": args.monitor_prefix,
"tags": args.monitor_tags
}
recipe = spack.container.recipe(config, last_phase=args.last_stage)
print(recipe)

View File

@@ -826,7 +826,7 @@ def get_versions(args, name):
spack.util.url.require_url_format(args.url)
if args.url.startswith('file://'):
valid_url = False # No point in spidering these
except (ValueError, TypeError):
except ValueError:
valid_url = False
if args.url is not None and args.template != 'bundle' and valid_url:

View File

@@ -39,9 +39,9 @@ def inverted_dependencies():
actual dependents.
"""
dag = {}
for pkg_cls in spack.repo.path.all_package_classes():
dag.setdefault(pkg_cls.name, set())
for dep in pkg_cls.dependencies:
for pkg in spack.repo.path.all_packages():
dag.setdefault(pkg.name, set())
for dep in pkg.dependencies:
deps = [dep]
# expand virtuals if necessary
@@ -49,7 +49,7 @@ def inverted_dependencies():
deps += [s.name for s in spack.repo.path.providers_for(dep)]
for d in deps:
dag.setdefault(d, set()).add(pkg_cls.name)
dag.setdefault(d, set()).add(pkg.name)
return dag

View File

@@ -87,7 +87,9 @@ def dev_build(self, args):
# Forces the build to run out of the source directory.
spec.constrain('dev_path=%s' % source_path)
spec.concretize()
package = spack.repo.get(spec)
if spec.installed:
tty.error("Already installed in %s" % spec.prefix)
@@ -107,7 +109,7 @@ def dev_build(self, args):
elif args.test == 'root':
tests = [spec.name for spec in specs]
spec.package.do_install(
package.do_install(
tests=tests,
make_jobs=args.jobs,
keep_prefix=args.keep_prefix,
@@ -120,5 +122,5 @@ def dev_build(self, args):
# drop into the build environment of the package?
if args.shell is not None:
spack.build_environment.setup_package(spec.package, dirty=False)
spack.build_environment.setup_package(package, dirty=False)
os.execvp(args.shell, [args.shell])

View File

@@ -54,9 +54,8 @@ def develop(parser, args):
tty.msg(msg)
continue
spec = spack.spec.Spec(entry['spec'])
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
pkg_cls(spec).stage.steal_source(abspath)
stage = spack.spec.Spec(entry['spec']).package.stage
stage.steal_source(abspath)
if not env.dev_specs:
tty.warn("No develop specs to download")

View File

@@ -559,11 +559,11 @@ def env_depfile(args):
target_prefix = args.make_target_prefix
def get_target(name):
# The `all` and `clean` targets are phony. It doesn't make sense to
# The `all`, `fetch` and `clean` targets are phony. It doesn't make sense to
# have /abs/path/to/env/metadir/{all,clean} targets. But it *does* make
# sense to have a prefix like `env/all`, `env/clean` when they are
# sense to have a prefix like `env/all`, `env/fetch`, `env/clean` when they are
# supposed to be included
if name in ('all', 'clean') and os.path.isabs(target_prefix):
if name in ('all', 'fetch-all', 'clean') and os.path.isabs(target_prefix):
return name
else:
return os.path.join(target_prefix, name)
@@ -571,6 +571,9 @@ def get_target(name):
def get_install_target(name):
return os.path.join(target_prefix, '.install', name)
def get_fetch_target(name):
return os.path.join(target_prefix, '.fetch', name)
for _, spec in env.concretized_specs():
for s in spec.traverse(root=True):
hash_to_spec[s.dag_hash()] = s
@@ -585,30 +588,46 @@ def get_install_target(name):
# All package install targets, not just roots.
all_install_targets = [get_install_target(h) for h in hash_to_spec.keys()]
# Fetch targets for all packages in the environment, not just roots.
all_fetch_targets = [get_fetch_target(h) for h in hash_to_spec.keys()]
buf = six.StringIO()
buf.write("""SPACK ?= spack
.PHONY: {} {}
.PHONY: {} {} {}
{}: {}
{}: {}
{}: {}
\t@touch $@
{}: {}
\t@touch $@
{}:
\t@mkdir -p {}
\t@mkdir -p {} {}
{}: | {}
\t$(info Fetching $(SPEC))
\t$(SPACK) -e '{}' fetch $(SPACK_FETCH_FLAGS) /$(notdir $@) && touch $@
{}: {}
\t$(info Installing $(SPEC))
\t{}$(SPACK) -e '{}' install $(SPACK_INSTALL_FLAGS) --only-concrete --only=package \
--no-add /$(notdir $@) && touch $@
""".format(get_target('all'), get_target('clean'),
""".format(get_target('all'), get_target('fetch-all'), get_target('clean'),
get_target('all'), get_target('env'),
get_target('fetch-all'), get_target('fetch'),
get_target('env'), ' '.join(root_install_targets),
get_target('dirs'), get_target('.install'),
get_target('.install/%'), get_target('dirs'),
get_target('fetch'), ' '.join(all_fetch_targets),
get_target('dirs'), get_target('.fetch'), get_target('.install'),
get_target('.fetch/%'), get_target('dirs'),
env.path,
get_target('.install/%'), get_target('.fetch/%'),
'+' if args.jobserver else '', env.path))
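# A rough sketch of the generated skeleton, assuming target_prefix 'env'
# and a single spec hash <hash> (hypothetical; the real output has one
# .fetch/.install target per spec in the environment):
#
#   SPACK ?= spack
#   .PHONY: env/all env/fetch-all env/clean
#   env/all: env/env
#   env/fetch-all: env/fetch
#   env/env: env/.install/<hash>
#   	@touch $@
#   env/fetch: env/.fetch/<hash>
#   	@touch $@
#   env/dirs:
#   	@mkdir -p env/.fetch env/.install
#   env/.fetch/%: | env/dirs
#   	$(SPACK) -e '<env>' fetch $(SPACK_FETCH_FLAGS) /$(notdir $@) && touch $@
#   env/.install/%: env/.fetch/%
#   	$(SPACK) -e '<env>' install $(SPACK_INSTALL_FLAGS) --only-concrete --only=package --no-add /$(notdir $@) && touch $@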
# Targets are of the form <prefix>/<name>: [<prefix>/<depname>]...,
@@ -638,9 +657,11 @@ def get_install_target(name):
# --make-target-prefix can be any existing directory we do not control,
# including empty string (which means deleting the containing folder
# would delete the folder with the Makefile)
buf.write("{}:\n\trm -f -- {} {}\n".format(
buf.write("{}:\n\trm -f -- {} {} {} {}\n".format(
get_target('clean'),
get_target('env'),
get_target('fetch'),
' '.join(all_fetch_targets),
' '.join(all_install_targets)))
makefile = buf.getvalue()

View File

@@ -52,8 +52,8 @@ def extensions(parser, args):
extendable_pkgs = []
for name in spack.repo.all_package_names():
pkg_cls = spack.repo.path.get_pkg_class(name)
if pkg_cls.extendable:
pkg = spack.repo.get(name)
if pkg.extendable:
extendable_pkgs.append(name)
colify(extendable_pkgs, indent=4)
@@ -64,12 +64,12 @@ def extensions(parser, args):
if len(spec) > 1:
tty.die("Can only list extensions for one package.")
if not spec[0].package.extendable:
tty.die("%s is not an extendable package." % spec[0].name)
env = ev.active_environment()
spec = cmd.disambiguate_spec(spec[0], env)
if not spec.package.extendable:
tty.die("%s is not an extendable package." % spec[0].name)
if not spec.package.extendable:
tty.die("%s does not have extensions." % spec.short_spec)

View File

@@ -5,7 +5,6 @@
from __future__ import print_function
import argparse
import errno
import os
import sys
@@ -94,21 +93,6 @@ def external_find(args):
# It's fine to not find any manifest file if we are doing the
# search implicitly (i.e. as part of 'spack external find')
pass
except Exception as e:
# For most exceptions, just print a warning and continue.
# Note that KeyboardInterrupt does not subclass Exception
# (so CTRL-C will terminate the program as expected).
skip_msg = ("Skipping manifest and continuing with other external "
"checks")
if ((isinstance(e, IOError) or isinstance(e, OSError)) and
e.errno in [errno.EPERM, errno.EACCES]):
# The manifest file does not have sufficient permissions enabled:
# print a warning and keep going
tty.warn("Unable to read manifest due to insufficient "
"permissions.", skip_msg)
else:
tty.warn("Unable to read manifest, unexpected error: {0}"
.format(str(e)), skip_msg)
# If the user didn't specify anything, search for build tools by default
if not args.tags and not args.all and not args.packages:
@@ -119,37 +103,34 @@ def external_find(args):
args.tags = []
# Construct the list of possible packages to be detected
pkg_cls_to_check = []
packages_to_check = []
# Add the packages that have been required explicitly
if args.packages:
pkg_cls_to_check = [
spack.repo.path.get_pkg_class(pkg) for pkg in args.packages
]
packages_to_check = list(spack.repo.get(pkg) for pkg in args.packages)
if args.tags:
allowed = set(spack.repo.path.packages_with_tags(*args.tags))
pkg_cls_to_check = [x for x in pkg_cls_to_check if x.name in allowed]
packages_to_check = [x for x in packages_to_check if x in allowed]
if args.tags and not pkg_cls_to_check:
if args.tags and not packages_to_check:
# If we arrived here, no explicit package was passed as an
# argument, which means we search all packages.
# Since tags are cached it's much faster to construct what we need
# to search directly, rather than filtering after the fact
pkg_cls_to_check = [
spack.repo.path.get_pkg_class(pkg_name)
for tag in args.tags
for pkg_name in spack.repo.path.packages_with_tags(tag)
packages_to_check = [
spack.repo.get(pkg) for tag in args.tags for pkg in
spack.repo.path.packages_with_tags(tag)
]
pkg_cls_to_check = list(set(pkg_cls_to_check))
packages_to_check = list(set(packages_to_check))
# If the list of packages is empty, search for every possible package
if not args.tags and not pkg_cls_to_check:
pkg_cls_to_check = list(spack.repo.path.all_package_classes())
if not args.tags and not packages_to_check:
packages_to_check = spack.repo.path.all_packages()
detected_packages = spack.detection.by_executable(
pkg_cls_to_check, path_hints=args.path)
packages_to_check, path_hints=args.path)
detected_packages.update(spack.detection.by_library(
pkg_cls_to_check, path_hints=args.path))
packages_to_check, path_hints=args.path))
new_entries = spack.detection.update_configuration(
detected_packages, scope=args.scope, buildable=not args.not_buildable
@@ -196,10 +177,7 @@ def _collect_and_consume_cray_manifest_files(
for directory in manifest_dirs:
for fname in os.listdir(directory):
if fname.endswith('.json'):
fpath = os.path.join(directory, fname)
tty.debug("Adding manifest file: {0}".format(fpath))
manifest_files.append(os.path.join(directory, fpath))
manifest_files.append(os.path.join(directory, fname))
if not manifest_files:
raise NoManifestFileError(
@@ -207,7 +185,6 @@ def _collect_and_consume_cray_manifest_files(
.format(cray_manifest.default_path))
for path in manifest_files:
tty.debug("Reading manifest file: " + path)
try:
cray_manifest.read(path, not dry_run)
except (spack.compilers.UnknownCompilerError, spack.error.SpackError) as e:
@@ -220,7 +197,7 @@ def _collect_and_consume_cray_manifest_files(
def external_list(args):
# Trigger a read of all packages, might take a long time.
list(spack.repo.path.all_package_classes())
list(spack.repo.path.all_packages())
# Print all the detectable packages
tty.msg("Detectable packages per repository")
for namespace, pkgs in sorted(spack.package_base.detectable_packages.items()):

View File

@@ -292,9 +292,10 @@ def print_tests(pkg):
v_specs = [spack.spec.Spec(v_name) for v_name in v_names]
for v_spec in v_specs:
try:
pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)
pkg = v_spec.package
pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
if has_test_method(pkg_cls):
names.append('{0}.test'.format(pkg_cls.name.lower()))
names.append('{0}.test'.format(pkg.name.lower()))
except spack.repo.UnknownPackageError:
pass
@@ -385,9 +386,7 @@ def print_virtuals(pkg):
def info(parser, args):
spec = spack.spec.Spec(args.package)
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
pkg = pkg_cls(spec)
pkg = spack.repo.get(args.package)
# Output core package information
header = section_title(

View File

@@ -17,6 +17,7 @@
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.fetch_strategy
import spack.monitor
import spack.paths
import spack.report
from spack.error import SpackError
@@ -104,6 +105,8 @@ def setup_parser(subparser):
'--cache-only', action='store_true', dest='cache_only', default=False,
help="only install package from binary mirrors")
monitor_group = spack.monitor.get_monitor_group(subparser) # noqa
subparser.add_argument(
'--include-build-deps', action='store_true', dest='include_build_deps',
default=False, help="""include build deps when installing from cache,
@@ -289,6 +292,15 @@ def install(parser, args, **kwargs):
parser.print_help()
return
# The user wants to monitor builds using github.com/spack/spack-monitor
if args.use_monitor:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
tags=args.monitor_tags,
save_local=args.monitor_save_local,
)
reporter = spack.report.collect_info(
spack.package_base.PackageInstaller, '_install_task', args.log_format, args)
if args.log_file:
@@ -329,6 +341,10 @@ def get_tests(specs):
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
# Tell the monitor about the specs
if args.use_monitor and specs:
monitor.new_configuration(specs)
tty.msg("Installing environment {0}".format(env.name))
with reporter('build'):
env.install_all(**kwargs)
@@ -374,6 +390,10 @@ def get_tests(specs):
except SpackError as e:
tty.debug(e)
reporter.concretization_report(e.message)
# Tell spack monitor about it
if args.use_monitor and abstract_specs:
monitor.failed_concretization(abstract_specs)
raise
# 2. Concrete specs from yaml files
@@ -434,4 +454,17 @@ def get_tests(specs):
# overwrite all concrete explicit specs from this build
kwargs['overwrite'] = [spec.dag_hash() for spec in specs]
# Update install_args with the monitor args, needed for build task
kwargs.update({
"monitor_keep_going": args.monitor_keep_going,
"monitor_host": args.monitor_host,
"use_monitor": args.use_monitor,
"monitor_prefix": args.monitor_prefix,
})
# If we are using the monitor, we send configs. and create build
# The dag_hash is the main package id
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))

View File

@@ -84,9 +84,9 @@ def match(p, f):
if f.match(p):
return True
pkg_cls = spack.repo.path.get_pkg_class(p)
if pkg_cls.__doc__:
return f.match(pkg_cls.__doc__)
pkg = spack.repo.get(p)
if pkg.__doc__:
return f.match(pkg.__doc__)
return False
else:
def match(p, f):
@@ -133,7 +133,7 @@ def get_dependencies(pkg):
@formatter
def version_json(pkg_names, out):
"""Print all packages with their latest versions."""
pkg_classes = [spack.repo.path.get_pkg_class(name) for name in pkg_names]
pkgs = [spack.repo.get(name) for name in pkg_names]
out.write('[\n')
@@ -147,14 +147,14 @@ def version_json(pkg_names, out):
' "maintainers": {5},\n'
' "dependencies": {6}'
'}}'.format(
pkg_cls.name,
VersionList(pkg_cls.versions).preferred(),
json.dumps([str(v) for v in reversed(sorted(pkg_cls.versions))]),
pkg_cls.homepage,
github_url(pkg_cls),
json.dumps(pkg_cls.maintainers),
json.dumps(get_dependencies(pkg_cls))
) for pkg_cls in pkg_classes
pkg.name,
VersionList(pkg.versions).preferred(),
json.dumps([str(v) for v in reversed(sorted(pkg.versions))]),
pkg.homepage,
github_url(pkg),
json.dumps(pkg.maintainers),
json.dumps(get_dependencies(pkg))
) for pkg in pkgs
])
out.write(pkg_latest)
# important: no trailing comma in JSON arrays
@@ -172,7 +172,7 @@ def html(pkg_names, out):
"""
# Read in all packages
pkg_classes = [spack.repo.path.get_pkg_class(name) for name in pkg_names]
pkgs = [spack.repo.get(name) for name in pkg_names]
# Start at 2 because the title of the page from Sphinx is id1.
span_id = 2
@@ -189,7 +189,7 @@ def head(n, span_id, title, anchor=None):
# Start with the number of packages, skipping the title and intro
# blurb, which we maintain in the RST file.
out.write('<p>\n')
out.write('Spack currently has %d mainline packages:\n' % len(pkg_classes))
out.write('Spack currently has %d mainline packages:\n' % len(pkgs))
out.write('</p>\n')
# Table of links to all packages
@@ -209,9 +209,9 @@ def head(n, span_id, title, anchor=None):
out.write('<hr class="docutils"/>\n')
# Output some text for each package.
for pkg_cls in pkg_classes:
out.write('<div class="section" id="%s">\n' % pkg_cls.name)
head(2, span_id, pkg_cls.name)
for pkg in pkgs:
out.write('<div class="section" id="%s">\n' % pkg.name)
head(2, span_id, pkg.name)
span_id += 1
out.write('<dl class="docutils">\n')
@@ -219,10 +219,10 @@ def head(n, span_id, title, anchor=None):
out.write('<dt>Homepage:</dt>\n')
out.write('<dd><ul class="first last simple">\n')
if pkg_cls.homepage:
if pkg.homepage:
out.write(('<li>'
'<a class="reference external" href="%s">%s</a>'
'</li>\n') % (pkg_cls.homepage, escape(pkg_cls.homepage, True)))
'</li>\n') % (pkg.homepage, escape(pkg.homepage, True)))
else:
out.write('No homepage\n')
out.write('</ul></dd>\n')
@@ -231,19 +231,19 @@ def head(n, span_id, title, anchor=None):
out.write('<dd><ul class="first last simple">\n')
out.write(('<li>'
'<a class="reference external" href="%s">%s/package.py</a>'
'</li>\n') % (github_url(pkg_cls), pkg_cls.name))
'</li>\n') % (github_url(pkg), pkg.name))
out.write('</ul></dd>\n')
if pkg_cls.versions:
if pkg.versions:
out.write('<dt>Versions:</dt>\n')
out.write('<dd>\n')
out.write(', '.join(
str(v) for v in reversed(sorted(pkg_cls.versions))))
str(v) for v in reversed(sorted(pkg.versions))))
out.write('\n')
out.write('</dd>\n')
for deptype in spack.dependency.all_deptypes:
deps = pkg_cls.dependencies_of_type(deptype)
deps = pkg.dependencies_of_type(deptype)
if deps:
out.write('<dt>%s Dependencies:</dt>\n' % deptype.capitalize())
out.write('<dd>\n')
@@ -256,7 +256,7 @@ def head(n, span_id, title, anchor=None):
out.write('<dt>Description:</dt>\n')
out.write('<dd>\n')
out.write(escape(pkg_cls.format_doc(indent=2), True))
out.write(escape(pkg.format_doc(indent=2), True))
out.write('\n')
out.write('</dd>\n')
out.write('</dl>\n')

View File

@@ -221,7 +221,7 @@ def _read_specs_from_file(filename):
for i, string in enumerate(stream):
try:
s = Spec(string)
spack.repo.path.get_pkg_class(s.name)
s.package
specs.append(s)
except SpackError as e:
tty.debug(e)

View File

@@ -0,0 +1,33 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.monitor
description = "interact with a monitor server"
section = "analysis"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='monitor_command')
# This adds the monitor group to the subparser
spack.monitor.get_monitor_group(subparser)
# Spack Monitor Uploads
monitor_parser = sp.add_parser('upload', description="upload to spack monitor")
monitor_parser.add_argument("upload_dir", help="directory root to upload")
def monitor(parser, args, **kwargs):
if args.monitor_command == "upload":
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
)
# Upload the directory
monitor.upload_local_save(args.upload_dir)
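# Example invocation (hypothetical path of results saved locally earlier):
#   $ spack monitor upload ~/.spack/reports/monitor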

View File

@@ -31,4 +31,5 @@ def patch(parser, args):
specs = spack.cmd.parse_specs(args.specs, concretize=True)
for spec in specs:
spec.package.do_patch()
package = spack.repo.get(spec)
package.do_patch()

View File

@@ -50,7 +50,7 @@ def _show_patch(sha256):
owner = rec['owner']
if 'relative_path' in rec:
pkg_dir = spack.repo.path.get_pkg_class(owner).package_dir
pkg_dir = spack.repo.get(owner).package_dir
path = os.path.join(pkg_dir, rec['relative_path'])
print(" path: %s" % path)
else:

View File

@@ -24,4 +24,5 @@ def restage(parser, args):
specs = spack.cmd.parse_specs(args.specs, concretize=True)
for spec in specs:
spec.package.do_restage()
package = spack.repo.get(spec)
package.do_restage()

View File

@@ -80,8 +80,7 @@ def spec(parser, args):
# Use command line specified specs, otherwise try to use environment specs.
if args.specs:
input_specs = spack.cmd.parse_specs(args.specs)
concretized_specs = spack.cmd.parse_specs(args.specs, concretize=True)
specs = list(zip(input_specs, concretized_specs))
specs = [(s, s.concretized()) for s in input_specs]
else:
env = ev.active_environment()
if env:

View File

@@ -58,7 +58,8 @@ def stage(parser, args):
for spec in specs:
spec = spack.cmd.matching_spec_from_env(spec)
package = spack.repo.get(spec)
if custom_path:
spec.package.path = custom_path
spec.package.do_stage()
tty.msg("Staged {0} in {1}".format(spec.package.name, spec.package.stage.path))
package.path = custom_path
package.do_stage()
tty.msg("Staged {0} in {1}".format(package.name, package.stage.path))

View File

@@ -94,16 +94,16 @@ def changed_files(base="develop", untracked=True, all_files=False, root=None):
git = which("git", required=True)
# ensure base is in the repo
base_sha = git("rev-parse", "--quiet", "--verify", "--revs-only", base,
fail_on_error=False, output=str)
git("show-ref", "--verify", "--quiet", "refs/heads/%s" % base,
fail_on_error=False)
if git.returncode != 0:
tty.die(
"This repository does not have a '%s' revision." % base,
"This repository does not have a '%s' branch." % base,
"spack style needs this branch to determine which files changed.",
"Ensure that '%s' exists, or specify files to check explicitly." % base
)
range = "{0}...".format(base_sha.strip())
range = "{0}...".format(base)
git_args = [
# Add changed files committed since branching off of develop


@@ -14,7 +14,6 @@
import spack.fetch_strategy as fs
import spack.repo
import spack.spec
import spack.util.crypto as crypto
from spack.url import (
UndetectableNameError,
@@ -148,13 +147,13 @@ def url_list(args):
urls = set()
# Gather set of URLs from all packages
for pkg_cls in spack.repo.path.all_package_classes():
url = getattr(pkg_cls, 'url', None)
urls = url_list_parsing(args, urls, url, pkg_cls)
for pkg in spack.repo.path.all_packages():
url = getattr(pkg, 'url', None)
urls = url_list_parsing(args, urls, url, pkg)
for params in pkg_cls.versions.values():
for params in pkg.versions.values():
url = params.get('url', None)
urls = url_list_parsing(args, urls, url, pkg_cls)
urls = url_list_parsing(args, urls, url, pkg)
# Print URLs
for url in sorted(urls):
@@ -185,9 +184,8 @@ def url_summary(args):
tty.msg('Generating a summary of URL parsing in Spack...')
# Loop through all packages
for pkg_cls in spack.repo.path.all_package_classes():
for pkg in spack.repo.path.all_packages():
urls = set()
pkg = pkg_cls(spack.spec.Spec(pkg_cls.name))
url = getattr(pkg, 'url', None)
if url:
@@ -320,20 +318,19 @@ def add(self, pkg_name, fetcher):
version_stats = UrlStats()
resource_stats = UrlStats()
for pkg_cls in spack.repo.path.all_package_classes():
for pkg in spack.repo.path.all_packages():
npkgs += 1
for v in pkg_cls.versions:
for v in pkg.versions:
try:
pkg = pkg_cls(spack.spec.Spec(pkg_cls.name))
fetcher = fs.for_package_version(pkg, v)
except (fs.InvalidArgsError, fs.FetcherConflict):
continue
version_stats.add(pkg_cls.name, fetcher)
version_stats.add(pkg.name, fetcher)
for _, resources in pkg_cls.resources.items():
for _, resources in pkg.resources.items():
for resource in resources:
resource_stats.add(pkg_cls.name, resource.fetcher)
resource_stats.add(pkg.name, resource.fetcher)
# print a nice summary table
tty.msg("URL stats for %d packages:" % npkgs)
@@ -393,8 +390,8 @@ def print_stat(indent, name, stat_name=None):
tty.msg("Found %d issues." % total_issues)
for issue_type, pkgs in issues.items():
tty.msg("Package URLs with %s" % issue_type)
for pkg_cls, pkg_issues in pkgs.items():
color.cprint(" @*C{%s}" % pkg_cls)
for pkg, pkg_issues in pkgs.items():
color.cprint(" @*C{%s}" % pkg)
for issue in pkg_issues:
print(" %s" % issue)


@@ -12,7 +12,6 @@
import spack.cmd.common.arguments as arguments
import spack.repo
import spack.spec
from spack.version import infinity_versions, ver
description = "list available versions of a package"
@@ -40,9 +39,7 @@ def setup_parser(subparser):
def versions(parser, args):
spec = spack.spec.Spec(args.package)
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
pkg = pkg_cls(spec)
pkg = spack.repo.get(args.package)
safe_versions = pkg.versions


@@ -252,13 +252,6 @@ def find_new_compilers(path_hints=None, scope=None):
merged configuration.
"""
compilers = find_compilers(path_hints)
return select_new_compilers(compilers, scope)
def select_new_compilers(compilers, scope=None):
"""Given a list of compilers, remove those that are already defined in
the configuration.
"""
compilers_not_in_config = []
for c in compilers:
arch_spec = spack.spec.ArchSpec((None, c.operating_system, c.target))


@@ -81,14 +81,6 @@ def cxx11_flag(self):
def cxx14_flag(self):
return "-std=c++14"
@property
def cxx17_flag(self):
return "-std=c++17"
@property
def cxx20_flag(self):
return "-std=c++20"
@property
def c99_flag(self):
return "-std=c99"


@@ -110,7 +110,7 @@
#: metavar to use for commands that accept scopes
#: this is shorter and more readable than listing all choices
scopes_metavar = '{defaults,system,site,user}[/PLATFORM] or env:ENVIRONMENT'
scopes_metavar = '{defaults,system,site,user}[/PLATFORM]'
#: Base name for the (internal) overrides scope.
overrides_base_name = 'overrides-'


@@ -171,15 +171,34 @@ def strip(self):
def paths(self):
"""Important paths in the image"""
Paths = collections.namedtuple('Paths', [
'environment', 'store', 'hidden_view', 'view'
'environment', 'store', 'view'
])
return Paths(
environment='/opt/spack-environment',
store='/opt/software',
hidden_view='/opt/._view',
view='/opt/view'
)
@tengine.context_property
def monitor(self):
"""Enable using spack monitor during build."""
Monitor = collections.namedtuple('Monitor', [
'enabled', 'host', 'prefix', 'keep_going', 'tags'
])
monitor = self.config.get("monitor")
# If we don't have a monitor group, cut out early.
if not monitor:
return Monitor(False, None, None, None, None)
return Monitor(
enabled=True,
host=monitor.get('host'),
prefix=monitor.get('prefix'),
keep_going=monitor.get("keep_going"),
tags=monitor.get('tags')
)
@tengine.context_property
def manifest(self):
"""The spack.yaml file that should be used in the image"""
@@ -188,6 +207,8 @@ def manifest(self):
# Copy in the part of spack.yaml prescribed in the configuration file
manifest = copy.deepcopy(self.config)
manifest.pop('container')
if "monitor" in manifest:
manifest.pop("monitor")
# Ensure that a few paths are where they need to be
manifest.setdefault('config', syaml.syaml_dict())


@@ -20,7 +20,6 @@
compiler_name_translation = {
'nvidia': 'nvhpc',
'rocm': 'rocmcc',
}
@@ -39,6 +38,10 @@ def translated_compiler_name(manifest_compiler_name):
elif manifest_compiler_name in spack.compilers.supported_compilers():
return manifest_compiler_name
else:
# Try to fail quickly. This can occur in two cases: (1) the manifest
# defines a compiler that is unknown to Spack, or (2) a spec specifies a
# compiler that doesn't exist; the first will be caught here when creating
# the compiler definition, while the second would result in Specs with
# associated undefined compilers.
raise spack.compilers.UnknownCompilerError(
"Manifest parsing - unknown compiler: {0}"
.format(manifest_compiler_name))
@@ -86,13 +89,13 @@ def spec_from_entry(entry):
arch=arch_str
)
pkg_cls = spack.repo.path.get_pkg_class(entry['name'])
package = spack.repo.get(entry['name'])
if 'parameters' in entry:
variant_strs = list()
for name, value in entry['parameters'].items():
# TODO: also ensure that the variant value is valid?
if not (name in pkg_cls.variants):
if not (name in package.variants):
tty.debug("Omitting variant {0} for entry {1}/{2}"
.format(name, entry['name'], entry['hash'][:7]))
continue
@@ -182,8 +185,6 @@ def read(path, apply_updates):
tty.debug("{0}: {1} compilers read from manifest".format(
path,
str(len(compilers))))
# Filter out the compilers that already appear in the configuration
compilers = spack.compilers.select_new_compilers(compilers)
if apply_updates and compilers:
spack.compilers.add_compilers_to_config(
compilers, init_config=False)


@@ -220,7 +220,7 @@ def by_executable(packages_to_check, path_hints=None):
searching by path.
Args:
packages_to_check (list): list of package classes to be detected
packages_to_check (list): list of packages to be detected
path_hints (list): list of paths to be searched. If None the list will be
constructed based on the PATH environment variable.
"""
@@ -228,7 +228,7 @@ def by_executable(packages_to_check, path_hints=None):
exe_pattern_to_pkgs = collections.defaultdict(list)
for pkg in packages_to_check:
if hasattr(pkg, 'executables'):
for exe in pkg.platform_executables():
for exe in pkg.platform_executables:
exe_pattern_to_pkgs[exe].append(pkg)
# Add Windows specific, package related paths to the search paths
path_hints.extend(compute_windows_program_path_for_package(pkg))
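
For reference, a package opts into this detection by defining a class-level `executables` list of regexes; a hypothetical sketch of the protocol:

from spack.package import Package
from spack.util.executable import Executable


class Mytool(Package):
    """Hypothetical package detectable from PATH."""

    executables = ['^mytool$']  # regexes matched against names in PATH

    @classmethod
    def determine_version(cls, exe):
        # invoked by detection for each matching executable path
        return Executable(exe)('--version', output=str, error=str).strip()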


@@ -46,7 +46,7 @@ class OpenMpi(Package):
from spack.dependency import Dependency, canonical_deptype, default_deptype
from spack.fetch_strategy import from_kwargs
from spack.resource import Resource
from spack.version import GitVersion, Version, VersionChecksumError, VersionLookupError
from spack.version import Version, VersionChecksumError
__all__ = ['DirectiveError', 'DirectiveMeta', 'version', 'conflicts', 'depends_on',
'extends', 'provides', 'patch', 'variant', 'resource']
@@ -330,17 +330,7 @@ def _execute_version(pkg):
kwargs['checksum'] = checksum
# Store kwargs for the package to later with a fetch_strategy.
version = Version(ver)
if isinstance(version, GitVersion):
if not hasattr(pkg, 'git') and 'git' not in kwargs:
msg = "Spack version directives cannot include git hashes fetched from"
msg += " URLs. Error in package '%s'\n" % pkg.name
msg += " version('%s', " % version.string
msg += ', '.join("%s='%s'" % (argname, value)
for argname, value in kwargs.items())
msg += ")"
raise VersionLookupError(msg)
pkg.versions[version] = kwargs
pkg.versions[Version(ver)] = kwargs
return _execute_version


@@ -1113,13 +1113,8 @@ def develop(self, spec, path, clone=False):
# "steal" the source code via staging API
abspath = os.path.normpath(os.path.join(self.path, path))
# Stage, at the moment, requires a concrete Spec, since it needs the
# dag_hash for the stage dir name. Below, though, we ask for a stage
# to be created, then copy it somewhere else afterwards. It would be
# better if we could create the `source_path` directly in its final
# destination.
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
pkg_cls(spec).stage.steal_source(abspath)
stage = spec.package.stage
stage.steal_source(abspath)
# If it wasn't already in the list, append it
self.dev_specs[spec.name] = {'path': path, 'spec': str(spec)}
@@ -1617,10 +1612,9 @@ def install_specs(self, specs=None, **install_args):
# ensure specs already installed are marked explicit
all_specs = specs or [cs for _, cs in self.concretized_specs()]
specs_installed = [s for s in all_specs if s.installed]
if specs_installed:
with spack.store.db.write_transaction(): # do all in one transaction
for spec in specs_installed:
spack.store.db.update_explicit(spec, True)
with spack.store.db.write_transaction(): # do all in one transaction
for spec in specs_installed:
spack.store.db.update_explicit(spec, True)
if not specs_to_install:
tty.msg('All of the packages are already installed')


@@ -35,7 +35,6 @@
import six.moves.urllib.parse as urllib_parse
import llnl.util
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.filesystem import (
get_single_file,
@@ -120,11 +119,6 @@ def __init__(self, **kwargs):
# 'no_cache' option from version directive.
self.cache_enabled = not kwargs.pop('no_cache', False)
self.package = None
def set_package(self, package):
self.package = package
# Subclasses need to implement these methods
def fetch(self):
"""Fetch source code archive or repo.
@@ -248,10 +242,6 @@ def source_id(self):
if all(component_ids):
return component_ids
def set_package(self, package):
for item in self:
item.package = package
@fetcher
class URLFetchStrategy(FetchStrategy):
@@ -530,7 +520,7 @@ def expand(self):
"Failed on expand() for URL %s" % self.url)
if not self.extension:
self.extension = extension(self.url)
self.extension = extension(self.archive_file)
if self.stage.expanded:
tty.debug('Source already staged to %s' % self.stage.source_path)
@@ -538,11 +528,50 @@ def expand(self):
decompress = decompressor_for(self.archive_file, self.extension)
# Expand all tarballs in their own directory to contain
# exploding tarballs.
tarball_container = os.path.join(self.stage.path,
"spack-expanded-archive")
# Below we assume that the decompress command expands the
# archive in the current working directory
with fs.exploding_archive_catch(self.stage):
mkdirp(tarball_container)
with working_dir(tarball_container):
decompress(self.archive_file)
# Check for an exploding tarball, i.e. one that doesn't expand to
# a single directory. If the tarball *didn't* explode, move its
# contents to the staging source directory & remove the container
# directory. If the tarball did explode, just rename the tarball
# directory to the staging source directory.
#
# NOTE: The tar program on Mac OS X will encode HFS metadata in
# hidden files, which can end up *alongside* a single top-level
# directory. We initially ignore presence of hidden files to
# accommodate these "semi-exploding" tarballs but ensure the files
# are copied to the source directory.
files = os.listdir(tarball_container)
non_hidden = [f for f in files if not f.startswith('.')]
if len(non_hidden) == 1:
src = os.path.join(tarball_container, non_hidden[0])
if os.path.isdir(src):
self.stage.srcdir = non_hidden[0]
shutil.move(src, self.stage.source_path)
if len(files) > 1:
files.remove(non_hidden[0])
for f in files:
src = os.path.join(tarball_container, f)
dest = os.path.join(self.stage.path, f)
shutil.move(src, dest)
os.rmdir(tarball_container)
else:
# This is a non-directory entry (e.g., a patch file) so simply
# rename the tarball container to be the source path.
shutil.move(tarball_container, self.stage.source_path)
else:
shutil.move(tarball_container, self.stage.source_path)
def archive(self, destination):
"""Just moves this archive to the destination."""
if not self.archive_file:
@@ -985,20 +1014,9 @@ def clone(self, dest=None, commit=None, branch=None, tag=None, bare=False):
git(*args)
# Init submodules if the user asked for them.
git_commands = []
submodules = self.submodules
if callable(submodules):
submodules = list(submodules(self.package))
git_commands.append(["submodule", "init", "--"] + submodules)
git_commands.append(['submodule', 'update', '--recursive'])
elif submodules:
git_commands.append(["submodule", "update", "--init", "--recursive"])
if not git_commands:
return
with working_dir(dest):
for args in git_commands:
if self.submodules:
with working_dir(dest):
args = ['submodule', 'update', '--init', '--recursive']
if not spack.config.get('config:debug'):
args.insert(1, '--quiet')
git(*args)
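
One side of this hunk also accepts a callable for `submodules`, receiving the package and returning the submodule paths to initialize; a hypothetical sketch:

def pick_submodules(package):
    # hypothetical: choose submodules based on variants
    subs = ['third_party/core']
    if '+gui' in package.spec:
        subs.append('third_party/gui')
    return subs


class Example(Package):
    git = 'https://github.com/example/example.git'
    submodules = pick_submodules
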
@@ -1575,30 +1593,16 @@ def for_package_version(pkg, version):
check_pkg_attributes(pkg)
if not isinstance(version, spack.version.VersionBase):
if not isinstance(version, spack.version.Version):
version = spack.version.Version(version)
# if it's a commit, we must use a GitFetchStrategy
if isinstance(version, spack.version.GitVersion):
if not hasattr(pkg, "git"):
raise FetchError(
"Cannot fetch git version for %s. Package has no 'git' attribute" %
pkg.name
)
if version.is_commit and hasattr(pkg, "git"):
# Populate the version with comparisons to other commits
version.generate_git_lookup(pkg.name)
# For GitVersion, we have no way to determine whether a ref is a branch or tag
# Fortunately, we handle branches and tags identically, except tags are
# handled slightly more conservatively for older versions of git.
# We call all non-commit refs tags in this context, at the cost of a slight
# performance hit for branches on older versions of git.
# Branches cannot be cached, so we tell the fetcher not to cache tags/branches
ref_type = 'commit' if version.is_commit else 'tag'
version.generate_commit_lookup(pkg.name)
kwargs = {
'git': pkg.git,
ref_type: version.ref,
'no_cache': True,
'commit': str(version)
}
kwargs['submodules'] = getattr(pkg, 'submodules', False)
fetcher = GitFetchStrategy(**kwargs)


@@ -95,10 +95,7 @@ def view_copy(src, dst, view, spec=None):
view.get_projection_for_spec(dep)
if spack.relocate.is_binary(dst):
spack.relocate.relocate_text_bin(
binaries=[dst],
prefixes=prefix_to_projection
)
spack.relocate.relocate_text_bin([dst], prefix_to_projection)
else:
prefix_to_projection[spack.store.layout.root] = view._root
prefix_to_projection[orig_sbang] = new_sbang
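
Both sides call the same relocation entry point (the commit in this compare memoizes the offset scanning behind it); a sketch of the keyword form with hypothetical paths:

import spack.relocate

prefix_to_projection = {
    '/opt/spack/store/zlib-1.2.12-abc1234': '/opt/view/zlib',
}
spack.relocate.relocate_text_bin(
    binaries=['/opt/view/zlib/lib/libz.so'],
    prefixes=prefix_to_projection,
)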


@@ -535,10 +535,9 @@ def graph_dot(specs, deptype='all', static=False, out=None):
deptype = spack.dependency.canonical_deptype(deptype)
def static_graph(spec, deptype):
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
possible = pkg_cls.possible_dependencies(
expand_virtuals=True, deptype=deptype
)
pkg = spec.package
possible = pkg.possible_dependencies(
expand_virtuals=True, deptype=deptype)
nodes = set() # elements are (node name, node label)
edges = set() # elements are (src key, dest key)


@@ -2,10 +2,10 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Definitions that control how Spack creates Spec hashes."""
import spack.dependency as dp
import spack.repo
hashes = []
@@ -51,16 +51,10 @@ def __call__(self, spec):
)
def _content_hash_override(spec):
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
pkg = pkg_cls(spec)
return pkg.content_hash()
#: Package hash used as part of dag hash
package_hash = SpecHashDescriptor(
deptype=(), package_hash=True, name='package_hash',
override=_content_hash_override)
override=lambda s: s.package.content_hash())
# Deprecated hash types, no longer used, but needed to understand old serialized


@@ -21,6 +21,7 @@
* on_phase_success(pkg, phase_name, log_file)
* on_phase_error(pkg, phase_name, log_file)
* on_analyzer_save(pkg, result)
* post_env_write(env)
This can be used to implement support for things like module
@@ -91,5 +92,8 @@ def __call__(self, *args, **kwargs):
on_install_failure = _HookRunner('on_install_failure')
on_install_cancel = _HookRunner('on_install_cancel')
# Analyzer hooks
on_analyzer_save = _HookRunner('on_analyzer_save')
# Environment hooks
post_env_write = _HookRunner('post_env_write')
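
Each runner above resolves its name to a free function in the modules under spack.hooks; a minimal hypothetical hook module:

# lib/spack/spack/hooks/log_starts.py (hypothetical module)
import llnl.util.tty as tty


def on_install_start(spec):
    # found and invoked by _HookRunner('on_install_start')
    tty.debug('install starting: %s' % spec.name)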


@@ -0,0 +1,85 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import spack.monitor
def on_install_start(spec):
"""On start of an install, we want to ping the server if it exists
"""
if not spack.monitor.cli:
return
tty.debug("Running on_install_start for %s" % spec)
build_id = spack.monitor.cli.new_build(spec)
tty.verbose("Build created with id %s" % build_id)
def on_install_success(spec):
"""On the success of an install (after everything is complete)
"""
if not spack.monitor.cli:
return
tty.debug("Running on_install_success for %s" % spec)
result = spack.monitor.cli.update_build(spec, status="SUCCESS")
tty.verbose(result.get('message'))
def on_install_failure(spec):
"""Triggered on failure of an install
"""
if not spack.monitor.cli:
return
tty.debug("Running on_install_failure for %s" % spec)
result = spack.monitor.cli.fail_task(spec)
tty.verbose(result.get('message'))
def on_install_cancel(spec):
"""Triggered on cancel of an install
"""
if not spack.monitor.cli:
return
tty.debug("Running on_install_cancel for %s" % spec)
result = spack.monitor.cli.cancel_task(spec)
tty.verbose(result.get('message'))
def on_phase_success(pkg, phase_name, log_file):
"""Triggered on a phase success
"""
if not spack.monitor.cli:
return
tty.debug("Running on_phase_success %s, phase %s" % (pkg.name, phase_name))
result = spack.monitor.cli.send_phase(pkg, phase_name, log_file, "SUCCESS")
tty.verbose(result.get('message'))
def on_phase_error(pkg, phase_name, log_file):
"""Triggered on a phase error
"""
if not spack.monitor.cli:
return
tty.debug("Running on_phase_error %s, phase %s" % (pkg.name, phase_name))
result = spack.monitor.cli.send_phase(pkg, phase_name, log_file, "ERROR")
tty.verbose(result.get('message'))
def on_analyzer_save(pkg, result):
"""given a package and a result, if we have a spack monitor, upload
the result to it.
"""
if not spack.monitor.cli:
return
# This hook runs after a save result
spack.monitor.cli.send_analyze_metadata(pkg, result)


@@ -49,6 +49,7 @@
import spack.compilers
import spack.error
import spack.hooks
import spack.monitor
import spack.package_base
import spack.package_prefs as prefs
import spack.repo
@@ -188,9 +189,6 @@ def _do_fake_install(pkg):
dump_packages(pkg.spec, packages_dir)
_compiler_concretization_cache = {}
def _packages_needed_to_bootstrap_compiler(compiler, architecture, pkgs):
"""
Return a list of packages required to bootstrap `pkg`'s compiler
@@ -225,31 +223,17 @@ def _packages_needed_to_bootstrap_compiler(compiler, architecture, pkgs):
dep.constrain('os=%s' % str(architecture.os))
dep.constrain('target=%s:' %
architecture.target.microarchitecture.family.name)
# concrete CompilerSpec has less info than concrete Spec
# concretize as Spec to add that information
# cache concretizations within an instantiation of Spack
global _compiler_concretization_cache
depstr = str(dep)
if depstr not in _compiler_concretization_cache:
    tty.debug("Compiler concretization cache miss for %s" % depstr)
    _compiler_concretization_cache[depstr] = dep.concretized()
else:
    tty.debug("Compiler concretization cache hit for %s" % depstr)
concrete_dep = _compiler_concretization_cache[depstr]
dep.concretize()
# mark compiler as depended-on by the packages that use it
for pkg in pkgs:
concrete_dep._dependents.add(
spack.spec.DependencySpec(pkg.spec, concrete_dep, ('build',))
dep._dependents.add(
spack.spec.DependencySpec(pkg.spec, dep, ('build',))
)
packages = [(s.package, False) for
s in concrete_dep.traverse(order='post', root=False)]
packages.append((concrete_dep.package, True))
s in dep.traverse(order='post', root=False)]
packages.append((dep.package, True))
return packages
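
Reduced to its essentials, the change above is plain string-keyed memoization of an expensive call (a sketch):

_cache = {}


def concretized_once(dep):
    key = str(dep)  # the spec string keys the expensive concretization
    if key not in _cache:
        _cache[key] = dep.concretized()
    return _cache[key]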


@@ -391,8 +391,7 @@ def mirror_archive_paths(fetcher, per_package_ref, spec=None):
storage path of the resource associated with the specified ``fetcher``."""
ext = None
if spec:
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
versions = pkg_cls.versions.get(spec.version, {})
versions = spec.package.versions.get(spec.package.version, {})
ext = versions.get('extension', None)
# If the spec does not explicitly specify an extension (the default case),
# then try to determine it automatically. An extension can only be

lib/spack/spack/monitor.py (new file, 738 lines)

@@ -0,0 +1,738 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Interact with a Spack Monitor Service. Derived from
https://github.com/spack/spack-monitor/blob/main/script/spackmoncli.py
"""
import base64
import hashlib
import os
import re
from datetime import datetime
try:
from urllib.error import URLError
from urllib.request import Request, urlopen
except ImportError:
from urllib2 import urlopen, Request, URLError # type: ignore # novm
from copy import deepcopy
from glob import glob
import llnl.util.tty as tty
import spack
import spack.config
import spack.hash_types as ht
import spack.main
import spack.paths
import spack.store
import spack.util.path
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
# A global client to instantiate once
cli = None
def get_client(host, prefix="ms1", allow_fail=False, tags=None, save_local=False):
"""
Get a monitor client for a particular host and prefix.
If the client is not running, we exit early, unless allow_fail is set
to true, indicating that we should continue the build even if the
server is not present. Note that this client is defined globally as "cli"
so we can istantiate it once (checking for credentials, etc.) and then
always have access to it via spack.monitor.cli. Also note that
typically, we call the monitor by way of hooks in spack.hooks.monitor.
So if you want the monitor to have a new interaction with some part of
the codebase, it's recommended to write a hook first, and then have
the monitor use it.
"""
global cli
cli = SpackMonitorClient(host=host, prefix=prefix, allow_fail=allow_fail,
tags=tags, save_local=save_local)
# Auth is always required unless we are saving locally
if not save_local:
cli.require_auth()
# We will exit early if the monitoring service is not running, but
# only if we aren't doing a local save
if not save_local:
info = cli.service_info()
# If we allow failure, the response may be None
if info:
tty.debug("%s v.%s has status %s" % (
info['id'],
info['version'],
info['status'])
)
return cli
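
A typical setup sketch (host hypothetical; credentials come from SPACKMON_TOKEN and SPACKMON_USER):

import spack.monitor

spack.monitor.get_client(host='http://127.0.0.1', prefix='ms1',
                         allow_fail=True)
# Hooks and commands then reach the same instance through the global:
if spack.monitor.cli:
    info = spack.monitor.cli.service_info()
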
def get_monitor_group(subparser):
"""
Retrieve the monitor group for the argument parser.
Since the monitor group is shared between commands, we provide a common
function to generate it. The caller passes the subparser; the group is
added to it and returned.
"""
# Monitoring via https://github.com/spack/spack-monitor
monitor_group = subparser.add_argument_group()
monitor_group.add_argument(
'--monitor', action='store_true', dest='use_monitor', default=False,
help="interact with a monitor server during builds.")
monitor_group.add_argument(
'--monitor-save-local', action='store_true', dest='monitor_save_local',
default=False, help="save monitor results to .spack instead of server.")
monitor_group.add_argument(
'--monitor-tags', dest='monitor_tags', default=None,
help="One or more (comma separated) tags for a build.")
monitor_group.add_argument(
'--monitor-keep-going', action='store_true', dest='monitor_keep_going',
default=False, help="continue the build if a request to monitor fails.")
monitor_group.add_argument(
'--monitor-host', dest='monitor_host', default="http://127.0.0.1",
help="If using a monitor, customize the host.")
monitor_group.add_argument(
'--monitor-prefix', dest='monitor_prefix', default="ms1",
help="The API prefix for the monitor service.")
return monitor_group
class SpackMonitorClient:
"""Client to interact with a spack monitor server.
We require the host url, along with the prefix, to discover the
service_info endpoint. If allow_fail is True, we do not exit with
tty.die when a request is unsuccessful. The spack
version is one of the fields to uniquely identify a spec, so we add it
to the client on init.
"""
def __init__(self, host=None, prefix="ms1", allow_fail=False, tags=None,
save_local=False):
# We can control setting an arbitrary version if needed
sv = spack.main.get_version()
self.spack_version = os.environ.get("SPACKMON_SPACK_VERSION") or sv
self.host = host or "http://127.0.0.1"
self.baseurl = "%s/%s" % (self.host, prefix.strip("/"))
self.token = os.environ.get("SPACKMON_TOKEN")
self.username = os.environ.get("SPACKMON_USER")
self.headers = {}
self.allow_fail = allow_fail
self.capture_build_environment()
self.tags = tags
self.save_local = save_local
# We key lookup of build_id by dag_hash
self.build_ids = {}
self.setup_save()
def setup_save(self):
"""Given a local save "save_local" ensure the output directory exists.
"""
if not self.save_local:
return
save_dir = spack.util.path.canonicalize_path(
spack.config.get('config:monitor_dir', spack.paths.default_monitor_path)
)
# Name based on timestamp
now = datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%s')
self.save_dir = os.path.join(save_dir, now)
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
def save(self, obj, filename):
"""
Save a monitor json result to the save directory.
"""
filename = os.path.join(self.save_dir, filename)
write_json(obj, filename)
return {"message": "Build saved locally to %s" % filename}
def load_build_environment(self, spec):
"""
Load a build environment from install_environment.json.
If we are running an analyze command, we will need to load previously
used build environment metadata from install_environment.json to capture
what was done during the build.
"""
if not hasattr(spec, "package") or not spec.package:
tty.die("A spec must have a package to load the environment.")
pkg_dir = os.path.dirname(spec.package.install_log_path)
env_file = os.path.join(pkg_dir, "install_environment.json")
build_environment = read_json(env_file)
if not build_environment:
tty.warn(
"install_environment.json not found in package folder. "
" This means that the current environment metadata will be used."
)
else:
self.build_environment = build_environment
def capture_build_environment(self):
"""
Capture the environment for the build.
This uses spack.util.environment.get_host_environment_metadata to do so.
This is important because it's a unique identifier, along with the spec,
for a Build. It should look something like this:
{'host_os': 'ubuntu20.04',
'platform': 'linux',
'host_target': 'skylake',
'hostname': 'vanessa-ThinkPad-T490s',
'spack_version': '0.16.1-1455-52d5b55b65',
'kernel_version': '#73-Ubuntu SMP Mon Jan 18 17:25:17 UTC 2021'}
This is saved to a package install's metadata folder as
install_environment.json, and can be loaded by the monitor for uploading
data relevant to a later analysis.
"""
from spack.util.environment import get_host_environment_metadata
self.build_environment = get_host_environment_metadata()
keys = list(self.build_environment.keys())
# Allow to customize any of these values via the environment
for key in keys:
envar_name = "SPACKMON_%s" % key.upper()
envar = os.environ.get(envar_name)
if envar:
self.build_environment[key] = envar
def require_auth(self):
"""
Require authentication.
Both the token and the username must be set.
"""
if not self.save_local and (not self.token or not self.username):
tty.die("You are required to export SPACKMON_TOKEN and SPACKMON_USER")
def set_header(self, name, value):
self.headers.update({name: value})
def set_basic_auth(self, username, password):
"""
A wrapper for adding basic authentication to the Request
"""
auth_str = "%s:%s" % (username, password)
auth_header = base64.b64encode(auth_str.encode("utf-8"))
self.set_header("Authorization", "Basic %s" % auth_header.decode("utf-8"))
def reset(self):
"""
Reset and prepare for a new request.
"""
if "Authorization" in self.headers:
self.headers = {"Authorization": self.headers['Authorization']}
else:
self.headers = {}
def prepare_request(self, endpoint, data, headers):
"""
Prepare a request given an endpoint, data, and headers.
If data is provided, urllib makes the request a POST
"""
# Always reset headers for new request.
self.reset()
# Preserve previously used auth token
headers = headers or self.headers
# The calling function can provide a full or partial url
if not endpoint.startswith("http"):
endpoint = "%s/%s" % (self.baseurl, endpoint)
# If we have data, the request will be POST
if data:
if not isinstance(data, str):
data = sjson.dump(data)
data = data.encode('ascii')
return Request(endpoint, data=data, headers=headers)
def issue_request(self, request, retry=True):
"""
Given a prepared request, issue it.
If we get an error, die, unless allow_fail is set. If there are
cases where we want to keep going on error (e.g., disable the
monitoring service instead of exiting), that logic could be added here.
"""
try:
response = urlopen(request)
except URLError as e:
# If we have an authorization request, retry once with auth
if hasattr(e, "code") and e.code == 401 and retry:
if self.authenticate_request(e):
request = self.prepare_request(
e.url,
sjson.load(request.data.decode('utf-8')),
self.headers
)
return self.issue_request(request, False)
# Handle permanent re-directs!
elif hasattr(e, "code") and e.code == 308:
location = e.headers.get('Location')
request_data = None
if request.data:
request_data = sjson.load(request.data.decode('utf-8'))[0]
if location:
request = self.prepare_request(
location,
request_data,
self.headers
)
return self.issue_request(request, True)
# Otherwise, relay the message and exit on error
msg = ""
if hasattr(e, 'reason'):
msg = e.reason
elif hasattr(e, 'code'):
msg = e.code
# If we can parse the message, try it
try:
msg += "\n%s" % e.read().decode("utf8", 'ignore')
except Exception:
pass
if self.allow_fail:
tty.warn("Request to %s was not successful, but continuing." % e.url)
return
tty.die(msg)
return response
def do_request(self, endpoint, data=None, headers=None, url=None):
"""
Do the actual request.
If data is provided, the request is a POST; otherwise it is a GET.
If an entire URL is provided, don't use the endpoint.
"""
request = self.prepare_request(endpoint, data, headers)
# If we get an authorization error, issue_request will retry with auth
response = self.issue_request(request)
# A 200/201 response indicates success
if response.code in [200, 201]:
return sjson.load(response.read().decode('utf-8'))
return response
def authenticate_request(self, originalResponse):
"""
Authenticate the request.
Given a response (an HTTPError 401), look for a Www-Authenticate
header to parse. We return True/False to indicate if the request
should be retried.
"""
authHeaderRaw = originalResponse.headers.get("Www-Authenticate")
if not authHeaderRaw:
return False
# If we have a username and password, set basic auth automatically
if self.token and self.username:
self.set_basic_auth(self.username, self.token)
headers = deepcopy(self.headers)
if "Authorization" not in headers:
tty.error(
"This endpoint requires a token. Please set "
"client.set_basic_auth(username, password) first "
"or export them to the environment."
)
return False
# Prepare request to retry
h = parse_auth_header(authHeaderRaw)
headers.update({
"service": h.Service,
"Accept": "application/json",
"User-Agent": "spackmoncli"}
)
# Currently we don't set a scope (it defaults to build)
authResponse = self.do_request(h.Realm, headers=headers)
# Request the token
token = authResponse.get("token")
if not token:
return False
# Set the token to the original request and retry
self.headers.update({"Authorization": "Bearer %s" % token})
return True
# Functions correspond to endpoints
def service_info(self):
"""
Get the service information endpoint
"""
# Base endpoint provides service info
return self.do_request("")
def new_configuration(self, specs):
"""
Given a list of specs, generate a new configuration for each.
We return a lookup of specs with their package names. This assumes
that we are only installing one version of each package. We aren't
starting or creating any builds, so we don't need a build environment.
"""
configs = {}
# There should only be one spec generally (what cases would have >1?)
for spec in specs:
# Not sure if this is needed here, but I see it elsewhere
if spec.name in spack.repo.path or spec.virtual:
spec.concretize()
# Remove extra level of nesting
# This is the only place in Spack we still use full_hash, as `spack monitor`
# requires specs with full_hash-keyed dependencies.
as_dict = {"spec": spec.to_dict(hash=ht.full_hash)['spec'],
"spack_version": self.spack_version}
if self.save_local:
filename = "spec-%s-%s-config.json" % (spec.name, spec.version)
self.save(as_dict, filename)
else:
response = self.do_request("specs/new/", data=sjson.dump(as_dict))
configs[spec.package.name] = response.get('data', {})
return configs
def failed_concretization(self, specs):
"""
Given a list of abstract specs, tell spack monitor concretization failed.
"""
configs = {}
# There should only be one spec generally (what cases would have >1?)
for spec in specs:
# update the spec to have a build hash indicating that it cannot be built
meta = spec.to_dict()['spec']
nodes = []
for node in meta.get("nodes", []):
node["full_hash"] = "FAILED_CONCRETIZATION"
nodes.append(node)
meta['nodes'] = nodes
# We can't concretize / hash
as_dict = {"spec": meta,
"spack_version": self.spack_version}
if self.save_local:
filename = "spec-%s-%s-config.json" % (spec.name, spec.version)
self.save(as_dict, filename)
else:
response = self.do_request("specs/new/", data=sjson.dump(as_dict))
configs[spec.package.name] = response.get('data', {})
return configs
def new_build(self, spec):
"""
Create a new build.
This means sending the hash of the spec to be built,
along with the build environment. Together, these two sets of data
uniquely identify the build, and we will add objects (the binaries
produced) to it. We return the build id to the calling client.
"""
return self.get_build_id(spec, return_response=True)
def get_build_id(self, spec, return_response=False, spec_exists=True):
"""
Retrieve a build id, either in the local cache, or query the server.
"""
dag_hash = spec.dag_hash()
if dag_hash in self.build_ids:
return self.build_ids[dag_hash]
# Prepare build environment data (including spack version)
data = self.build_environment.copy()
data['full_hash'] = dag_hash
# If the build should be tagged, add it
if self.tags:
data['tags'] = self.tags
# If we allow the spec to not exist (meaning we create it) we need to
# include the full specfile here
if not spec_exists:
meta_dir = os.path.dirname(spec.package.install_log_path)
spec_file = os.path.join(meta_dir, "spec.json")
if os.path.exists(spec_file):
data['spec'] = sjson.load(read_file(spec_file))
else:
spec_file = os.path.join(meta_dir, "spec.yaml")
data['spec'] = syaml.load(read_file(spec_file))
if self.save_local:
return self.get_local_build_id(data, dag_hash, return_response)
return self.get_server_build_id(data, dag_hash, return_response)
def get_local_build_id(self, data, dag_hash, return_response):
"""
Generate a local build id based on hashing the expected data
"""
hasher = hashlib.md5()
hasher.update(str(data).encode('utf-8'))
bid = hasher.hexdigest()
filename = "build-metadata-%s.json" % bid
response = self.save(data, filename)
if return_response:
return response
return bid
def get_server_build_id(self, data, dag_hash, return_response=False):
"""
Retrieve a build id from the spack monitor server
"""
response = self.do_request("builds/new/", data=sjson.dump(data))
# Add the build id to the lookup
bid = self.build_ids[dag_hash] = response['data']['build']['build_id']
# If the function is called directly, the user might want output
if return_response:
return response
return bid
def update_build(self, spec, status="SUCCESS"):
"""
Update a build with a new status.
This typically updates the relevant package to indicate a
successful install. This endpoint can take a general status to update.
"""
data = {"build_id": self.get_build_id(spec), "status": status}
if self.save_local:
filename = "build-%s-status.json" % data['build_id']
return self.save(data, filename)
return self.do_request("builds/update/", data=sjson.dump(data))
def fail_task(self, spec):
"""Given a spec, mark it as failed. This means that Spack Monitor
marks all dependencies as cancelled, unless they are already successful
"""
return self.update_build(spec, status="FAILED")
def cancel_task(self, spec):
"""Given a spec, mark it as cancelled.
"""
return self.update_build(spec, status="CANCELLED")
def send_analyze_metadata(self, pkg, metadata):
"""
Send spack analyzer metadata to the spack monitor server.
Given a dictionary of analyzers (with key as analyzer type, and
value as the data) upload the analyzer output to Spack Monitor.
Spack Monitor should either have a known understanding of the analyzer,
or if not (the key is not recognized), it's assumed to be a dictionary
of objects/files, each with attributes to be updated. E.g.,
{"analyzer-name": {"object-file-path": {"feature1": "value1"}}}
"""
# Prepare build environment data (including spack version)
# Since the build might not have been generated, we include the spec
data = {"build_id": self.get_build_id(pkg.spec, spec_exists=False),
"metadata": metadata}
return self.do_request("analyze/builds/", data=sjson.dump(data))
def send_phase(self, pkg, phase_name, phase_output_file, status):
"""
Send the result of a phase during install.
Given a package, phase name, and status, update the monitor endpoint
to alert of the status of the stage. This includes parsing the package
metadata folder for phase output and error files
"""
data = {"build_id": self.get_build_id(pkg.spec)}
# Send output specific to the phase (does this include error?)
data.update({"status": status,
"output": read_file(phase_output_file),
"phase_name": phase_name})
if self.save_local:
filename = "build-%s-phase-%s.json" % (data['build_id'], phase_name)
return self.save(data, filename)
return self.do_request("builds/phases/update/", data=sjson.dump(data))
def upload_specfile(self, filename):
"""
Upload a spec file to the spack monitor server.
Given a spec file (must be json) upload to the UploadSpec endpoint.
This function is not used in the spack to server workflow, but could
be useful if Spack Monitor is intended to send an already generated
file in some kind of separate analysis. For the environment file, we
parse out SPACK_* variables to include.
"""
# We load as json just to validate it
spec = read_json(filename)
data = {"spec": spec, "spack_verison": self.spack_version}
if self.save_local:
filename = "spec-%s-%s.json" % (spec.name, spec.version)
return self.save(data, filename)
return self.do_request("specs/new/", data=sjson.dump(data))
def iter_read(self, pattern):
"""
A helper to read json from a directory glob and return it loaded.
"""
for filename in glob(pattern):
basename = os.path.basename(filename)
tty.info("Reading %s" % basename)
yield read_json(filename)
def upload_local_save(self, dirname):
"""
Upload results from a locally saved directory to spack monitor.
The general workflow will first include an install with save local:
spack install --monitor --monitor-save-local
And then a request to upload the root or specific directory.
spack upload monitor ~/.spack/reports/monitor/<date>/
"""
dirname = os.path.abspath(dirname)
if not os.path.exists(dirname):
tty.die("%s does not exist." % dirname)
# We can't be sure the level of nesting the user has provided
# So we walk recursively through and look for build metadata
for subdir, dirs, files in os.walk(dirname):
root = os.path.join(dirname, subdir)
# A metadata file signals a monitor export
metadata = glob("%s%sbuild-metadata*" % (root, os.sep))
if not metadata or not files or not root or not subdir:
continue
self._upload_local_save(root)
tty.info("Upload complete")
def _upload_local_save(self, dirname):
"""
Given a found metadata file, upload results to spack monitor.
"""
# First find all the specs
for spec in self.iter_read("%s%sspec*" % (dirname, os.sep)):
self.do_request("specs/new/", data=sjson.dump(spec))
# Load build metadata to generate an id
metadata = glob("%s%sbuild-metadata*" % (dirname, os.sep))
if not metadata:
tty.die("Build metadata file(s) missing in %s" % dirname)
# Create a build_id lookup based on hash
hashes = {}
for metafile in metadata:
data = read_json(metafile)
build = self.do_request("builds/new/", data=sjson.dump(data))
localhash = os.path.basename(metafile).replace(".json", "")
hashes[localhash.replace('build-metadata-', "")] = build
# Next upload build phases
for phase in self.iter_read("%s%sbuild*phase*" % (dirname, os.sep)):
build_id = hashes[phase['build_id']]['data']['build']['build_id']
phase['build_id'] = build_id
self.do_request("builds/phases/update/", data=sjson.dump(phase))
# Next find the status objects
for status in self.iter_read("%s%sbuild*status*" % (dirname, os.sep)):
build_id = hashes[status['build_id']]['data']['build']['build_id']
status['build_id'] = build_id
self.do_request("builds/update/", data=sjson.dump(status))
# Helper functions
def parse_auth_header(authHeaderRaw):
"""
Parse an authentication header into relevant pieces
"""
regex = re.compile('([a-zA-Z]+)="(.+?)"')
matches = regex.findall(authHeaderRaw)
lookup = dict()
for match in matches:
lookup[match[0]] = match[1]
return authHeader(lookup)
class authHeader:
def __init__(self, lookup):
"""Given a dictionary of values, match them to class attributes"""
for key in lookup:
if key in ["realm", "service", "scope"]:
setattr(self, key.capitalize(), lookup[key])
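
A usage sketch with a hypothetical header value; note the attributes come back capitalized via authHeader:

raw = 'Bearer realm="https://auth.example.com/token",service="monitor"'
h = parse_auth_header(raw)
print(h.Realm)    # https://auth.example.com/token
print(h.Service)  # monitor
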
def read_file(filename):
"""
Read a file, if it exists. Otherwise return None
"""
if not os.path.exists(filename):
return
with open(filename, 'r') as fd:
content = fd.read()
return content
def write_file(content, filename):
"""
Write content to file
"""
with open(filename, 'w') as fd:
fd.writelines(content)
return content
def write_json(obj, filename):
"""
Write a json file, if the output directory exists.
"""
if not os.path.exists(os.path.dirname(filename)):
return
return write_file(sjson.dump(obj), filename)
def read_json(filename):
"""
Read a file and load into json, if it exists. Otherwise return None.
"""
if not os.path.exists(filename):
return
return sjson.load(read_file(filename))


@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform as py_platform
import re
from subprocess import check_output
from spack.version import Version
@@ -52,17 +51,6 @@ def __init__(self):
if 'ubuntu' in distname:
version = '.'.join(version[0:2])
# openSUSE Tumbleweed is a rolling release which can change
# more than once in a week, so set version to tumbleweed$GLIBVERS
elif 'opensuse-tumbleweed' in distname or 'opensusetumbleweed' in distname:
distname = 'opensuse'
output = check_output(["ldd", "--version"]).decode()
libcvers = re.findall(r'ldd \(GNU libc\) (.*)', output)
if len(libcvers) == 1:
version = 'tumbleweed' + libcvers[0]
else:
version = 'tumbleweed' + version[0]
else:
version = version[0]


@@ -82,4 +82,4 @@
conditional,
disjoint_sets,
)
from spack.version import Version, ver
from spack.version import Version, ver


@@ -33,7 +33,7 @@
import llnl.util.filesystem as fsys
import llnl.util.tty as tty
from llnl.util.lang import classproperty, match_predicate, memoized, nullcontext
from llnl.util.lang import memoized, nullcontext
from llnl.util.link_tree import LinkTree
import spack.compilers
@@ -50,7 +50,6 @@
import spack.multimethod
import spack.paths
import spack.repo
import spack.spec
import spack.store
import spack.url
import spack.util.environment
@@ -63,7 +62,7 @@
from spack.util.executable import ProcessError, which
from spack.util.package_hash import package_hash
from spack.util.prefix import Prefix
from spack.version import GitVersion, Version, VersionBase
from spack.version import Version
if sys.version_info[0] >= 3:
FLAG_HANDLER_RETURN_TYPE = Tuple[
@@ -208,8 +207,8 @@ def __init__(cls, name, bases, attr_dict):
# If a package has the executables or libraries attribute then it's
# assumed to be detectable
if hasattr(cls, 'executables') or hasattr(cls, 'libraries'):
@classmethod
def platform_executables(cls):
@property
def platform_executables(self):
def to_windows_exe(exe):
if exe.endswith('$'):
exe = exe.replace('$', '%s$' % spack.util.path.win_exe_ext())
@@ -217,8 +216,8 @@ def to_windows_exe(exe):
exe += spack.util.path.win_exe_ext()
return exe
plat_exe = []
if hasattr(cls, 'executables'):
for exe in cls.executables:
if hasattr(self, 'executables'):
for exe in self.executables:
if sys.platform == 'win32':
exe = to_windows_exe(exe)
plat_exe.append(exe)
@@ -398,6 +397,63 @@ def _decorator(func):
return func
return _decorator
@property
def package_dir(self):
"""Directory where the package.py file lives."""
return os.path.abspath(os.path.dirname(self.module.__file__))
@property
def module(self):
"""Module object (not just the name) that this package is defined in.
We use this to add variables to package modules. This makes
install() methods easier to write (e.g., can call configure())
"""
return __import__(self.__module__, fromlist=[self.__name__])
@property
def namespace(self):
"""Spack namespace for the package, which identifies its repo."""
return spack.repo.namespace_from_fullname(self.__module__)
@property
def fullname(self):
"""Name of this package, including the namespace"""
return '%s.%s' % (self.namespace, self.name)
@property
def fullnames(self):
"""
Fullnames for this package and any packages from which it inherits.
"""
fullnames = []
for cls in inspect.getmro(self):
namespace = getattr(cls, 'namespace', None)
if namespace:
fullnames.append('%s.%s' % (namespace, self.name))
if namespace == 'builtin':
# builtin packages cannot inherit from other repos
break
return fullnames
@property
def name(self):
"""The name of this package.
The name of a package is the name of its Python module, without
the containing module names.
"""
if self._name is None:
self._name = self.module.__name__
if '.' in self._name:
self._name = self._name[self._name.rindex('.') + 1:]
return self._name
@property
def global_license_dir(self):
"""Returns the directory where license files for all packages are stored."""
return spack.util.path.canonicalize_path(spack.config.get('config:license_dir'))
def run_before(*phases):
"""Registers a method of a package to be run before a given phase"""
@@ -750,8 +806,7 @@ def __init__(self, spec):
self._fetch_time = 0.0
if self.is_extension:
pkg_cls = spack.repo.path.get_pkg_class(self.extendee_spec.name)
pkg_cls(self.extendee_spec)._check_extendable()
spack.repo.get(self.extendee_spec)._check_extendable()
super(PackageBase, self).__init__()
@@ -847,60 +902,60 @@ def possible_dependencies(
return visited
@classproperty
def package_dir(cls):
def enum_constraints(self, visited=None):
"""Return transitive dependency constraints on this package."""
if visited is None:
visited = set()
visited.add(self.name)
names = []
clauses = []
for name in self.dependencies:
if name not in visited and not spack.spec.Spec(name).virtual:
pkg = spack.repo.get(name)
dvis, dnames, dclauses = pkg.enum_constraints(visited)
visited |= dvis
names.extend(dnames)
clauses.extend(dclauses)
return visited
# package_dir and module are *class* properties (see PackageMeta),
# but to make them work on instances we need these defs as well.
@property
def package_dir(self):
"""Directory where the package.py file lives."""
return os.path.abspath(os.path.dirname(cls.module.__file__))
return type(self).package_dir
@classproperty
def module(cls):
"""Module object (not just the name) that this package is defined in.
@property
def module(self):
"""Module object that this package is defined in."""
return type(self).module
We use this to add variables to package modules. This makes
install() methods easier to write (e.g., can call configure())
"""
return __import__(cls.__module__, fromlist=[cls.__name__])
@classproperty
def namespace(cls):
@property
def namespace(self):
"""Spack namespace for the package, which identifies its repo."""
return spack.repo.namespace_from_fullname(cls.__module__)
return type(self).namespace
@classproperty
def fullname(cls):
"""Name of this package, including the namespace"""
return '%s.%s' % (cls.namespace, cls.name)
@property
def fullname(self):
"""Name of this package, including namespace: namespace.name."""
return type(self).fullname
@classproperty
def fullnames(cls):
"""Fullnames for this package and any packages from which it inherits."""
fullnames = []
for cls in inspect.getmro(cls):
namespace = getattr(cls, 'namespace', None)
if namespace:
fullnames.append('%s.%s' % (namespace, cls.name))
if namespace == 'builtin':
# builtin packages cannot inherit from other repos
break
return fullnames
@property
def fullnames(self):
return type(self).fullnames
@classproperty
def name(cls):
"""The name of this package.
@property
def name(self):
"""Name of this package (the module without parent modules)."""
return type(self).name
The name of a package is the name of its Python module, without
the containing module names.
"""
if cls._name is None:
cls._name = cls.module.__name__
if '.' in cls._name:
cls._name = cls._name[cls._name.rindex('.') + 1:]
return cls._name
@classproperty
def global_license_dir(cls):
"""Returns the directory where license files for all packages are stored."""
return spack.util.path.canonicalize_path(spack.config.get('config:license_dir'))
@property
def global_license_dir(self):
"""Returns the directory where global license files are stored."""
return type(self).global_license_dir
@property
def global_license_file(self):
@@ -918,9 +973,8 @@ def version(self):
" does not have a concrete version.")
return self.spec.versions[0]
@classmethod
@memoized
def version_urls(cls):
def version_urls(self):
"""OrderedDict of explicitly defined URLs for versions of this package.
Return:
@@ -932,7 +986,7 @@ def version_urls(cls):
if a package only defines ``url`` at the top level.
"""
version_urls = collections.OrderedDict()
for v, args in sorted(cls.versions.items()):
for v, args in sorted(self.versions.items()):
if 'url' in args:
version_urls[v] = args['url']
return version_urls
@@ -972,12 +1026,14 @@ def url_for_version(self, version):
"""
return self._implement_all_urls_for_version(version)[0]
def all_urls_for_version(self, version):
"""Return all URLs derived from version_urls(), url, urls, and
def all_urls_for_version(self, version, custom_url_for_version=None):
"""Returns all URLs derived from version_urls(), url, urls, and
list_url (if it contains a version) in a package in that order.
Args:
version (spack.version.Version): the version for which a URL is sought
version: class Version
The version for which a URL is sought.
See Class Version (version.py)
"""
uf = None
if type(self).url_for_version != Package.url_for_version:
@@ -985,7 +1041,7 @@ def all_urls_for_version(self, version):
return self._implement_all_urls_for_version(version, uf)
def _implement_all_urls_for_version(self, version, custom_url_for_version=None):
if not isinstance(version, VersionBase):
if not isinstance(version, Version):
version = Version(version)
urls = []
@@ -1256,7 +1312,6 @@ def _make_fetcher(self):
resources = self._get_needed_resources()
for resource in resources:
fetcher.append(resource.fetcher)
fetcher.set_package(self)
return fetcher
@property
@@ -1271,10 +1326,8 @@ def fetcher(self):
@fetcher.setter
def fetcher(self, f):
self._fetcher = f
self._fetcher.set_package(self)
@classmethod
def dependencies_of_type(cls, *deptypes):
def dependencies_of_type(self, *deptypes):
"""Get dependencies that can possibly have these deptypes.
This analyzes the package and determines which dependencies *can*
@@ -1284,8 +1337,8 @@ def dependencies_of_type(cls, *deptypes):
run dependency in another.
"""
return dict(
(name, conds) for name, conds in cls.dependencies.items()
if any(dt in cls.dependencies[name][cond].type
(name, conds) for name, conds in self.dependencies.items()
if any(dt in self.dependencies[name][cond].type
for cond in conds for dt in deptypes))
@property
@@ -1316,8 +1369,8 @@ def extendee_spec(self):
# TODO: do something sane here with more than one extendee
# If it's not concrete, then return the spec from the
# extends() directive since that is all we know so far.
spec_str, kwargs = next(iter(self.extendees.items()))
return spack.spec.Spec(spec_str)
spec, kwargs = next(iter(self.extendees.items()))
return spec
@property
def extendee_args(self):
@@ -1392,10 +1445,6 @@ def prefix(self):
"""Get the prefix into which this package should be installed."""
return self.spec.prefix
@property
def home(self):
return self.prefix
@property # type: ignore[misc]
@memoized
def compiler(self):
@@ -1450,7 +1499,7 @@ def do_fetch(self, mirror_only=False):
checksum = spack.config.get('config:checksum')
fetch = self.stage.managed_by_spack
if checksum and fetch and (self.version not in self.versions) \
and (not isinstance(self.version, GitVersion)):
and (not self.version.is_commit):
tty.warn("There is no checksum on file to fetch %s safely." %
self.spec.cformat('{name}{@version}'))
@@ -1672,7 +1721,7 @@ def content_hash(self, content=None):
# referenced by branch name rather than tag or commit ID.
env = spack.environment.active_environment()
from_local_sources = env and env.is_develop(self.spec)
if self.has_code and not self.spec.external and not from_local_sources:
if not self.spec.external and not from_local_sources:
message = 'Missing a source id for {s.name}@{s.version}'
tty.warn(message.format(s=self))
hash_content.append(''.encode('utf-8'))
@@ -2129,8 +2178,10 @@ def check_paths(path_list, filetype, predicate):
check_paths(self.sanity_check_is_file, 'file', os.path.isfile)
check_paths(self.sanity_check_is_dir, 'directory', os.path.isdir)
ignore_file = match_predicate(spack.store.layout.hidden_file_regexes)
if all(map(ignore_file, os.listdir(self.prefix))):
installed = set(os.listdir(self.prefix))
installed.difference_update(
spack.store.layout.hidden_file_regexes)
if not installed:
raise InstallError(
"Install failed for %s. Nothing was installed!" % self.name)
@@ -2652,15 +2703,14 @@ def do_clean(self):
self.stage.destroy()
@classmethod
def format_doc(cls, **kwargs):
def format_doc(self, **kwargs):
"""Wrap doc string at 72 characters and format nicely"""
indent = kwargs.get('indent', 0)
if not cls.__doc__:
if not self.__doc__:
return ""
doc = re.sub(r'\s+', ' ', cls.__doc__)
doc = re.sub(r'\s+', ' ', self.__doc__)
lines = textwrap.wrap(doc, 72)
results = six.StringIO()
for line in lines:
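
The classproperty helper these hunks migrate toward behaves like property but resolves on the class itself, keeping package metadata reachable without building an instance; a sketch assuming llnl.util.lang semantics:

from llnl.util.lang import classproperty


class Example(object):
    _name = None

    @classproperty
    def name(cls):
        if cls._name is None:
            cls._name = cls.__module__.rsplit('.', 1)[-1]
        return cls._name


Example.name  # usable directly on the class, no instance required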


@@ -138,8 +138,8 @@ def has_preferred_targets(cls, pkg_name):
@classmethod
def preferred_variants(cls, pkg_name):
"""Return a VariantMap of preferred variants/values for a spec."""
for pkg_cls in (pkg_name, 'all'):
variants = spack.config.get('packages').get(pkg_cls, {}).get(
for pkg in (pkg_name, 'all'):
variants = spack.config.get('packages').get(pkg, {}).get(
'variants', '')
if variants:
break
@@ -149,26 +149,21 @@ def preferred_variants(cls, pkg_name):
variants = " ".join(variants)
# Only return variants that are actually supported by the package
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
spec = spack.spec.Spec("%s %s" % (pkg_name, variants))
return dict((name, variant) for name, variant in spec.variants.items()
if name in pkg_cls.variants)
if name in pkg.variants)
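A hedged usage sketch of preferred_variants; 'hdf5' is illustrative and the result is empty unless packages.yaml sets variants for it (or for 'all'):

import spack.package_prefs

preferred = spack.package_prefs.PackagePrefs.preferred_variants('hdf5')
for name, variant in preferred.items():
    print(name, variant)   # e.g. a '+mpi' variant, if configured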
def spec_externals(spec):
"""Return a list of external specs (w/external directory path filled in),
one for each known external installation.
"""
one for each known external installation."""
# break circular import.
from spack.util.module_cmd import path_from_modules # NOQA: ignore=F401
def _package(maybe_abstract_spec):
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
return pkg_cls(maybe_abstract_spec)
allpkgs = spack.config.get('packages')
names = set([spec.name])
names |= set(vspec.name for vspec in _package(spec).virtuals_provided)
names |= set(vspec.name for vspec in spec.package.virtuals_provided)
external_specs = []
for name in names:
@@ -195,21 +190,17 @@ def _package(maybe_abstract_spec):
def is_spec_buildable(spec):
"""Return true if the spec is configured as buildable"""
"""Return true if the spec pkgspec is configured as buildable"""
allpkgs = spack.config.get('packages')
all_buildable = allpkgs.get('all', {}).get('buildable', True)
def _package(s):
pkg_cls = spack.repo.path.get_pkg_class(s.name)
return pkg_cls(s)
# Get the list of names for which all_buildable is overridden
reverse = [name for name, entry in allpkgs.items()
if entry.get('buildable', all_buildable) != all_buildable]
# Does this spec override all_buildable
spec_reversed = (spec.name in reverse or
any(_package(spec).provides(name) for name in reverse))
any(spec.package.provides(name) for name in reverse))
return not all_buildable if spec_reversed else all_buildable
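The override rule above is compact, so here is a self-contained sketch of the same logic with a plain dict standing in for spack.config.get('packages'); all names are illustrative:

def sketch_is_buildable(name, provided_virtuals, allpkgs):
    all_buildable = allpkgs.get('all', {}).get('buildable', True)
    # names whose entry explicitly flips the global default
    reverse = [n for n, entry in allpkgs.items()
               if entry.get('buildable', all_buildable) != all_buildable]
    flipped = name in reverse or any(v in reverse for v in provided_virtuals)
    return not all_buildable if flipped else all_buildable

cfg = {'all': {'buildable': True}, 'openssl': {'buildable': False}}
assert sketch_is_buildable('openssl', [], cfg) is False
assert sketch_is_buildable('zlib', [], cfg) is True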


@@ -284,11 +284,11 @@ def from_dict(dictionary):
owner = dictionary.get('owner')
if 'owner' not in dictionary:
raise ValueError('Invalid patch dictionary: %s' % dictionary)
pkg_cls = spack.repo.path.get_pkg_class(owner)
pkg = spack.repo.get(owner)
if 'url' in dictionary:
return UrlPatch(
pkg_cls,
pkg,
dictionary['url'],
dictionary['level'],
dictionary['working_dir'],
@@ -297,7 +297,7 @@ def from_dict(dictionary):
elif 'relative_path' in dictionary:
patch = FilePatch(
pkg_cls,
pkg,
dictionary['relative_path'],
dictionary['level'],
dictionary['working_dir'])
@@ -404,8 +404,8 @@ def update_package(self, pkg_fullname):
del self.index[sha256]
# update the index with per-package patch indexes
pkg_cls = spack.repo.path.get_pkg_class(pkg_fullname)
partial_index = self._index_patches(pkg_cls)
pkg = spack.repo.get(pkg_fullname)
partial_index = self._index_patches(pkg)
for sha256, package_to_patch in partial_index.items():
p2p = self.index.setdefault(sha256, {})
p2p.update(package_to_patch)
@@ -432,10 +432,10 @@ def _index_patches(pkg_class):
for cond, dependency in conditions.items():
for pcond, patch_list in dependency.patches.items():
for patch in patch_list:
dspec_cls = spack.repo.path.get_pkg_class(dependency.spec.name)
dspec = spack.repo.get(dependency.spec.name)
patch_dict = patch.to_dict()
patch_dict.pop('sha256') # save some space
index[patch.sha256] = {dspec_cls.fullname: patch_dict}
index[patch.sha256] = {dspec.fullname: patch_dict}
return index
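For orientation, the index built above maps each patch checksum to the packages that own it; a made-up entry might look like this (all values hypothetical):

example_index = {
    '1a2b3c...': {                                    # sha256, truncated
        'builtin.openmpi': {                          # owner fullname
            'url': 'https://example.com/fix.patch',   # hypothetical patch
            'level': 1,
            'working_dir': '.',
        },
    },
}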


@@ -433,9 +433,8 @@ def needs_binary_relocation(m_type, m_subtype):
m_type (str): MIME type of the file
m_subtype (str): MIME subtype of the file
"""
subtypes = ('x-executable', 'x-sharedlib', 'x-mach-binary', 'x-pie-executable')
if m_type == 'application':
if m_subtype in subtypes:
if m_subtype in ('x-executable', 'x-sharedlib', 'x-mach-binary'):
return True
return False
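A quick sanity check of the predicate above; only application/* MIME types can trigger binary relocation:

from spack.relocate import needs_binary_relocation

assert needs_binary_relocation('application', 'x-executable')
assert not needs_binary_relocation('text', 'x-shellscript')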
@@ -470,47 +469,6 @@ def _replace_prefix_text(filename, compiled_prefixes):
f.truncate()
def _replace_prefix_bin(filename, byte_prefixes):
"""Replace all the occurrences of the old install prefix with a
new install prefix in binary files.
The new install prefix is padded with ``os.sep`` until the
lengths of the prefixes are the same.
Args:
filename (str): target binary file
byte_prefixes (OrderedDict): OrderedDictionary where the keys are
precompiled regex of the old prefixes and the values are the new
prefixes (utf-8 encoded)
"""
with open(filename, 'rb+') as f:
data = f.read()
f.seek(0)
for orig_bytes, new_bytes in byte_prefixes.items():
original_data_len = len(data)
# Skip this hassle if not found
if orig_bytes not in data:
continue
# We only care about this problem if we are about to replace
length_compatible = len(new_bytes) <= len(orig_bytes)
if not length_compatible:
tty.debug('Binary failing to relocate is %s' % filename)
raise BinaryTextReplaceError(orig_bytes, new_bytes)
pad_length = len(orig_bytes) - len(new_bytes)
padding = os.sep * pad_length
padding = padding.encode('utf-8')
data = data.replace(orig_bytes, new_bytes + padding)
# Really needs to be the same length
if not len(data) == original_data_len:
print('Length of pad:', pad_length, 'should be', len(padding))
print(new_bytes, 'was to replace', orig_bytes)
raise BinaryStringReplacementError(
filename, original_data_len, len(data))
f.write(data)
f.truncate()
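A worked example of the padding scheme used above: the shorter new prefix is padded with os.sep so the rewritten data keeps its exact length, and the extra separators are harmless in a POSIX path:

import os

orig_bytes = b'/old/install/prefix'
new_bytes = b'/new'
padding = (os.sep * (len(orig_bytes) - len(new_bytes))).encode('utf-8')

data = b'/old/install/prefix/lib\x00'
patched = data.replace(orig_bytes, new_bytes + padding)
assert len(patched) == len(data)   # '/new/////...///lib' resolves to /new/lib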
def relocate_macho_binaries(path_names, old_layout_root, new_layout_root,
prefix_to_prefix, rel, old_prefix, new_prefix):
"""
@@ -818,49 +776,6 @@ def relocate_text(files, prefixes, concurrency=32):
tp.join()
def relocate_text_bin(binaries, prefixes, concurrency=32):
"""Replace null terminated path strings hard coded into binaries.
The new install prefix must be shorter than the original one.
Args:
binaries (list): binaries to be relocated
prefixes (OrderedDict): String prefixes which need to be changed.
concurrency (int): Desired degree of parallelism.
Raises:
BinaryTextReplaceError: when the new path is longer than the old path
"""
byte_prefixes = collections.OrderedDict({})
for orig_prefix, new_prefix in prefixes.items():
if orig_prefix != new_prefix:
if isinstance(orig_prefix, bytes):
orig_bytes = orig_prefix
else:
orig_bytes = orig_prefix.encode('utf-8')
if isinstance(new_prefix, bytes):
new_bytes = new_prefix
else:
new_bytes = new_prefix.encode('utf-8')
byte_prefixes[orig_bytes] = new_bytes
# Do relocations on text in binaries that refers to the install tree
# multiprocesing.ThreadPool.map requires single argument
args = []
for binary in binaries:
args.append((binary, byte_prefixes))
tp = multiprocessing.pool.ThreadPool(processes=concurrency)
try:
tp.map(llnl.util.lang.star(_replace_prefix_bin), args)
finally:
tp.terminate()
tp.join()
def is_relocatable(spec):
"""Returns True if an installed spec is relocatable.
@@ -1127,3 +1042,120 @@ def fixup_macos_rpaths(spec):
))
else:
tty.debug('No rpath fixup needed for ' + specname)
def compute_indices(filename, paths_to_relocate):
"""
Compute the indices in filename at which each of paths_to_relocate occurs.
Arguments:
filename (str): file to compute indices for
paths_to_relocate (List[str]): paths to find indices of
Returns:
Dict[int, str]: mapping from byte index in ``filename`` to the path
found at that index
"""
with open(filename, 'rb') as f:
contents = f.read()
substring_prefix = os.path.commonprefix(paths_to_relocate).encode('utf-8')
indices = {}
index = 0
max_length = max(len(path) for path in paths_to_relocate)
while True:
try:
# We search for the smallest substring of all paths we relocate
# In practice, this is the spack install root, and we relocate
# prefixes in the root and the root itself
index = contents.index(substring_prefix, index)
except ValueError:
# The string isn't found in the rest of the binary
break
else:
# only copy the smallest portion of the binary for comparisons
substring_to_check = contents[index:index + max_length]
for path in paths_to_relocate:
# The caller guarantees that any path that is a substring of
# another comes after it in the list, so the longest match wins
p = path.encode('utf-8')
if substring_to_check.startswith(p):
indices[index] = str(path)
index += len(path)
break
else:
index += 1
return indices
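A hedged sketch of compute_indices on a tiny synthetic "binary"; per the ordering contract noted above, the longer path is listed first:

import tempfile

from spack.relocate import compute_indices   # layout assumed from this commit

paths = ['/opt/spack/linux/zlib', '/opt/spack']   # superstring first
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'\x00/opt/spack/linux/zlib\x00junk\x00/opt/spack\x00')
    fname = f.name

print(compute_indices(fname, paths))
# -> {1: '/opt/spack/linux/zlib', 28: '/opt/spack'}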
def _relocate_binary_text(filename, offsets, prefix_to_prefix):
"""
Relocate the text of a single binary file, given the offsets at which the
replacements need to be made
Arguments:
filename (str): file to modify
offsets (Dict[int, str]): locations of the strings to replace
prefix_to_prefix (Dict[str, str]): strings to replace and their replacements
"""
with open(filename, 'rb+') as f:
for index, prefix in offsets.items():
replacement = prefix_to_prefix[prefix].encode('utf-8')
if len(replacement) > len(prefix):
raise BinaryTextReplaceError(prefix, replacement)
# read forward until we find the end of the string including
# the prefix and compute the replacement as we go
f.seek(index + len(prefix))
c = f.read(1)
while c not in (None, b'\x00'):
replacement += c
c = f.read(1)
# seek back to the index position, write the replacement,
# and add a null terminator
f.seek(index)
f.write(replacement)
f.write(b'\x00')
def relocate_text_bin(
files_to_relocate, prefix_to_prefix, offsets=None,
relative_root=None, concurrency=32
):
"""
For each file given, replace every key in the given translation dict with
its associated value. Optionally uses precomputed, memoized offsets for
the substitutions instead of rescanning each file.
Arguments:
files_to_relocate (List[str]): The files to modify
prefix_to_prefix (Dict[str, str]): keys are strings to replace, values are
replacements
offsets (Dict[str, Dict[int, str]]): (optional) mapping from relative
filenames to a mapping from byte indices to the strings found there
relative_root (str): (optional) prefix for relative paths in offsets
"""
# defaults to the common prefix of all input files
rel_root = relative_root or os.path.commonprefix(files_to_relocate)
if offsets is None:
offsets = {}
for filename in files_to_relocate:
indices = compute_indices(
filename,
list(prefix_to_prefix.keys()),
)
relpath = os.path.relpath(filename, rel_root)
offsets[relpath] = indices
args = [
(filename, offsets[os.path.relpath(filename, rel_root)], prefix_to_prefix)
for filename in files_to_relocate
]
tp = multiprocessing.pool.ThreadPool(processes=concurrency)
try:
tp.map(llnl.util.lang.star(_relocate_binary_text), args)
finally:
tp.terminate()
tp.join()
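Putting the pieces together, a hedged end-to-end sketch: scan each binary once, memoize the offsets, then relocate using them (the file list and prefixes are illustrative):

import os

from spack.relocate import compute_indices, relocate_text_bin

prefix_to_prefix = {'/old/opt/spack/pkg': '/new/pkg'}  # new must not be longer
files = ['/tmp/example-binary']                        # hypothetical binary
root = os.path.commonprefix(files)
offsets = {
    os.path.relpath(f, root): compute_indices(f, list(prefix_to_prefix))
    for f in files
}
relocate_text_bin(files, prefix_to_prefix, offsets=offsets, relative_root=root)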


@@ -862,6 +862,10 @@ def packages_with_tags(self, *tags):
r |= set(repo.packages_with_tags(*tags))
return sorted(r)
def all_packages(self):
for name in self.all_package_names():
yield self.get(name)
def all_package_classes(self):
for name in self.all_package_names():
yield self.get_pkg_class(name)
@@ -905,9 +909,7 @@ def providers_for(self, vpkg_spec):
@autospec
def extensions_for(self, extendee_spec):
return [pkg_cls(spack.spec.Spec(pkg_cls.name))
for pkg_cls in self.all_package_classes()
if pkg_cls(spack.spec.Spec(pkg_cls.name)).extends(extendee_spec)]
return [p for p in self.all_packages() if p.extends(extendee_spec)]
def last_mtime(self):
"""Time a package file in this repo was last updated."""
@@ -943,10 +945,9 @@ def repo_for_pkg(self, spec):
# that can operate on packages that don't exist yet.
return self.first_repo()
@autospec
def get(self, spec):
"""Returns the package associated with the supplied spec."""
msg = "RepoPath.get can only be called on concrete specs"
assert isinstance(spec, spack.spec.Spec) and spec.concrete, msg
return self.repo_for_pkg(spec).get(spec)
def get_pkg_class(self, pkg_name):
@@ -1106,10 +1107,9 @@ def _read_config(self):
tty.die("Error reading %s when opening %s"
% (self.config_file, self.root))
@autospec
def get(self, spec):
"""Returns the package associated with the supplied spec."""
msg = "Repo.get can only be called on concrete specs"
assert isinstance(spec, spack.spec.Spec) and spec.concrete, msg
# NOTE: we only check whether the package is None here, not whether it
# actually exists, because we have to load it anyway, and that ends up
# checking for existence. We avoid constructing FastPackageChecker,
@@ -1199,9 +1199,7 @@ def providers_for(self, vpkg_spec):
@autospec
def extensions_for(self, extendee_spec):
return [pkg_cls(spack.spec.Spec(pkg_cls.name))
for pkg_cls in self.all_package_classes()
if pkg_cls(spack.spec.Spec(pkg_cls.name)).extends(extendee_spec)]
return [p for p in self.all_packages() if p.extends(extendee_spec)]
def dirname_for_package_name(self, pkg_name):
"""Get the directory name for a particular package. This is the
@@ -1243,6 +1241,15 @@ def packages_with_tags(self, *tags):
return sorted(v)
def all_packages(self):
"""Iterator over all packages in the repository.
Use this with care, because loading packages is slow.
"""
for name in self.all_package_names():
yield self.get(name)
def all_package_classes(self):
"""Iterator over all package *classes* in the repository.
@@ -1391,6 +1398,11 @@ def _path(repo_dirs=None):
sys.meta_path.append(ReposFinder())
def get(spec):
"""Convenience wrapper around ``spack.repo.get()``."""
return path.get(spec)
def all_package_names(include_virtuals=False):
"""Convenience wrapper around ``spack.repo.all_package_names()``."""
return path.all_package_names(include_virtuals)
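A small sketch contrasting the two access styles above; in the stricter variant, get() only accepts concrete specs, while get_pkg_class() needs just a name:

import spack.repo
import spack.spec

pkg_cls = spack.repo.path.get_pkg_class('zlib')   # class only, no spec needed
spec = spack.spec.Spec('zlib').concretized()      # assumes 'zlib' is known
pkg = spack.repo.path.get(spec)                   # spec-bound instance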


@@ -2,6 +2,7 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Tools to produce reports of spec installations"""
import codecs
import collections
@@ -280,9 +281,9 @@ def __init__(self, cls, function, format_name, args):
.format(self.format_name))
self.report_writer = report_writers[self.format_name](args)
def __call__(self, type, dir=None):
def __call__(self, type, dir=os.getcwd()):
self.type = type
self.dir = dir or os.getcwd()
self.dir = dir
return self
def concretization_report(self, msg):
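The two __call__ signatures above differ in a subtle way worth spelling out: a default such as dir=os.getcwd() is evaluated once, at function definition time, not per call. A standalone demonstration:

import os

def report_to(dir=os.getcwd()):    # default frozen at definition time
    return dir

def report_to_lazy(dir=None):      # default resolved on each call
    return dir or os.getcwd()

before = report_to()
os.chdir('/')
assert report_to() == before            # ignores the chdir
assert report_to_lazy() == os.getcwd()  # follows the current directory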


@@ -93,8 +93,10 @@ def rewire_node(spec, explicit):
False,
spec.build_spec.prefix,
spec.prefix)
relocate.relocate_text_bin(binaries=bins_to_relocate,
prefixes=prefix_to_prefix)
# Relocate text strings of prefixes embedded in binaries
relocate.relocate_text_bin(bins_to_relocate, prefix_to_prefix)
# Copy package into place, except for spec.json (because spec.json
# describes the old spec and not the new spliced spec).
shutil.copytree(os.path.join(tempdir, spec.dag_hash()), spec.prefix,


@@ -26,9 +26,6 @@
"cpe-version": {"type": "string", "minLength": 1},
"system-type": {"type": "string", "minLength": 1},
"schema-version": {"type": "string", "minLength": 1},
# Older schemas did not have "cpe-version", just the
# schema version; in that case it was just called "version"
"version": {"type": "string", "minLength": 1},
}
},
"compilers": {


@@ -198,6 +198,9 @@ def update(data):
" [files={0}]")
warnings.warn(msg.format(', '.join(data['include'])))
if 'packages' in data:
updated |= spack.schema.packages.update(data['packages'])
# Spack 0.19 drops support for `spack:concretization` in favor of
# `spack:concretizer:unify`. Here we provide an upgrade path that changes the former
# into the latter, or warns when there's an ambiguity. Note that Spack 0.17 is not


@@ -9,6 +9,54 @@
"""
def deprecate_paths_and_modules(instance, deprecated_properties):
"""Function to produce warning/error messages if "paths" and "modules" are
found in "packages.yaml"
Args:
instance: instance of the configuration file
deprecated_properties: deprecated properties in instance
Returns:
Warning/Error message to be printed
"""
import copy
import os.path
import llnl.util.tty
import spack.util.spack_yaml as syaml
# Copy the instance to remove default attributes that are not related
# to the part that needs to be reported
instance_copy = copy.copy(instance)
# Check if this configuration comes from an environment or not
absolute_path = instance_copy._end_mark.name
command_to_suggest = '$ spack config update packages'
if os.path.basename(absolute_path) == 'spack.yaml':
command_to_suggest = '$ spack env update <environment>'
# Retrieve the relevant part of the configuration as YAML
keys_to_be_removed = [
x for x in instance_copy if x not in deprecated_properties
]
for key in keys_to_be_removed:
instance_copy.pop(key)
yaml_as_str = syaml.dump_config(instance_copy, blame=True)
if llnl.util.tty.is_debug():
msg = 'OUTDATED CONFIGURATION FILE [file={0}]\n{1}'
llnl.util.tty.debug(msg.format(absolute_path, yaml_as_str))
msg = ('detected deprecated properties in {0}\nActivate the debug '
'flag to have more information on the deprecated parts or '
'run:\n\n\t{2}\n\nto update the file to the new format\n')
return msg.format(
absolute_path, yaml_as_str, command_to_suggest
)
#: Properties for inclusion in other schemas
properties = {
'packages': {
@@ -88,7 +136,16 @@
'required': ['spec']
}
},
# Deprecated properties, will trigger an error with a
# message telling how to update.
'paths': {'type': 'object'},
'modules': {'type': 'object'},
},
'deprecatedProperties': {
'properties': ['modules', 'paths'],
'message': deprecate_paths_and_modules,
'error': False
}
},
},
},
@@ -103,3 +160,41 @@
'additionalProperties': False,
'properties': properties,
}
def update(data):
"""Update the data in place to remove deprecated properties.
Args:
data (dict): dictionary to be updated
Returns:
True if data was changed, False otherwise
"""
changed = False
for cfg_object in data.values():
externals = []
# If we don't have these deprecated attributes, continue
if not any(x in cfg_object for x in ('paths', 'modules')):
continue
# If we arrive here, we need to make some changes, i.e.
# remove and possibly convert some attributes
changed = True
paths = cfg_object.pop('paths', {})
for spec, prefix in paths.items():
externals.append({
'spec': str(spec),
'prefix': str(prefix)
})
modules = cfg_object.pop('modules', {})
for spec, module in modules.items():
externals.append({
'spec': str(spec),
'modules': [str(module)]
})
if externals:
cfg_object['externals'] = externals
return changed
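A before/after sketch of update() on a deprecated entry, with a dict literal standing in for the parsed YAML:

from spack.schema.packages import update

data = {'mpich': {'paths': {'mpich@3.4': '/opt/mpich'},
                  'modules': {'mpich@3.3': 'mpich/3.3'}}}
assert update(data)                      # something changed
assert data['mpich'] == {'externals': [
    {'spec': 'mpich@3.4', 'prefix': '/opt/mpich'},
    {'spec': 'mpich@3.3', 'modules': ['mpich/3.3']},
]}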


@@ -631,7 +631,6 @@ def visit(node):
# Load the file itself
self.control.load(os.path.join(parent_dir, 'concretize.lp'))
self.control.load(os.path.join(parent_dir, "os_compatibility.lp"))
self.control.load(os.path.join(parent_dir, "display.lp"))
timer.phase("load")
@@ -717,7 +716,7 @@ def __init__(self, tests=False):
self.variant_values_from_specs = set()
self.version_constraints = set()
self.target_constraints = set()
self.default_targets = []
self.default_targets = {}
self.compiler_version_constraints = set()
self.post_facts = []
@@ -749,13 +748,7 @@ def key_fn(version):
pkg = packagize(pkg)
declared_versions = self.declared_versions[pkg.name]
partially_sorted_versions = sorted(set(declared_versions), key=key_fn)
most_to_least_preferred = []
for _, group in itertools.groupby(partially_sorted_versions, key=key_fn):
most_to_least_preferred.extend(list(sorted(
group, reverse=True, key=lambda x: spack.version.ver(x.version)
)))
most_to_least_preferred = sorted(declared_versions, key=key_fn)
for weight, declared_version in enumerate(most_to_least_preferred):
self.gen.fact(fn.version_declared(
@@ -1178,19 +1171,29 @@ def target_preferences(self, pkg_name):
if not self.target_specs_cache:
self.target_specs_cache = [
spack.spec.Spec('target={0}'.format(target_name))
for _, target_name in self.default_targets
for target_name in archspec.cpu.TARGETS
]
package_targets = self.target_specs_cache[:]
package_targets.sort(key=key_fn)
target_specs = self.target_specs_cache
preferred_targets = [x for x in target_specs if key_fn(x) < 0]
offset = 0
best_default = self.default_targets[0][1]
for i, preferred in enumerate(package_targets):
if str(preferred.architecture.target) == best_default and i != 0:
offset = 100
self.gen.fact(fn.target_weight(
pkg_name, str(preferred.architecture.target), i + offset
for i, preferred in enumerate(preferred_targets):
self.gen.fact(fn.package_target_weight(
str(preferred.architecture.target), pkg_name, i
))
# generate weights for non-preferred targets on a per-package basis
default_targets = {
name: weight for
name, weight in self.default_targets.items()
if not any(preferred.architecture.target.name == name
for preferred in preferred_targets)
}
num_preferred = len(preferred_targets)
for name, weight in default_targets.items():
self.gen.fact(fn.default_target_weight(
name, pkg_name, weight + num_preferred + 30
))
def flag_defaults(self):
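A toy recomputation of the weighting produced above: preferred targets get small per-package weights, and the remaining default targets are pushed past them by an extra offset of 30 (target names illustrative):

preferred = ['zen2', 'haswell']
defaults = {'x86_64': 0, 'zen2': 1, 'haswell': 2, 'skylake': 3}

pkg_weights = {t: i for i, t in enumerate(preferred)}
default_weights = {name: w + len(preferred) + 30
                   for name, w in defaults.items() if name not in preferred}
# pkg_weights     -> {'zen2': 0, 'haswell': 1}
# default_weights -> {'x86_64': 32, 'skylake': 35}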
@@ -1310,16 +1313,13 @@ class Body(object):
if not spec.concrete:
reserved_names = spack.directives.reserved_names
if not spec.virtual and vname not in reserved_names:
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
try:
variant_def, _ = pkg_cls.variants[vname]
variant_def, _ = spec.package.variants[vname]
except KeyError:
msg = 'variant "{0}" not found in package "{1}"'
raise RuntimeError(msg.format(vname, spec.name))
else:
variant_def.validate_or_raise(
variant, spack.repo.path.get_pkg_class(spec.name)
)
variant_def.validate_or_raise(variant, spec.package)
clauses.append(f.variant_value(spec.name, vname, value))
@@ -1394,7 +1394,7 @@ def build_version_dict(self, possible_pkgs, specs):
packages_yaml = spack.config.get("packages")
packages_yaml = _normalize_packages_yaml(packages_yaml)
for pkg_name in possible_pkgs:
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
pkg = spack.repo.get(pkg_name)
# All the versions from the corresponding package.py file. Since concepts
# like being a "develop" version or being preferred exist only at a
@@ -1407,7 +1407,7 @@ def key_fn(item):
return info.get('preferred', False), not version.isdevelop(), version
for idx, item in enumerate(sorted(
pkg_cls.versions.items(), key=key_fn, reverse=True
pkg.versions.items(), key=key_fn, reverse=True
)):
v, version_info = item
self.possible_versions[pkg_name].add(v)
@@ -1432,16 +1432,11 @@ def key_fn(item):
continue
known_versions = self.possible_versions[dep.name]
if (not isinstance(dep.version, spack.version.GitVersion) and
if (not dep.version.is_commit and
any(v.satisfies(dep.version) for v in known_versions)):
# some version we know about satisfies this constraint, so we
# should use that one. e.g, if the user asks for qt@5 and we
# know about qt@5.5. This ensures we don't add under-specified
# versions to the solver
#
# For git versions, we know the version is already fully specified
# so we don't have to worry about whether it's an under-specified
# version
# know about qt@5.5.
continue
# if there is a concrete version on the CLI *that we know nothing
@@ -1602,12 +1597,11 @@ def target_defaults(self, specs):
# these are stored to be generated as facts later offset by the
# number of preferred targets
if target.name in best_targets:
self.default_targets.append((i, target.name))
self.default_targets[target.name] = i
i += 1
else:
self.default_targets.append((100, target.name))
self.default_targets[target.name] = 100
self.default_targets = list(sorted(set(self.default_targets)))
self.gen.newline()
def virtual_providers(self):
@@ -1686,7 +1680,7 @@ def define_virtual_constraints(self):
# extract all the real versions mentioned in version ranges
def versions_for(v):
if isinstance(v, spack.version.VersionBase):
if isinstance(v, spack.version.Version):
return [v]
elif isinstance(v, spack.version.VersionRange):
result = [v.start] if v.start else []
@@ -2195,8 +2189,8 @@ def build_specs(self, function_tuples):
# concretization process)
for root in self._specs.values():
for spec in root.traverse():
if isinstance(spec.version, spack.version.GitVersion):
spec.version.generate_git_lookup(spec.fullname)
if spec.version.is_commit:
spec.version.generate_commit_lookup(spec.fullname)
return self._specs


@@ -107,28 +107,10 @@ possible_version_weight(Package, Weight)
:- version(Package, Version),
version_declared(Package, Version, Weight).
% we can't use the weight for an external version if we don't use the
% corresponding external spec.
:- version(Package, Version),
version_weight(Package, Weight),
version_declared(Package, Version, Weight, "external"),
not external(Package).
% we can't use a weight from an installed spec if we are building it
% and vice-versa
:- version(Package, Version),
version_weight(Package, Weight),
version_declared(Package, Version, Weight, "installed"),
build(Package).
:- version(Package, Version),
version_weight(Package, Weight),
not version_declared(Package, Version, Weight, "installed"),
not build(Package).
1 { version_weight(Package, Weight) : version_declared(Package, Version, Weight) } 1
version_weight(Package, Weight)
:- version(Package, Version),
node(Package).
node(Package),
Weight = #min{W : version_declared(Package, Version, W)}.
% node_version_satisfies implies that exactly one of the satisfying versions
% is the package's version, and vice versa.
@@ -138,11 +120,6 @@ possible_version_weight(Package, Weight)
{ version(Package, Version) : version_satisfies(Package, Constraint, Version) }
:- node_version_satisfies(Package, Constraint).
% If there is at least one version that satisfies the constraint, impose a lower
% bound on the choice rule to avoid false positives with the error below
1 { version(Package, Version) : version_satisfies(Package, Constraint, Version) }
:- node_version_satisfies(Package, Constraint), version_satisfies(Package, Constraint, _).
% More specific error message if the version cannot satisfy some constraint
% Otherwise covered by `no_version_error` and `versions_conflict_error`.
error(1, "No valid version for '{0}' satisfies '@{1}'", Package, Constraint)
@@ -504,13 +481,13 @@ variant(Package, Variant) :- variant_condition(ID, Package, Variant),
condition_holds(ID).
% a variant cannot be set if it is not a variant on the package
error(2, "Cannot set variant '{0}' for package '{1}' because the variant condition cannot be satisfied for the given spec", Variant, Package)
error(2, "Cannot set variant '{0}' for package '{1}' because the variant condition cannot be satisfied for the given spec", Package, Variant)
:- variant_set(Package, Variant),
not variant(Package, Variant),
build(Package).
% a variant cannot take on a value if it is not a variant of the package
error(2, "Cannot set variant '{0}' for package '{1}' because the variant condition cannot be satisfied for the given spec", Variant, Package)
error(2, "Cannot set variant '{0}' for package '{1}' because the variant condition cannot be satisfied for the given spec", Package, Variant)
:- variant_value(Package, Variant, _),
not variant(Package, Variant),
build(Package).
@@ -737,14 +714,9 @@ node_os_mismatch(Package, Dependency) :-
% every OS is compatible with itself. We can use `os_compatible` to declare
% additional compatibility between otherwise distinct OSes.
os_compatible(OS, OS) :- os(OS).
% Transitive compatibility among operating systems
os_compatible(OS1, OS3) :- os_compatible(OS1, OS2), os_compatible(OS2, OS3).
% We can select only operating systems compatible with the ones
% for which we can build software. We need a cardinality constraint
% since we might have more than one "buildable_os(OS)" fact.
:- not 1 { os_compatible(CurrentOS, ReusedOS) : buildable_os(CurrentOS) },
node_os(Package, ReusedOS).
% OS compatibility rules for reusing solves.
% catalina binaries can be used on bigsur. Direction is package -> dependency.
os_compatible("bigsur", "catalina").
% If an OS is set explicitly respect the value
node_os(Package, OS) :- node_os_set(Package, OS), node(Package).
@@ -807,6 +779,27 @@ target_compatible(Descendent, Ancestor)
#defined target_satisfies/2.
#defined target_parent/2.
% If the package does not have any specific weight for this
% target, offset the default weights by the number of specific
% weights and use that. We additionally offset by 30 to ensure
% preferences are propagated even against large numbers of
% otherwise "better" matches.
target_weight(Target, Package, Weight)
:- default_target_weight(Target, Package, Weight),
node(Package),
not derive_target_from_parent(_, Package),
not package_target_weight(Target, Package, _).
% TODO: Need to account for the case of more than one parent
% TODO: each of which sets different targets
target_weight(Target, Dependency, Weight)
:- depends_on(Package, Dependency),
derive_target_from_parent(Package, Dependency),
target_weight(Target, Package, Weight).
target_weight(Target, Package, Weight)
:- package_target_weight(Target, Package, Weight).
% can't use targets on node if the compiler for the node doesn't support them
error(2, "{0} compiler '{2}@{3}' incompatible with 'target={1}'", Package, Target, Compiler, Version)
:- node_target(Package, Target),
@@ -823,7 +816,11 @@ node_target(Package, Target)
node_target_weight(Package, Weight)
:- node(Package),
node_target(Package, Target),
target_weight(Package, Target, Weight).
target_weight(Target, Package, Weight).
derive_target_from_parent(Parent, Package)
:- depends_on(Parent, Package),
not package_target_weight(_, Package, _).
% compatibility rules for targets among nodes
node_target_match(Parent, Dependency)
@@ -941,9 +938,6 @@ compiler_weight(Package, 100)
not node_compiler_preference(Package, Compiler, Version, _),
not default_compiler_preference(Compiler, Version, _).
% For the time being, be strict and reuse only if the compiler matches one we have on the system
:- node_compiler_version(Package, Compiler, Version), not compiler_version(Compiler, Version).
#defined node_compiler_preference/4.
#defined default_compiler_preference/3.
@@ -1252,7 +1246,6 @@ opt_criterion(1, "non-preferred targets").
#heuristic variant_value(Package, Variant, Value) : variant_default_value(Package, Variant, Value), node(Package). [10, true]
#heuristic provider(Package, Virtual) : possible_provider_weight(Package, Virtual, 0, _), virtual_node(Virtual). [10, true]
#heuristic node(Package) : possible_provider_weight(Package, Virtual, 0, _), virtual_node(Virtual). [10, true]
#heuristic node_os(Package, OS) : buildable_os(OS). [10, true]
%-----------
% Notes


@@ -1,22 +0,0 @@
% Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
% Spack Project Developers. See the top-level COPYRIGHT file for details.
%
% SPDX-License-Identifier: (Apache-2.0 OR MIT)
% OS compatibility rules for reusing solves.
% os_compatible(RecentOS, OlderOS)
% OlderOS binaries can be used on RecentOS
% macOS
os_compatible("monterey", "bigsur").
os_compatible("bigsur", "catalina").
% Ubuntu
os_compatible("ubuntu22.04", "ubuntu21.10").
os_compatible("ubuntu21.10", "ubuntu21.04").
os_compatible("ubuntu21.04", "ubuntu20.10").
os_compatible("ubuntu20.10", "ubuntu20.04").
os_compatible("ubuntu20.04", "ubuntu19.10").
os_compatible("ubuntu19.10", "ubuntu19.04").
os_compatible("ubuntu19.04", "ubuntu18.10").
os_compatible("ubuntu18.10", "ubuntu18.04").


@@ -896,7 +896,7 @@ def clear(self):
def _command_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'command' attribute.
Tries to search for ``spec.name`` in the ``spec.home.bin`` directory.
Tries to search for ``spec.name`` in the ``spec.prefix.bin`` directory.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
@@ -910,21 +910,20 @@ def _command_default_handler(descriptor, spec, cls):
Raises:
RuntimeError: If the command is not found
"""
home = getattr(spec.package, 'home')
path = os.path.join(home.bin, spec.name)
path = os.path.join(spec.prefix.bin, spec.name)
if fs.is_exe(path):
return spack.util.executable.Executable(path)
else:
msg = 'Unable to locate {0} command in {1}'
raise RuntimeError(msg.format(spec.name, home.bin))
raise RuntimeError(msg.format(spec.name, spec.prefix.bin))
def _headers_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'headers' attribute.
Tries to search for ``*.h`` files recursively starting from
``spec.package.home.include``.
``spec.prefix.include``.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
@@ -938,22 +937,21 @@ def _headers_default_handler(descriptor, spec, cls):
Raises:
NoHeadersError: If no headers are found
"""
home = getattr(spec.package, 'home')
headers = fs.find_headers('*', root=home.include, recursive=True)
headers = fs.find_headers('*', root=spec.prefix.include, recursive=True)
if headers:
return headers
else:
msg = 'Unable to locate {0} headers in {1}'
raise spack.error.NoHeadersError(
msg.format(spec.name, home))
msg.format(spec.name, spec.prefix.include))
def _libs_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'libs' attribute.
Tries to search for ``lib{spec.name}`` recursively starting from
``spec.package.home``. If ``spec.name`` starts with ``lib``, searches for
``spec.prefix``. If ``spec.name`` starts with ``lib``, searches for
``{spec.name}`` instead.
Parameters:
@@ -980,7 +978,6 @@ def _libs_default_handler(descriptor, spec, cls):
# get something like 'libabcXabc.so, but for now we consider this
# unlikely).
name = spec.name.replace('-', '?')
home = getattr(spec.package, 'home')
# Avoid double 'lib' for packages whose names already start with lib
if not name.startswith('lib'):
@@ -993,12 +990,12 @@ def _libs_default_handler(descriptor, spec, cls):
for shared in search_shared:
libs = fs.find_libraries(
name, home, shared=shared, recursive=True)
name, spec.prefix, shared=shared, recursive=True)
if libs:
return libs
msg = 'Unable to recursively locate {0} libraries in {1}'
raise spack.error.NoLibrariesError(msg.format(spec.name, home))
raise spack.error.NoLibrariesError(msg.format(spec.name, spec.prefix))
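Hedged usage of the three default handlers above, reached through the spec build interface; the lookups only succeed against an installed package:

import spack.spec

spec = spack.spec.Spec('cmake').concretized()
exe = spec['cmake'].command    # executable named after the spec, under bin/
hdrs = spec['cmake'].headers   # recursive *.h search
libs = spec['cmake'].libs      # lib<name> search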
class ForwardQueryToPackage(object):
@@ -1119,9 +1116,6 @@ def __set__(self, instance, value):
class SpecBuildInterface(lang.ObjectWrapper):
# home is available in the base Package so no default is needed
home = ForwardQueryToPackage('home', default_handler=None)
command = ForwardQueryToPackage(
'command',
default_handler=_command_default_handler
@@ -1517,9 +1511,8 @@ def root(self):
@property
def package(self):
assert self.concrete, "Spec.package can only be called on concrete specs"
if not self._package:
self._package = spack.repo.path.get(self)
self._package = spack.repo.get(self)
return self._package
@property
@@ -2501,9 +2494,8 @@ def validate_detection(self):
assert isinstance(self.extra_attributes, Mapping), msg
# Validate the spec calling a package specific method
pkg_cls = spack.repo.path.get_pkg_class(self.name)
validate_fn = getattr(
pkg_cls, 'validate_detected_spec', lambda x, y: None
self.package, 'validate_detected_spec', lambda x, y: None
)
validate_fn(self, self.extra_attributes)
@@ -2731,8 +2723,7 @@ def _old_concretize(self, tests=False, deprecation_warning=True):
visited_user_specs = set()
for dep in self.traverse():
visited_user_specs.add(dep.name)
pkg_cls = spack.repo.path.get_pkg_class(dep.name)
visited_user_specs.update(x.name for x in pkg_cls(dep).provided)
visited_user_specs.update(x.name for x in dep.package.provided)
extra = set(user_spec_deps.keys()).difference(visited_user_specs)
if extra:
@@ -2866,12 +2857,10 @@ def ensure_external_path_if_external(external_spec):
for mod in compiler.modules:
md.load_module(mod)
# Get the path from the module the package can override the default
# (this is mostly needed for Cray)
pkg_cls = spack.repo.path.get_pkg_class(external_spec.name)
package = pkg_cls(external_spec)
# get the path from the module
# the package can override the default
external_spec.external_path = getattr(
package, 'external_prefix',
external_spec.package, 'external_prefix',
md.path_from_modules(external_spec.external_modules)
)
@@ -3382,7 +3371,7 @@ def validate_or_raise(self):
for spec in self.traverse():
# raise an UnknownPackageError if the spec's package isn't real.
if (not spec.virtual) and spec.name:
spack.repo.path.get_pkg_class(spec.fullname)
spack.repo.get(spec.fullname)
# validate compiler in addition to the package name.
if spec.compiler:
@@ -3449,8 +3438,8 @@ def update_variant_validate(self, variant_name, values):
variant = pkg_variant.make_variant(value)
self.variants[variant_name] = variant
pkg_cls = spack.repo.path.get_pkg_class(self.name)
pkg_variant.validate_or_raise(self.variants[variant_name], pkg_cls)
pkg_variant.validate_or_raise(
self.variants[variant_name], self.package)
def constrain(self, other, deps=True):
"""Merge the constraints of other with self.
@@ -3638,9 +3627,7 @@ def satisfies(self, other, deps=True, strict=False, strict_deps=False):
# A concrete provider can satisfy a virtual dependency.
if not self.virtual and other.virtual:
try:
# Here we might get an abstract spec
pkg_cls = spack.repo.path.get_pkg_class(self.fullname)
pkg = pkg_cls(self)
pkg = spack.repo.get(self.fullname)
except spack.repo.UnknownEntityError:
# If we can't get package info on this spec, don't treat
# it as a provider of this vdep.
@@ -3778,8 +3765,7 @@ def patches(self):
if self._patches_assigned():
for sha256 in self.variants["patches"]._patches_in_order_of_appearance:
index = spack.repo.path.patch_index
pkg_cls = spack.repo.path.get_pkg_class(self.name)
patch = index.patch_for_package(sha256, pkg_cls)
patch = index.patch_for_package(sha256, self.package)
self._patches.append(patch)
return self._patches
@@ -5162,9 +5148,9 @@ def do_parse(self):
# Note: VersionRange(x, x) is currently concrete, hence isinstance(...).
if (
spec.name and spec.versions.concrete and
isinstance(spec.version, vn.GitVersion)
isinstance(spec.version, vn.Version) and spec.version.is_commit
):
spec.version.generate_git_lookup(spec.fullname)
spec.version.generate_commit_lookup(spec.fullname)
return specs


@@ -363,13 +363,12 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def expected_archive_files(self):
"""Possible archive file paths."""
paths = []
fnames = []
expanded = True
if isinstance(self.default_fetcher, fs.URLFetchStrategy):
expanded = self.default_fetcher.expand_archive
clean_url = os.path.basename(
sup.sanitize_file_path(self.default_fetcher.url))
fnames.append(clean_url)
fnames.append(os.path.basename(self.default_fetcher.url))
if self.mirror_paths:
fnames.extend(os.path.basename(x) for x in self.mirror_paths)


@@ -118,7 +118,7 @@ def update_package(self, pkg_name):
pkg_name (str): name of the package to be removed from the index
"""
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
package = spack.repo.path.get(pkg_name)
# Remove the package from the list of packages, if present
for pkg_list in self._tag_dict.values():
@@ -126,9 +126,9 @@ def update_package(self, pkg_name):
pkg_list.remove(pkg_name)
# Add it again under the appropriate tags
for tag in getattr(pkg_cls, 'tags', []):
for tag in getattr(package, 'tags', []):
tag = tag.lower()
self._tag_dict[tag].append(pkg_cls.name)
self._tag_dict[tag].append(package.name)
class TagIndexError(spack.error.SpackError):


@@ -180,20 +180,3 @@ def test_status_function_find_files(
_, missing = spack.bootstrap.status_message('optional')
assert missing is expected_missing
@pytest.mark.regression('31042')
def test_source_is_disabled(mutable_config):
# Get the configuration dictionary of the current bootstrapping source
conf = next(iter(spack.bootstrap.bootstrapping_sources()))
# The source is not explicitly enabled or disabled, so the following
# call should raise to skip using it for bootstrapping
with pytest.raises(ValueError):
spack.bootstrap.source_is_enabled_or_raise(conf)
# Try to explicitly disable the source and verify that the behavior
# is the same as above
spack.config.add('bootstrap:trusted:{0}:{1}'.format(conf['name'], False))
with pytest.raises(ValueError):
spack.bootstrap.source_is_enabled_or_raise(conf)
