Compare commits

1 commit
features/g ... per-instan

| Author | SHA1       | Date |
|--------|------------|------|
|        | 71b877b1d2 |      |
18  .codecov.yml

@@ -14,21 +14,3 @@ ignore:
  - share/spack/qa/.*

comment: off

# Inline codecov annotations make the code hard to read, and they add
# annotations in files that seemingly have nothing to do with the PR.
github_checks:
  annotations: false

# Attempt to fix "Missing base commit" messages in the codecov UI.
# Because we do not run full tests on package PRs, package PRs' merge
# commits on `develop` don't have coverage info. It appears that
# codecov will give you an error if the pseudo-base's coverage data
# doesn't all apply properly to the real PR base.
#
# See here for docs:
# https://docs.codecov.com/docs/comparing-commits#pseudo-comparison
# See here for another potential solution:
# https://community.codecov.com/t/2480/15
codecov:
  allow_coverage_offsets: true
38  .coveragerc  Normal file

@@ -0,0 +1,38 @@
# -*- conf -*-
# .coveragerc to control coverage.py
[run]
parallel = True
concurrency = multiprocessing
branch = True
source =
  bin
  lib
omit =
  lib/spack/spack/test/*
  lib/spack/docs/*
  lib/spack/external/*
  share/spack/qa/*

[report]
# Regexes for lines to exclude from consideration
exclude_lines =
    # Have to re-enable the standard pragma
    pragma: no cover

    # Don't complain about missing debug-only code:
    def __repr__
    if self\.debug

    # Don't complain if tests don't hit defensive assertion code:
    raise AssertionError
    raise NotImplementedError

    # Don't complain if non-runnable code isn't run:
    if 0:
    if False:
    if __name__ == .__main__.:

ignore_errors = True

[html]
directory = htmlcov
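With `parallel = True` and `concurrency = multiprocessing`, coverage.py writes one data file per process, so results have to be merged before a report can be generated. A minimal sketch of that flow, matching the `coverage combine` / `coverage xml` steps that appear in the workflow changes further down (the test command itself is only an illustration):

```console
$ coverage run $(which spack) unit-test -x   # each process writes its own .coverage.* file
$ coverage combine                           # merge the per-process data files
$ coverage xml                               # emit coverage.xml for upload to codecov
```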
@@ -8,4 +8,4 @@ share/spack/dotkit/*
share/spack/lmod/*
share/spack/modules/*
lib/spack/spack/test/*
var/spack/cache/*
42  .github/ISSUE_TEMPLATE/bug_report.md  vendored  Normal file

@@ -0,0 +1,42 @@
---
name: "\U0001F41E Bug report"
about: Report a bug in the core of Spack (command not working as expected, etc.)
labels: "bug,triage"
---

<!-- Explain, in a clear and concise way, the command you ran and the result you were trying to achieve.
Example: "I ran `spack find` to list all the installed packages and ..." -->

### Steps to reproduce the issue

```console
$ spack <command1> <spec>
$ spack <command2> <spec>
...
```

### Error Message

<!-- If Spack reported an error, provide the error message. If it did not report an error but the output appears incorrect, provide the incorrect output. If there was no error message and no output but the result is incorrect, describe how it does not match what you expect. -->
```console
$ spack --debug --stacktrace <command>
```

### Information on your system

<!-- Please include the output of `spack debug report` -->

<!-- If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well. -->

### Additional information

<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have searched the issues of this repo and believe this is not a duplicate
- [ ] I have run the failing commands in debug mode and reported the output

<!-- We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!

If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.

Other than that, thanks for taking the time to contribute to Spack! -->
58  .github/ISSUE_TEMPLATE/bug_report.yml  vendored

@@ -1,58 +0,0 @@
name: "\U0001F41E Bug report"
description: Report a bug in the core of Spack (command not working as expected, etc.)
labels: [bug, triage]
body:
  - type: textarea
    id: reproduce
    attributes:
      label: Steps to reproduce
      description: |
        Explain, in a clear and concise way, the command you ran and the result you were trying to achieve.
        Example: "I ran `spack find` to list all the installed packages and ..."
      placeholder: |
        ```console
        $ spack <command1> <spec>
        $ spack <command2> <spec>
        ...
        ```
    validations:
      required: true
  - type: textarea
    id: error
    attributes:
      label: Error message
      description: |
        If Spack reported an error, provide the error message. If it did not report an error but the output appears incorrect, provide the incorrect output. If there was no error message and no output but the result is incorrect, describe how it does not match what you expect.
      placeholder: |
        ```console
        $ spack --debug --stacktrace <command>
        ```
  - type: textarea
    id: information
    attributes:
      label: Information on your system
      description: Please include the output of `spack debug report`
    validations:
      required: true
  - type: markdown
    attributes:
      value: |
        If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well.
  - type: checkboxes
    id: checks
    attributes:
      label: General information
      options:
        - label: I have run `spack debug report` and reported the version of Spack/Python/Platform
          required: true
        - label: I have searched the issues of this repo and believe this is not a duplicate
          required: true
        - label: I have run the failing commands in debug mode and reported the output
          required: true
  - type: markdown
    attributes:
      value: |
        We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
        If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on [our Slack](https://slack.spack.io/) first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.

        Other than that, thanks for taking the time to contribute to Spack!
43  .github/ISSUE_TEMPLATE/build_error.md  vendored  Normal file

@@ -0,0 +1,43 @@
---
name: "\U0001F4A5 Build error"
about: Some package in Spack didn't build correctly
title: "Installation issue: "
labels: "build-error"
---

<!-- Thanks for taking the time to report this build failure. To proceed with the report please:

1. Title the issue "Installation issue: <name-of-the-package>".
2. Provide the information required below.

We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->

### Steps to reproduce the issue

<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->
```console
$ spack install <spec>
...
```

### Information on your system

<!-- Please include the output of `spack debug report` -->

<!-- If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well. -->

### Additional information

<!-- Please upload the following files. They should be present in the stage directory of the failing build. Also upload any config.log or similar file if one exists. -->
* [spack-build-out.txt]()
* [spack-build-env.txt]()

<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->

### General information

<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [ ] I have uploaded the build log and environment files
- [ ] I have searched the issues of this repo and believe this is not a duplicate
64  .github/ISSUE_TEMPLATE/build_error.yml  vendored

@@ -1,64 +0,0 @@
name: "\U0001F4A5 Build error"
description: Some package in Spack didn't build correctly
title: "Installation issue: "
labels: [build-error]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to report this build failure. To proceed with the report please:
        1. Title the issue `Installation issue: <name-of-the-package>`.
        2. Provide the information required below.

        We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
  - type: textarea
    id: reproduce
    attributes:
      label: Steps to reproduce the issue
      description: |
        Fill in the exact spec you are trying to build and the relevant part of the error message
      placeholder: |
        ```console
        $ spack install <spec>
        ...
        ```
    validations:
      required: true
  - type: textarea
    id: information
    attributes:
      label: Information on your system
      description: Please include the output of `spack debug report`
    validations:
      required: true
  - type: markdown
    attributes:
      value: |
        If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well.
  - type: textarea
    id: additional_information
    attributes:
      label: Additional information
      description: |
        Please upload the following files:
        * **`spack-build-out.txt`**
        * **`spack-build-env.txt`**

        They should be present in the stage directory of the failing build. Also upload any `config.log` or similar file if one exists.
  - type: markdown
    attributes:
      value: |
        Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and **@mention** them here if they exist.
  - type: checkboxes
    id: checks
    attributes:
      label: General information
      options:
        - label: I have run `spack debug report` and reported the version of Spack/Python/Platform
          required: true
        - label: I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers
          required: true
        - label: I have uploaded the build log and environment files
          required: true
        - label: I have searched the issues of this repo and believe this is not a duplicate
          required: true
1  .github/ISSUE_TEMPLATE/config.yml  vendored

@@ -1 +0,0 @@
blank_issues_enabled: true
33  .github/ISSUE_TEMPLATE/feature_request.md  vendored  Normal file

@@ -0,0 +1,33 @@
---
name: "\U0001F38A Feature request"
about: Suggest adding a feature that is not yet in Spack
labels: feature

---

<!--*Please add a concise summary of your suggestion here.*-->

### Rationale

<!--*Is your feature request related to a problem? Please describe it!*-->

### Description

<!--*Describe the solution you'd like and the alternatives you have considered.*-->


### Additional information
<!--*Add any other context about the feature request here.*-->


### General information

- [ ] I have run `spack --version` and reported the version of Spack
- [ ] I have searched the issues of this repo and believe this is not a duplicate



<!--If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.

Other than that, thanks for taking the time to contribute to Spack!
-->
41  .github/ISSUE_TEMPLATE/feature_request.yml  vendored

@@ -1,41 +0,0 @@
name: "\U0001F38A Feature request"
description: Suggest adding a feature that is not yet in Spack
labels: [feature]
body:
  - type: textarea
    id: summary
    attributes:
      label: Summary
      description: Please add a concise summary of your suggestion here.
    validations:
      required: true
  - type: textarea
    id: rationale
    attributes:
      label: Rationale
      description: Is your feature request related to a problem? Please describe it!
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Describe the solution you'd like and the alternatives you have considered.
  - type: textarea
    id: additional_information
    attributes:
      label: Additional information
      description: Add any other context about the feature request here.
  - type: checkboxes
    id: checks
    attributes:
      label: General information
      options:
        - label: I have run `spack --version` and reported the version of Spack
          required: true
        - label: I have searched the issues of this repo and believe this is not a duplicate
          required: true
  - type: markdown
    attributes:
      value: |
        If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on [our Slack](https://slack.spack.io/) first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.

        Other than that, thanks for taking the time to contribute to Spack!
6  .github/actions/add-maintainers-as-reviewers/Dockerfile  vendored  Normal file

@@ -0,0 +1,6 @@
FROM python:3.7-alpine

RUN pip install pygithub

ADD entrypoint.py /entrypoint.py
ENTRYPOINT ["/entrypoint.py"]
85  .github/actions/add-maintainers-as-reviewers/entrypoint.py  vendored  Executable file

@@ -0,0 +1,85 @@
#!/usr/bin/env python
#
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""Maintainer review action.

This action checks which packages have changed in a PR, and adds their
maintainers to the pull request for review.
"""

import json
import os
import re
import subprocess

from github import Github


def spack(*args):
    """Run the spack executable with arguments, and return the output split.

    This does just enough to run `spack pkg` and `spack maintainers`, the
    two commands used by this action.
    """
    github_workspace = os.environ['GITHUB_WORKSPACE']
    spack = os.path.join(github_workspace, 'bin', 'spack')
    output = subprocess.check_output([spack] + list(args))
    split = re.split(r'\s*', output.decode('utf-8').strip())
    return [s for s in split if s]


def main():
    # get these first so that we'll fail early
    token = os.environ['GITHUB_TOKEN']
    event_path = os.environ['GITHUB_EVENT_PATH']

    with open(event_path) as file:
        data = json.load(file)

    # make sure it's a pull_request event
    assert 'pull_request' in data

    # only request reviews on open, edit, or reopen
    action = data['action']
    if action not in ('opened', 'edited', 'reopened'):
        return

    # get data from the event payload
    pr_data = data['pull_request']
    base_branch_name = pr_data['base']['ref']
    full_repo_name = pr_data['base']['repo']['full_name']
    pr_number = pr_data['number']
    requested_reviewers = pr_data['requested_reviewers']
    author = pr_data['user']['login']

    # get a list of packages that this PR modified
    changed_pkgs = spack(
        'pkg', 'changed', '--type', 'ac', '%s...' % base_branch_name)

    # get maintainers for all modified packages
    maintainers = set()
    for pkg in changed_pkgs:
        pkg_maintainers = set(spack('maintainers', pkg))
        maintainers |= pkg_maintainers

    # remove any maintainers who are already on the PR, and the author
    # (as you can't review your own PR)
    maintainers -= set(requested_reviewers)
    maintainers -= set([author])

    if not maintainers:
        return

    # request reviews from each maintainer
    gh = Github(token)
    repo = gh.get_repo(full_repo_name)
    pr = repo.get_pull(pr_number)
    pr.create_review_request(list(maintainers))


if __name__ == "__main__":
    main()
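The action above shells out to two Spack commands; run locally, the equivalent check looks roughly like this (`zlib` is only a hypothetical changed package, and `develop` stands in for the PR's base branch):

```console
$ spack pkg changed --type ac develop...   # packages added (a) or changed (c) relative to the base
$ spack maintainers zlib                   # maintainers declared by a given package
```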
161  .github/workflows/bootstrap.yml  vendored

@@ -1,161 +0,0 @@
name: Bootstrapping

on:
  pull_request:
    branches:
      - develop
      - releases/**
    paths-ignore:
      # Don't run if we only modified packages in the
      # built-in repository or documentation
      - 'var/spack/repos/builtin/**'
      - '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
      - '!var/spack/repos/builtin/packages/python/**'
      - '!var/spack/repos/builtin/packages/re2c/**'
      - 'lib/spack/docs/**'
  schedule:
    # nightly at 2:16 AM
    - cron: '16 2 * * *'

jobs:

  fedora-sources:
    runs-on: ubuntu-latest
    container: "fedora:latest"
    steps:
      - name: Install dependencies
        run: |
          dnf install -y \
              bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
              make patch unzip which xz python3 python3-devel tree \
              cmake bison bison-devel libstdc++-static
      - uses: actions/checkout@v2
      - name: Setup repo and non-root user
        run: |
          git --version
          git fetch --unshallow
          . .github/workflows/setup_git.sh
          useradd spack-test
          chown -R spack-test .
      - name: Bootstrap clingo
        shell: runuser -u spack-test -- bash {0}
        run: |
          source share/spack/setup-env.sh
          spack bootstrap untrust github-actions
          spack external find cmake bison
          spack -d solve zlib
          tree ~/.spack/bootstrap/store/

  ubuntu-sources:
    runs-on: ubuntu-latest
    container: "ubuntu:latest"
    steps:
      - name: Install dependencies
        env:
          DEBIAN_FRONTEND: noninteractive
        run: |
          apt-get update -y && apt-get upgrade -y
          apt-get install -y \
              bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
              make patch unzip xz-utils python3 python3-dev tree \
              cmake bison
      - uses: actions/checkout@v2
      - name: Setup repo and non-root user
        run: |
          git --version
          git fetch --unshallow
          . .github/workflows/setup_git.sh
          useradd -m spack-test
          chown -R spack-test .
      - name: Bootstrap clingo
        shell: runuser -u spack-test -- bash {0}
        run: |
          source share/spack/setup-env.sh
          spack bootstrap untrust github-actions
          spack external find cmake bison
          spack -d solve zlib
          tree ~/.spack/bootstrap/store/

  opensuse-sources:
    runs-on: ubuntu-latest
    container: "opensuse/tumbleweed:latest"
    steps:
      - name: Install dependencies
        run: |
          zypper update -y
          zypper install -y \
              bzip2 curl file gcc-c++ gcc gcc-fortran tar git gpg2 gzip \
              make patch unzip which xz python3 python3-devel tree \
              cmake bison
      - uses: actions/checkout@v2
      - name: Setup repo and non-root user
        run: |
          git --version
          git fetch --unshallow
          . .github/workflows/setup_git.sh
      - name: Bootstrap clingo
        run: |
          source share/spack/setup-env.sh
          spack bootstrap untrust github-actions
          spack external find cmake bison
          spack -d solve zlib
          tree ~/.spack/bootstrap/store/

  macos-sources:
    runs-on: macos-latest
    steps:
      - name: Install dependencies
        run: |
          brew install cmake bison@2.7 tree
      - uses: actions/checkout@v2
      - name: Bootstrap clingo
        run: |
          source share/spack/setup-env.sh
          export PATH=/usr/local/opt/bison@2.7/bin:$PATH
          spack bootstrap untrust github-actions
          spack external find --not-buildable cmake bison
          spack -d solve zlib
          tree ~/.spack/bootstrap/store/

  macos-clingo-binaries:
    runs-on: macos-latest
    strategy:
      matrix:
        python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
    steps:
      - name: Install dependencies
        run: |
          brew install tree
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Bootstrap clingo
        run: |
          source share/spack/setup-env.sh
          spack bootstrap untrust spack-install
          spack -d solve zlib
          tree ~/.spack/bootstrap/store/


  ubuntu-clingo-binaries:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9']
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Setup repo and non-root user
        run: |
          git --version
          git fetch --unshallow
          . .github/workflows/setup_git.sh
      - name: Bootstrap clingo
        run: |
          source share/spack/setup-env.sh
          spack bootstrap untrust spack-install
          spack -d solve zlib
          tree ~/.spack/bootstrap/store/
72  .github/workflows/build-containers.yml  vendored

@@ -1,72 +0,0 @@
name: Build & Deploy Docker Containers
on:
  # Build new Spack develop containers nightly.
  schedule:
    - cron: '34 0 * * *'
  # Let's also build & tag Spack containers on releases.
  release:
    types: [published]

jobs:
  deploy-images:
    runs-on: ubuntu-latest
    strategy:
      # Even if one container fails to build we still want the others
      # to continue their builds.
      fail-fast: false
      # A matrix of Dockerfile paths, associated tags, and which architectures
      # they support.
      matrix:
        dockerfile: [[amazon-linux, amazonlinux-2.dockerfile, 'linux/amd64,linux/arm64'],
                     [centos7, centos-7.dockerfile, 'linux/amd64,linux/arm64'],
                     [leap15, leap-15.dockerfile, 'linux/amd64,linux/arm64'],
                     [ubuntu-xenial, ubuntu-1604.dockerfile, 'linux/amd64,linux/arm64'],
                     [ubuntu-bionic, ubuntu-1804.dockerfile, 'linux/amd64,linux/arm64']]
    name: Build ${{ matrix.dockerfile[0] }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set Container Tag Normal (Nightly)
        run: |
          container="ghcr.io/spack/${{ matrix.dockerfile[0]}}:latest"
          echo "container=${container}" >> $GITHUB_ENV
          echo "versioned=${container}" >> $GITHUB_ENV

      # On a new release create a container with the same tag as the release.
      - name: Set Container Tag on Release
        if: github.event_name == 'release'
        run: |
          versioned="ghcr.io/spack/${{matrix.dockerfile[0]}}:${GITHUB_REF##*/}"
          echo "versioned=${versioned}" >> $GITHUB_ENV

      - name: Check ${{ matrix.dockerfile[1] }} Exists
        run: |
          printf "Preparing to build ${{ env.container }} from ${{ matrix.dockerfile[1] }}"
          if [ ! -f "share/spack/docker/${{ matrix.dockerfile[1]}}" ]; then
              printf "Dockerfile ${{ matrix.dockerfile[0]}} does not exist"
              exit 1;
          fi

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Build & Deploy ${{ matrix.dockerfile[1] }}
        uses: docker/build-push-action@v2
        with:
          file: share/spack/docker/${{matrix.dockerfile[1]}}
          platforms: ${{ matrix.dockerfile[2] }}
          push: true
          tags: |
            ${{ env.container }}
            ${{ env.versioned }}
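For reference, the `docker/build-push-action` step is roughly equivalent to this manual `buildx` invocation; the tag and Dockerfile come from the `centos7` matrix entry, and this is only a sketch of what the action does:

```console
$ docker buildx build \
    --platform linux/amd64,linux/arm64 \
    --file share/spack/docker/centos-7.dockerfile \
    --tag ghcr.io/spack/centos7:latest \
    --push .
```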
77  .github/workflows/linux_build_tests.yaml  vendored  Normal file

@@ -0,0 +1,77 @@
name: linux builds

on:
  push:
    branches:
      - develop
      - releases/**
    paths-ignore:
      # Don't run if we only modified packages in the built-in repository
      - 'var/spack/repos/builtin/**'
      - '!var/spack/repos/builtin/packages/lz4/**'
      - '!var/spack/repos/builtin/packages/mpich/**'
      - '!var/spack/repos/builtin/packages/tut/**'
      - '!var/spack/repos/builtin/packages/py-setuptools/**'
      - '!var/spack/repos/builtin/packages/openjpeg/**'
      - '!var/spack/repos/builtin/packages/r-rcpp/**'
      - '!var/spack/repos/builtin/packages/ruby-rake/**'
      # Don't run if we only modified documentation
      - 'lib/spack/docs/**'
  pull_request:
    branches:
      - develop
      - releases/**
    paths-ignore:
      # Don't run if we only modified packages in the built-in repository
      - 'var/spack/repos/builtin/**'
      - '!var/spack/repos/builtin/packages/lz4/**'
      - '!var/spack/repos/builtin/packages/mpich/**'
      - '!var/spack/repos/builtin/packages/tut/**'
      - '!var/spack/repos/builtin/packages/py-setuptools/**'
      - '!var/spack/repos/builtin/packages/openjpeg/**'
      - '!var/spack/repos/builtin/packages/r-rcpp/**'
      - '!var/spack/repos/builtin/packages/ruby-rake/**'
      # Don't run if we only modified documentation
      - 'lib/spack/docs/**'

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        package:
          - lz4            # MakefilePackage
          - mpich~fortran  # AutotoolsPackage
          - tut            # WafPackage
          - py-setuptools  # PythonPackage
          - openjpeg       # CMakePackage
          - r-rcpp         # RPackage
          - ruby-rake      # RubyPackage
    steps:
      - uses: actions/checkout@v2
      - uses: actions/cache@v2.1.4
        with:
          path: ~/.ccache
          key: ccache-build-${{ matrix.package }}
          restore-keys: |
            ccache-build-${{ matrix.package }}
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install System Packages
        run: |
          sudo apt-get update
          sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev ruby findutils openssl libssl-dev libpciaccess-dev
          R --version
          perl --version
          ruby --version
      - name: Copy Configuration
        run: |
          ccache -M 300M && ccache -z
          # Set up external deps for build tests, b/c they take too long to compile
          cp share/spack/qa/configuration/*.yaml etc/spack/
      - name: Run the build test
        run: |
          . share/spack/setup-env.sh
          SPEC=${{ matrix.package }} share/spack/qa/run-build-tests
          ccache -s
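Each matrix entry exercises one build system; a single entry can be reproduced locally with the same two commands the workflow runs, for example the `lz4` (MakefilePackage) entry:

```console
$ . share/spack/setup-env.sh
$ SPEC=lz4 share/spack/qa/run-build-tests
```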
144  .github/workflows/unit_tests.yaml  vendored

@@ -24,9 +24,9 @@ jobs:
          pip install --upgrade pip
          pip install --upgrade vermin
      - name: vermin (Spack's Core)
        run: vermin --backport argparse --violations --backport typing -t=2.6- -t=3.5- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
        run: vermin --backport argparse --backport typing -t=2.6- -t=3.5- -v lib/spack/spack/ lib/spack/llnl/ bin/
      - name: vermin (Repositories)
        run: vermin --backport argparse --violations --backport typing -t=2.6- -t=3.5- -vvv var/spack/repos
        run: vermin --backport argparse --backport typing -t=2.6- -t=3.5- -v var/spack/repos
  # Run style checks on the files that have been changed
  style:
    runs-on: ubuntu-latest

@@ -39,7 +39,7 @@ jobs:
          python-version: 3.9
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools types-six
          pip install --upgrade pip six setuptools flake8 mypy>=0.800 black
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.

@@ -129,34 +129,39 @@ jobs:
        run: |
          sudo apt-get -y update
          # Needed for unit tests
          sudo apt-get -y install \
              coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
              patchelf cmake bison libbison-dev kcov
          sudo apt-get install -y coreutils gfortran graphviz gnupg2 mercurial
          sudo apt-get install -y ninja-build patchelf
          # Needed for kcov
          sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
          sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools codecov coverage[toml]
          # ensure style checks are not skipped in unit tests for python >= 3.6
          # note that true/false (i.e., 1/0) are opposite in conditions in python and bash
          if python -c 'import sys; sys.exit(not sys.version_info >= (3, 6))'; then
              pip install --upgrade flake8 isort>=4.3.5 mypy>=0.900 black
          fi
          pip install --upgrade pip six setuptools codecov coverage
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/setup_git.sh
      - name: Bootstrap clingo
        if: ${{ matrix.concretizer == 'clingo' }}
      - name: Install kcov for bash script coverage
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          SPACK_PYTHON: python
          KCOV_VERSION: 34
        run: |
          KCOV_ROOT=$(mktemp -d)
          wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
          tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
          mkdir -p ${KCOV_ROOT}/build
          cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
          make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
      - name: Bootstrap clingo from sources
        if: ${{ matrix.concretizer == 'clingo' }}
        run: |
          . share/spack/setup-env.sh
          spack bootstrap untrust spack-install
          spack external find --not-buildable cmake bison
          spack -v solve zlib
      - name: Run unit tests (full suite with coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          SPACK_PYTHON: python
          COVERAGE: true
          SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
        run: |

@@ -166,12 +171,11 @@ jobs:
      - name: Run unit tests (reduced suite without coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'false' }}
        env:
          SPACK_PYTHON: python
          ONLY_PACKAGES: true
          SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
        run: |
          share/spack/qa/run-unit-tests
      - uses: codecov/codecov-action@v2.0.3
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          flags: unittests,linux,${{ matrix.concretizer }}

@@ -190,15 +194,29 @@ jobs:
        run: |
          sudo apt-get -y update
          # Needed for shell tests
          sudo apt-get install -y coreutils kcov csh zsh tcsh fish dash bash
          sudo apt-get install -y coreutils csh zsh tcsh fish dash bash
          # Needed for kcov
          sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
          sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools codecov coverage[toml]
          pip install --upgrade pip six setuptools codecov coverage
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/setup_git.sh
      - name: Install kcov for bash script coverage
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          KCOV_VERSION: 38
        run: |
          KCOV_ROOT=$(mktemp -d)
          wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
          tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
          mkdir -p ${KCOV_ROOT}/build
          cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
          make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
      - name: Run shell tests (without coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'false' }}
        run: |

@@ -209,7 +227,7 @@ jobs:
          COVERAGE: true
        run: |
          share/spack/qa/run-shell-tests
      - uses: codecov/codecov-action@v2.0.3
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          flags: shelltests,linux

@@ -265,56 +283,40 @@ jobs:
          useradd spack-test
          chown -R spack-test .
      - name: Run unit tests
        env:
          SPACK_PYTHON: /usr/libexec/platform-python
        shell: runuser -u spack-test -- bash {0}
        run: |
          source share/spack/setup-env.sh
          spack unit-test -k 'not cvs and not svn and not hg' -x --verbose
          spack unit-test -k 'not svn and not hg' -x --verbose
  # Test for the clingo based solver (using clingo-cffi)
  clingo-cffi:
    needs: [ validate, style, documentation, changes ]
    runs-on: ubuntu-latest
    container: spack/github-actions:clingo-cffi
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install System packages
        run: |
          sudo apt-get -y update
          # Needed for unit tests
          sudo apt-get -y install \
              coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
              patchelf kcov
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools codecov coverage[toml] clingo
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/setup_git.sh
      - name: Run unit tests (full suite with coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          COVERAGE: true
          SPACK_TEST_SOLVER: clingo
        run: |
          share/spack/qa/run-unit-tests
          coverage combine
          coverage xml
      - name: Run unit tests (reduced suite without coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'false' }}
        env:
          ONLY_PACKAGES: true
          SPACK_TEST_SOLVER: clingo
        run: |
          share/spack/qa/run-unit-tests
      - uses: codecov/codecov-action@v2.0.3
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          flags: unittests,linux,clingo
      - name: Run unit tests
        run: |
          whoami && echo PWD=$PWD && echo HOME=$HOME && echo SPACK_TEST_SOLVER=$SPACK_TEST_SOLVER
          python3 -c "import clingo; print(hasattr(clingo.Symbol, '_rep'), clingo.__version__)"
          git clone https://github.com/spack/spack.git && cd spack
          git fetch origin ${{ github.ref }}:test-branch
          git checkout test-branch
          . share/spack/setup-env.sh
          spack compiler find
          spack solve mpileaks%gcc
          if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
          then
              coverage run $(which spack) unit-test -v -x
              coverage combine
              coverage xml
          else
              $(which spack) unit-test -m "not maybeslow" -k "package_sanity"
          fi
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          flags: unittests,linux,clingo
  # Run unit tests on MacOS
  build:
    needs: [ validate, style, documentation, changes ]

@@ -332,33 +334,27 @@ jobs:
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools
          pip install --upgrade codecov coverage[toml]
          pip install --upgrade codecov coverage
          pip install --upgrade flake8 pep8-naming mypy
      - name: Setup Homebrew packages
        run: |
          brew install dash fish gcc gnupg2 kcov
      - name: Run unit tests
        env:
          SPACK_TEST_SOLVER: clingo
        run: |
          git --version
          . .github/workflows/setup_git.sh
          . share/spack/setup-env.sh
          $(which spack) bootstrap untrust spack-install
          $(which spack) solve zlib
          if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
          then
              coverage run $(which spack) unit-test -x
              coverage combine
              coverage xml
              # Delete the symlink going from ./lib/spack/docs/_spack_root back to
              # the initial directory, since it causes ELOOP errors with codecov/actions@2
              rm lib/spack/docs/_spack_root
          else
              echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
              $(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
          fi
      - uses: codecov/codecov-action@v2.0.3
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          files: ./coverage.xml
          file: ./coverage.xml
          flags: unittests,macos
3  .gitignore  vendored

@@ -132,7 +132,6 @@ celerybeat.pid
.env
.venv
env/
!/lib/spack/env
venv/
ENV/
env.bak/

@@ -509,4 +508,4 @@ $RECYCLE.BIN/
*.msp

# Windows shortcuts
*.lnk
*.lnk
3  .mailmap

@@ -3,8 +3,7 @@ Adam Moody <moody20@llnl.gov> Adam T. Moody
Alfredo Gimenez <gimenez1@llnl.gov> Alfredo Gimenez <alfredo.gimenez@gmail.com>
Alfredo Gimenez <gimenez1@llnl.gov> Alfredo Adolfo Gimenez <alfredo.gimenez@gmail.com>
Andrew Williams <williamsa89@cardiff.ac.uk> Andrew Williams <andrew@alshain.org.uk>
Axel Huebl <axelhuebl@lbl.gov> Axel Huebl <a.huebl@hzdr.de>
Axel Huebl <axelhuebl@lbl.gov> Axel Huebl <axel.huebl@plasma.ninja>
Axel Huebl <a.huebl@hzdr.de> Axel Huebl <axel.huebl@plasma.ninja>
Ben Boeckel <ben.boeckel@kitware.com> Ben Boeckel <mathstuf@gmail.com>
Ben Boeckel <ben.boeckel@kitware.com> Ben Boeckel <mathstuf@users.noreply.github.com>
Benedikt Hegner <hegner@cern.ch> Benedikt Hegner <benedikt.hegner@cern.ch>
35  .mypy.ini  Normal file

@@ -0,0 +1,35 @@
[mypy]
python_version = 3.7
files=lib/spack/llnl/**/*.py,lib/spack/spack/**/*.py
mypy_path=bin,lib/spack,lib/spack/external,var/spack/repos/builtin
# This and a generated import file allows supporting packages
namespace_packages=True
# To avoid re-factoring all the externals, ignore errors and missing imports
# globally, then turn back on in spack and spack submodules
ignore_errors=True
ignore_missing_imports=True

[mypy-spack.*]
ignore_errors=False
ignore_missing_imports=False

[mypy-packages.*]
ignore_errors=False
ignore_missing_imports=False

[mypy-llnl.*]
ignore_errors=False
ignore_missing_imports=False

[mypy-spack.test.packages]
ignore_errors=True

# ignore errors in fake import path for packages
[mypy-spack.pkg.*]
ignore_errors=True
ignore_missing_imports=True

# jinja has syntax in it that requires python3 and causes a parse error
# skip importing it
[mypy-jinja2]
follow_imports=skip
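Because the target files and per-module overrides are set in `.mypy.ini` itself, a bare `mypy` invocation from the repository root should pick them up; a sketch, assuming the `mypy>=0.800` pin used in the style job:

```console
$ pip install "mypy>=0.800"
$ mypy   # reads .mypy.ini; checks lib/spack/llnl and lib/spack/spack
```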
27  CHANGELOG.md

@@ -1,30 +1,3 @@
# v0.16.2 (2021-05-22)

* Major performance improvement for `spack load` and other commands. (#23661)
* `spack fetch` is now environment-aware. (#19166)
* Numerous fixes for the new, `clingo`-based concretizer. (#23016, #23307,
  #23090, #22896, #22534, #20644, #20537, #21148)
* Support for automatically bootstrapping `clingo` from source. (#20652, #20657,
  #21364, #21446, #21913, #22354, #22444, #22460, #22489, #22610, #22631)
* Python 3.10 support: `collections.abc` (#20441)
* Fix import issues by using `__import__` instead of Spack package importer.
  (#23288, #23290)
* Bugfixes and `--source-dir` argument for `spack location`. (#22755, #22348,
  #22321)
* Better support for externals in shared prefixes. (#22653)
* `spack build-env` now prefers specs defined in the active environment.
  (#21642)
* Remove erroneous warnings about quotes in `from_sourcing_files`. (#22767)
* Fix clearing cache of `InternalConfigScope`. (#22609)
* Bugfix for the error raised when activating an already-active package. (#22587)
* Make `SingleFileScope` able to repopulate the cache after clearing it.
  (#22559)
* Channelflow: Fix the package. (#22483)
* More descriptive error message for bugs in `package.py` (#21811)
* Use package-supplied `autogen.sh`. (#20319)
* Respect `-k/verify-ssl-false` in `_existing_url` method. (#21864)


# v0.16.1 (2021-02-22)

This minor release includes a new feature and associated fixes:
10  README.md

@@ -1,11 +1,11 @@
# <img src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo.svg" width="64" valign="middle" alt="Spack"/> Spack

[](https://github.com/spack/spack/actions)
[](https://github.com/spack/spack/actions/workflows/bootstrap.yml)
[](https://github.com/spack/spack/actions)
[](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
[](https://codecov.io/gh/spack/spack)
[](https://spack.readthedocs.io)
[](https://slack.spack.io)
[](https://spackpm.herokuapp.com)

Spack is a multi-platform package manager that builds and installs
multiple versions and configurations of software. It works on Linux,

@@ -36,8 +36,6 @@ Documentation
[**Full documentation**](https://spack.readthedocs.io/) is available, or
run `spack help` or `spack help --all`.

For a cheat sheet on Spack syntax, run `spack help --spec`.

Tutorial
----------------

@@ -60,7 +58,7 @@ packages to bugfixes, documentation, or even new core features.
Resources:

* **Slack workspace**: [spackpm.slack.com](https://spackpm.slack.com).
  To get an invitation, visit [slack.spack.io](https://slack.spack.io).
  To get an invitation, [**click here**](https://spackpm.herokuapp.com).
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
  `@mention` us!

@@ -74,7 +72,7 @@ When you send your request, make ``develop`` the destination branch on the

Your PR must pass Spack's unit tests and documentation tests, and must be
[PEP 8](https://www.python.org/dev/peps/pep-0008/) compliant. We enforce
these guidelines with our CI process. To run these tests locally, and for
these guidelines with our CI process. To run these tests locally, and for
helpful tips on git, see our
[Contribution Guide](https://spack.readthedocs.io/en/latest/contribution_guide.html).
@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# sbang project developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
26  bin/spack

@@ -11,8 +11,7 @@
# See https://stackoverflow.com/a/47886254
""":"
# prefer SPACK_PYTHON environment variable, python3, python, then python2
SPACK_PREFERRED_PYTHONS="python3 python python2 /usr/libexec/platform-python"
for cmd in "${SPACK_PYTHON:-}" ${SPACK_PREFERRED_PYTHONS}; do
for cmd in "${SPACK_PYTHON:-}" python3 python python2; do
    if command -v > /dev/null "$cmd"; then
        export SPACK_PYTHON="$(command -v "$cmd")"
        exec "${SPACK_PYTHON}" "$0" "$@"

@@ -28,7 +27,6 @@ exit 1
from __future__ import print_function

import os
import os.path
import sys

min_python3 = (3, 5)

@@ -71,28 +69,6 @@ if "ruamel.yaml" in sys.modules:
if "ruamel" in sys.modules:
    del sys.modules["ruamel"]

# The following code is here to avoid failures when updating
# the develop version, due to spurious argparse.pyc files remaining
# in the libs/spack/external directory, see:
# https://github.com/spack/spack/pull/25376
# TODO: Remove in v0.18.0 or later
try:
    import argparse
except ImportError:
    argparse_pyc = os.path.join(spack_external_libs, 'argparse.pyc')
    if not os.path.exists(argparse_pyc):
        raise
    try:
        os.remove(argparse_pyc)
        import argparse  # noqa
    except Exception:
        msg = ('The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. '
               'Either delete it manually or ask some administrator to '
               'delete it for you.')
        print(msg.format(argparse_pyc))
        sys.exit(1)


import spack.main  # noqa

# Once we've set up the system path, run the spack main method
@@ -1,32 +0,0 @@
bootstrap:
  # If set to false Spack will not bootstrap missing software,
  # but will instead raise an error.
  enable: true
  # Root directory for bootstrapping work. The software bootstrapped
  # by Spack is installed in a "store" subfolder of this root directory
  root: ~/.spack/bootstrap
  # Methods that can be used to bootstrap software. Each method may or
  # may not be able to bootstrap all of the software that Spack needs,
  # depending on its type.
  sources:
    - name: 'github-actions'
      type: buildcache
      description: |
        Buildcache generated from a public workflow using Github Actions.
        The sha256 checksum of binaries is checked before installation.
      info:
        url: https://mirror.spack.io/bootstrap/github-actions/v0.1
        homepage: https://github.com/alalazo/spack-bootstrap-mirrors
        releases: https://github.com/alalazo/spack-bootstrap-mirrors/releases
    # This method is just Spack bootstrapping the software it needs from sources.
    # It has been added here so that users can selectively disable bootstrapping
    # from sources by "untrusting" it.
    - name: spack-install
      type: install
      description: |
        Specs built from sources by Spack. May take a long time.
  trusted:
    # By default we trust bootstrapping from sources and from binaries
    # produced on Github via the workflow
    github-actions: true
    spack-install: true
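The `trusted:` map at the end of this file is what `spack bootstrap untrust` toggles; the CI workflows above use exactly this mechanism to force one bootstrapping path or the other:

```console
$ spack bootstrap untrust github-actions   # force bootstrapping clingo from sources
$ spack bootstrap untrust spack-install    # force bootstrapping from the binary buildcache
```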
@@ -33,35 +33,39 @@ config:
  template_dirs:
    - $spack/share/spack/templates

  # Temporary locations Spack can try to use for builds.

  # Locations where different types of modules should be installed.
  module_roots:
    tcl: $spack/share/spack/modules
    lmod: $spack/share/spack/lmod


  # `build_stage` determines where Spack builds packages.
  #
  # Recommended options are given below.
  # The default build location is `$tempdir/$user/spack-stage/$instance`.
  # `$tempdir` indicates that we should build in a temporary directory
  # (i.e., ``$TMP`` or ``$TMPDIR``). On most systems (especially HPC
  # machines), building in a temporary directory is significantly faster
  # than other locations. `$user` ensures that the directory is unique by
  # user, so different users do not fight over Spack's build location.
  # Finally, `$instance` is an 8-digit hash that is unique per instance
  # of Spack. This ensures that different Spack instances do not fight
  # over build locations.
  #
  # Builds can be faster in temporary directories on some (e.g., HPC) systems.
  # Specifying `$tempdir` will ensure use of the default temporary directory
  # (i.e., ``$TMP`` or ``$TMPDIR``).
  # The second choice, if Spack cannot create the first one for some
  # reason, is `~/.spack/stage/$instance`. This is unique to each user's
  # home directory, and it is also unique to each Spack instance.
  #
  # Another option that prevents conflicts and potential permission issues is
  # to specify `~/.spack/stage`, which ensures each user builds in their home
  # directory.
  # These choices both have the username in the path. If the username is
  # NOT in your chosen `build_stage` location, Spack will append it
  # anyway, to avoid conflicts among users in shared temporary spaces.
  #
  # A more traditional path uses the value of `$spack/var/spack/stage`, which
  # builds directly inside Spack's instance without staging them in a
  # temporary space. Problems with specifying a path inside a Spack instance
  # are that it precludes its use as a system package and its ability to be
  # pip installable.
  #
  # In any case, if the username is not already in the path, Spack will append
  # the value of `$user` in an attempt to avoid potential conflicts between
  # users in shared temporary spaces.
  #
  # The build stage can be purged with `spack clean --stage` and
  # `spack clean -a`, so it is important that the specified directory uniquely
  # identifies Spack staging to avoid accidentally wiping out non-Spack work.
  # The build stage can be purged with `spack clean`, so it is important
  # to choose a directory that is ONLY used by Spack so that you do not
  # accidentally wipe out files that have nothing to do with Spack.
  build_stage:
    - $tempdir/$user/spack-stage
    - ~/.spack/stage
    # - $spack/var/spack/stage
    - $tempdir/$user/spack-stage/$instance
    - ~/.spack/stage/$instance

  # Directory in which to run tests and store test results.
  # Tests will be stored in directories named by date/time and package
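As the comments note, everything under `build_stage` can be purged, so the directory must be used only by Spack:

```console
$ spack clean --stage   # remove all build stages
$ spack clean -a        # remove stages, caches, and more
```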
@@ -134,18 +138,12 @@ config:
  # enabling locks.
  locks: true

  # The default url fetch method to use.
  # If set to 'curl', Spack will require curl on the user's system
  # If set to 'urllib', Spack will use python built-in libs to fetch
  url_fetch_method: urllib

  # The maximum number of jobs to use for the build system (e.g. `make`), when
  # the -j flag is not given on the command line. Defaults to 16 when not set.
  # Note that the maximum number of jobs is limited by the number of cores
  # available, taking thread affinity into account when supported. For instance:
  # - With `build_jobs: 16` and 4 cores available `spack install` will run `make -j4`
  # - With `build_jobs: 16` and 32 cores available `spack install` will run `make -j16`
  # - With `build_jobs: 2` and 4 cores available `spack install -j6` will run `make -j6`
  # The maximum number of jobs to use when running `make` in parallel,
  # always limited by the number of cores available. For instance:
  # - If set to 16 on a 4-core machine `spack install` will run `make -j4`
  # - If set to 16 on an 18-core machine `spack install` will run `make -j16`
  # If not set, Spack will use all available cores up to 16.
  # build_jobs: 16
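The comments above describe how `build_jobs` caps `make` parallelism; an explicit `-j` on the command line overrides the configured value, e.g.:

```console
$ spack install zlib        # parallelism = min(build_jobs, available cores)
$ spack install -j 6 zlib   # forces make -j6 regardless of build_jobs
```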
@@ -1,21 +0,0 @@
# -------------------------------------------------------------------------
# This is the default configuration for Spack's module file generation.
#
# Settings here are versioned with Spack and are intended to provide
# sensible defaults out of the box. Spack maintainers should edit this
# file to keep it current.
#
# Users can override these settings by editing the following files.
#
# Per-spack-instance settings (overrides defaults):
#   $SPACK_ROOT/etc/spack/modules.yaml
#
# Per-user settings (overrides default and site settings):
#   ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules:
  prefix_inspections:
    lib:
      - LD_LIBRARY_PATH
    lib64:
      - LD_LIBRARY_PATH
@@ -21,10 +21,12 @@ packages:
      - gcc
      - intel
    providers:
      elf: [libelf]
      fuse: [macfuse]
      unwind: [apple-libunwind]
      uuid: [apple-libuuid]
      elf:
        - libelf
      unwind:
        - apple-libunwind
      uuid:
        - apple-libuuid
  apple-libunwind:
    buildable: false
    externals:

@@ -1,2 +1,2 @@
mirrors:
  spack-public: https://mirror.spack.io
  spack-public: https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/
@@ -14,7 +14,8 @@
#   ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules:
  # Paths to check when creating modules for all module sets
  enable:
    - tcl
  prefix_inspections:
    bin:
      - PATH

@@ -33,20 +34,6 @@ modules:
    '':
      - CMAKE_PREFIX_PATH

  # These are configurations for the module set named "default"
  default:
    # These values are defaulted in the code. They are not defaulted here so
    # that we can enable backwards compatibility with the old syntax more
    # easily (old value is in the config yaml, config:module_roots)
    # Where to install modules
    # roots:
    #   tcl: $spack/share/spack/modules
    #   lmod: $spack/share/spack/lmod
    # What type of modules to use
    enable:
      - tcl

    # Default configurations if lmod is enabled
    lmod:
      hierarchy:
        - mpi
  lmod:
    hierarchy:
      - mpi
@@ -17,44 +17,39 @@ packages:
  all:
    compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
    providers:
      D: [ldc]
      awk: [gawk]
      blas: [openblas, amdblis]
      D: [ldc]
      daal: [intel-daal]
      elf: [elfutils]
      fftw-api: [fftw, amdfftw]
      flame: [libflame, amdlibflame]
      fuse: [libfuse]
      gl: [mesa+opengl, mesa18, opengl]
      glu: [mesa-glu, openglu]
      glx: [mesa+glx, mesa18+glx, opengl]
      glu: [mesa-glu, openglu]
      golang: [gcc]
      iconv: [libiconv]
      ipp: [intel-ipp]
      java: [openjdk, jdk, ibm-java]
      jpeg: [libjpeg-turbo, libjpeg]
      lapack: [openblas, amdlibflame]
      lua-lang: [lua, lua-luajit]
      mariadb-client: [mariadb-c-client, mariadb]
      mkl: [intel-mkl]
      mpe: [mpe2]
      mpi: [openmpi, mpich]
      mysql-client: [mysql, mariadb-c-client]
      opencl: [pocl]
      onedal: [intel-oneapi-dal]
      osmesa: [mesa+osmesa, mesa18+osmesa]
      pbs: [openpbs, torque]
      pil: [py-pillow]
      pkgconfig: [pkgconf, pkg-config]
      rpc: [libtirpc]
      scalapack: [netlib-scalapack, amdscalapack]
      sycl: [hipsycl]
      szip: [libaec, libszip]
      szip: [libszip, libaec]
      tbb: [intel-tbb]
      unwind: [libunwind]
      uuid: [util-linux-uuid, libuuid]
      xxd: [xxd-standalone, vim]
      yacc: [bison, byacc]
      flame: [libflame, amdlibflame]
      uuid: [util-linux-uuid, libuuid]
      ziglang: [zig]
  permissions:
    read: world

@@ -2,7 +2,7 @@
#

# You can set these variables from the command line.
SPHINXOPTS    = -W --keep-going
SPHINXOPTS    = -W
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

@@ -1,162 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _analyze:

=======
Analyze
=======


The analyze command is a front-end to various tools that let us analyze
package installations. Each analyzer is a module for a different kind
of analysis that can be done on a package installation, including (but not
limited to) binary, log, or text analysis. Thus, the analyze command group
allows you to take an existing package install, choose an analyzer,
and extract some output for the package using it.


-----------------
Analyzer Metadata
-----------------

For all analyzers, we write to an ``analyzers`` folder in ``~/.spack``, or the
value that you specify in your spack config at ``config:analyzers_dir``.
For example, here we see the results of running an analysis on zlib:

.. code-block:: console

   $ tree ~/.spack/analyzers/
   └── linux-ubuntu20.04-skylake
       └── gcc-9.3.0
           └── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
               ├── environment_variables
               │   └── spack-analyzer-environment-variables.json
               ├── install_files
               │   └── spack-analyzer-install-files.json
               └── libabigail
                   └── spack-analyzer-libabigail-libz.so.1.2.11.xml


This means that you can always find analyzer output in this folder, and it
is organized with the same logic as the package install it was run for.
If you want to customize this top-level folder, simply provide the ``--path``
argument to ``spack analyze run``. The nested organization will be maintained
within your custom root.
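
For example, assuming a hypothetical custom root of ``/tmp/spack-analysis``,
the same tree shown above would be rooted there instead:

.. code-block:: console

   $ spack analyze run --path /tmp/spack-analysis zlib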

-----------------
Listing Analyzers
-----------------

If you aren't familiar with Spack's analyzers, you can quickly list those that
are available:

.. code-block:: console

   $ spack analyze list-analyzers
   install_files            : install file listing read from install_manifest.json
   environment_variables    : environment variables parsed from spack-build-env.txt
   config_args              : config args loaded from spack-configure-args.txt
   abigail                  : Application Binary Interface (ABI) features for objects


In the above, the first three are fairly simple, parsing metadata files from
a package install directory and saving the results as structured output.

-------------------
Analyzing a Package
-------------------

The analyze command, akin to install, will accept a package spec to perform
an analysis for. The package must be installed. Let's walk through an example
with zlib. We first ask to analyze it. However, since we have more than one
install, we are asked to disambiguate:

.. code-block:: console

   $ spack analyze run zlib
   ==> Error: zlib matches multiple packages.
     Matching packages:
       fz2bs56 zlib@1.2.11%gcc@7.5.0 arch=linux-ubuntu18.04-skylake
       sl7m27m zlib@1.2.11%gcc@9.3.0 arch=linux-ubuntu20.04-skylake
     Use a more specific spec.


We can then specify the spec version that we want to analyze:

.. code-block:: console

   $ spack analyze run zlib/fz2bs56

If you don't provide any specific analyzer names, by default all analyzers
(shown in the ``list-analyzers`` subcommand list) will be run. If an analyzer does not
have any result, it will be skipped. For example, here is a result running for
zlib:

.. code-block:: console

   $ ls ~/.spack/analyzers/linux-ubuntu20.04-skylake/gcc-9.3.0/zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2/
   spack-analyzer-environment-variables.json
   spack-analyzer-install-files.json
   spack-analyzer-libabigail-libz.so.1.2.11.xml

If you want to run a specific analyzer, ask for it with ``--analyzer``. Here we
run the ``abigail`` analyzer on libabigail (already installed):

.. code-block:: console

   $ spack analyze run --analyzer abigail libabigail


.. _analyze_monitoring:

----------------------
Monitoring An Analysis
----------------------

For any kind of analysis, you can
use a `spack monitor <https://github.com/spack/spack-monitor>`_ ("Spackmon")
server to upload the same run metadata to. You can
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
to first create a server along with a username and token for yourself.
You can then use this guide to interact with the server.

You should first export your spack monitor token and username to the environment:

.. code-block:: console

   $ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
   $ export SPACKMON_USER=spacky


By default, the host for your server is expected to be at ``http://127.0.0.1``
with a prefix of ``ms1``, and if this is the case, you can simply add the
``--monitor`` flag to the install command:

.. code-block:: console

   $ spack analyze run --monitor wget

If you need to customize the host or the prefix, you can do that as well:

.. code-block:: console

   $ spack analyze run --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io wget

If your server doesn't have authentication, you can skip it:

.. code-block:: console

   $ spack analyze run --monitor --monitor-disable-auth wget

Regardless of your choice, when you run analyze on an installed package (whether
or not it was installed with ``--monitor``), you'll see the results generating as
they did before, and a message that the monitor server was pinged:

.. code-block:: console

   $ spack analyze --monitor wget
   ...
   ==> Sending result for wget bin/wget to monitor.
@@ -27,18 +27,12 @@ It is recommended that the following be put in your ``.bashrc`` file:

If you do not see colorized output when using ``less -R``, it is because color
is being disabled in the piped output. In this case, tell spack to force
colorized output with a flag:
colorized output.

.. code-block:: console

   $ spack --color always | less -R

or with an environment variable:

.. code-block:: console

   $ SPACK_COLOR=always spack | less -R

--------------------------
Listing available packages
--------------------------
@@ -695,136 +689,6 @@ structured the way you want:
   }


^^^^^^^^^^^^^^
``spack diff``
^^^^^^^^^^^^^^

It's often the case that you have two versions of a spec that you need to
disambiguate. Let's say that we've installed two variants of zlib, one with
and one without the optimize variant:

.. code-block:: console

   $ spack install zlib
   $ spack install zlib -optimize

When we do ``spack find`` we see the two versions.

.. code-block:: console

   $ spack find zlib
   ==> 2 installed packages
   -- linux-ubuntu20.04-skylake / gcc@9.3.0 ------------------------
   zlib@1.2.11  zlib@1.2.11


Let's now say that we want to uninstall zlib. We run the command, and quickly
hit a problem since we have two!

.. code-block:: console

   $ spack uninstall zlib
   ==> Error: zlib matches multiple packages:

   -- linux-ubuntu20.04-skylake / gcc@9.3.0 ------------------------
   efzjziy zlib@1.2.11  sl7m27m zlib@1.2.11

   ==> Error: You can either:
       a) use a more specific spec, or
       b) specify the spec by its hash (e.g. `spack uninstall /hash`), or
       c) use `spack uninstall --all` to uninstall ALL matching specs.

Oh no! We can see from the above that we have two different installations of zlib,
and the only difference between the two is the hash. This is a good use case for
``spack diff``, which can easily show us the "diff" or set difference
between properties for two packages. Let's try it out.
Since the only difference we see in the ``spack find`` view is the hash, let's use
``spack diff`` to look for more detail. We will provide the two hashes:

.. code-block:: console

   $ spack diff /efzjziy /sl7m27m
   ==> Warning: This interface is subject to change.

   --- zlib@1.2.11/efzjziyc3dmb5h5u5azsthgbgog5mj7g
   +++ zlib@1.2.11/sl7m27mzkbejtkrajigj3a3m37ygv4u2
   @@ variant_value @@
   -  zlib optimize False
   +  zlib optimize True


The output is colored, and written in the style of a git diff. This means that you
can copy and paste it into a GitHub markdown as a code block with language "diff"
and it will render nicely! Here is an example:

.. code-block:: md

   ```diff
   --- zlib@1.2.11/efzjziyc3dmb5h5u5azsthgbgog5mj7g
   +++ zlib@1.2.11/sl7m27mzkbejtkrajigj3a3m37ygv4u2
   @@ variant_value @@
   -  zlib optimize False
   +  zlib optimize True
   ```

Awesome! Now let's read the diff. It tells us that our first zlib was built with ``~optimize``
(``False``) and the second was built with ``+optimize`` (``True``). You can't see it in the docs
here, but the output above is also colored based on the content being an addition (+) or
subtraction (-).

This is a small example, but you will be able to see differences for any attributes on the
installation spec. Running ``spack diff A B`` means we'll see which spec attributes are on
``B`` but not on ``A`` (green) and which are on ``A`` but not on ``B`` (red). Here is another
example with an additional difference type, ``version``:

.. code-block:: console

   $ spack diff python@2.7.8 python@3.8.11
   ==> Warning: This interface is subject to change.

   --- python@2.7.8/tsxdi6gl4lihp25qrm4d6nys3nypufbf
   +++ python@3.8.11/yjtseru4nbpllbaxb46q7wfkyxbuvzxx
   @@ variant_value @@
   -  python patches a8c52415a8b03c0e5f28b5d52ae498f7a7e602007db2b9554df28cd5685839b8
   +  python patches 0d98e93189bc278fbc37a50ed7f183bd8aaf249a8e1670a465f0db6bb4f8cf87
   @@ version @@
   -  openssl 1.0.2u
   +  openssl 1.1.1k
   -  python 2.7.8
   +  python 3.8.11

Let's say that we were only interested in one kind of attribute above, ``version``.
We can ask the command to only output this attribute. To do this, you'd add
the ``--attribute`` parameter, which defaults to all. Here is how you
would filter to show just versions:

.. code-block:: console

   $ spack diff --attribute version python@2.7.8 python@3.8.11
   ==> Warning: This interface is subject to change.

   --- python@2.7.8/tsxdi6gl4lihp25qrm4d6nys3nypufbf
   +++ python@3.8.11/yjtseru4nbpllbaxb46q7wfkyxbuvzxx
   @@ version @@
   -  openssl 1.0.2u
   +  openssl 1.1.1k
   -  python 2.7.8
   +  python 3.8.11

And you can add as many attributes as you'd like with multiple ``--attribute`` arguments
(for lots of attributes, you can use ``-a`` for short). Finally, if you want to view the
data as JSON (and possibly pipe it into an output file) just add ``--json``:


.. code-block:: console

   $ spack diff --json python@2.7.8 python@3.8.11


This data will be much longer because along with the differences for ``A`` vs. ``B`` and
``B`` vs. ``A``, the JSON output also shows the intersection.
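
As a sketch, the JSON can be piped straight into a file for later inspection
(``diff.json`` is an arbitrary name):

.. code-block:: console

   $ spack diff --json python@2.7.8 python@3.8.11 > diff.json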

------------------------
Using installed packages
------------------------
@@ -1099,7 +963,7 @@ Variants are named options associated with a particular package. They are
optional, as each package must provide default values for each variant it
makes available. Variants can be specified using
a flexible parameter syntax ``name=<value>``. For example,
``spack install mercury debug=True`` will install mercury built with debug
``spack install libelf debug=True`` will install libelf built with debug
flags. The names of particular variants available for a package depend on
what was provided by the package author. ``spack info <package>`` will
provide information on what build variants are available.
@@ -1107,11 +971,11 @@ provide information on what build variants are available.
For compatibility with earlier versions, variants which happen to be
boolean in nature can be specified by a syntax that represents turning
options on and off. For example, in the previous spec we could have
supplied ``mercury +debug`` with the same effect of enabling the debug
supplied ``libelf +debug`` with the same effect of enabling the debug
compile-time option for the libelf package.

Depending on the package, a variant may have any default value. For
``mercury`` here, ``debug`` is ``False`` by default, and we turned it on
``libelf`` here, ``debug`` is ``False`` by default, and we turned it on
with ``debug=True`` or ``+debug``. If a variant is ``True`` by default
you can turn it off by either adding ``-name`` or ``~name`` to the spec.
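
For example, using the boolean shorthand from above (a sketch; ``libelf``'s
``debug`` variant is off by default):

.. code-block:: console

   $ spack install libelf +debug    # enable the variant
   $ spack install libelf ~debug    # explicitly disable it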
@@ -1860,39 +1724,6 @@ This issue typically manifests with the error below:

A nicer error message is TBD in future versions of Spack.

---------------
Troubleshooting
---------------

The ``spack audit`` command:

.. command-output:: spack audit -h

can be used to detect a number of configuration issues. This command detects
configuration settings which might not be strictly wrong but are not likely
to be useful outside of special cases.

It can also be used to detect dependency issues with packages - for example,
cases where a package constrains a dependency with a variant that doesn't
exist (in this case Spack could report the problem ahead of time, but
automatically performing the check would slow down most runs of Spack).

A detailed list of the checks currently implemented for each subcommand can be
printed with:

.. command-output:: spack -v audit list

Depending on the use case, users might run the appropriate subcommands to obtain
diagnostics. Issues, if found, are reported to stdout:

.. code-block:: console

   % spack audit packages lammps
   PKG-DIRECTIVES: 1 issue found
   1. lammps: wrong variant in "conflicts" directive
       the variant 'adios' does not exist
       in /home/spack/spack/var/spack/repos/builtin/packages/lammps/package.py


------------
Getting Help
@@ -31,25 +31,9 @@ Build caches are created via:

.. code-block:: console

   $ spack buildcache create <spec>
   $ spack buildcache create spec


If you wanted to create a build cache in a local directory, you would provide
the ``-d`` argument to target that directory, again also specifying the spec.
Here is an example creating a local directory, "spack-cache", and creating
build cache files for the "ninja" spec:

.. code-block:: console

   $ mkdir -p ./spack-cache
   $ spack buildcache create -d ./spack-cache ninja
   ==> Buildcache files will be output to file:///home/spackuser/spack/spack-cache/build_cache
   gpgconf: socketdir is '/run/user/1000/gnupg'
   gpg: using "E6DF6A8BD43208E4D6F392F23777740B7DBD643D" as default secret key for signing

Note that the targeted spec must already be installed. Once you have a build cache,
you can add it as a mirror, discussed next.

---------------------------------------
Finding or installing build cache files
---------------------------------------
@@ -59,98 +43,19 @@ with:

.. code-block:: console

   $ spack mirror add <name> <url>


Note that the url can be a web url *or* a local filesystem location. In the previous
example, you might add the directory "spack-cache" and call it ``mymirror``:
   $ spack mirror add <name> <url>

Build caches are found via:

.. code-block:: console

   $ spack mirror add mymirror ./spack-cache
   $ spack buildcache list


You can see that the mirror is added with ``spack mirror list`` as follows:
Build caches are installed via:

.. code-block:: console


   $ spack mirror list
   mymirror           file:///home/spackuser/spack/spack-cache
   spack-public       https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/


At this point, you've created a buildcache, but spack hasn't indexed it, so if
you run ``spack buildcache list`` you won't see any results. You need to index
this new build cache as follows:

.. code-block:: console

   $ spack buildcache update-index -d spack-cache/

Now you can use list:

.. code-block:: console

   $ spack buildcache list
   ==> 1 cached build.
   -- linux-ubuntu20.04-skylake / gcc@9.3.0 ------------------------
   ninja@1.10.2


Great! So now let's say you have a different spack installation, or perhaps just
a different environment for the same one, and you want to install a package from
that build cache. Let's first uninstall the actual library "ninja" to see if we can
re-install it from the cache.

.. code-block:: console

   $ spack uninstall ninja


And now reinstall from the buildcache:

.. code-block:: console

   $ spack buildcache install ninja
   ==> buildcache spec(s) matching ninja
   ==> Fetching file:///home/spackuser/spack/spack-cache/build_cache/linux-ubuntu20.04-skylake/gcc-9.3.0/ninja-1.10.2/linux-ubuntu20.04-skylake-gcc-9.3.0-ninja-1.10.2-i4e5luour7jxdpc3bkiykd4imke3mkym.spack
   ####################################################################################################################################### 100.0%
   ==> Installing buildcache for spec ninja@1.10.2%gcc@9.3.0 arch=linux-ubuntu20.04-skylake
   gpgconf: socketdir is '/run/user/1000/gnupg'
   gpg: Signature made Tue 23 Mar 2021 10:16:29 PM MDT
   gpg:                using RSA key E6DF6A8BD43208E4D6F392F23777740B7DBD643D
   gpg: Good signature from "spackuser (GPG created for Spack) <spackuser@noreply.users.github.com>" [ultimate]


It worked! You've just completed a full example of creating a build cache with
a spec of interest, adding it as a mirror, updating its index, listing the contents,
and finally, installing from it.


Note that the above command is intended to install a particular package to a
build cache you have created, and not to install a package from a build cache.
For the latter, once a mirror is added, by default when you do ``spack install`` the ``--use-cache``
flag is set, and you will install a package from a build cache if it is available.
If you want to always use the cache, you can do:

.. code-block:: console

   $ spack install --cache-only <package>

For example, to combine all of the commands above to add the E4S build cache
and then install from it exclusively, you would do:

.. code-block:: console

   $ spack mirror add E4S https://cache.e4s.io
   $ spack buildcache keys --install --trust
   $ spack install --cache-only <package>

We use ``--install`` and ``--trust`` to say that we are installing keys to our
keyring, and trusting all downloaded keys.

   $ spack buildcache install

^^^^^^^^^^^^^^^^^^^^^^^^^^^^
List of popular build caches
@@ -63,7 +63,6 @@ on these ideas for each distinct build system that Spack supports:

   build_systems/intelpackage
   build_systems/rocmpackage
   build_systems/custompackage
   build_systems/multiplepackage

For reference, the :py:mod:`Build System API docs <spack.build_systems>`
provide a list of build systems and methods/attributes that can be
@@ -155,7 +155,7 @@ version, this can be done like so:

   @property
   def force_autoreconf(self):
       return self.version == Version('1.2.3')
       return self.version == Version('1.2.3'):
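
For context, a minimal sketch of where such a property lives; the ``Example``
package name and its version are hypothetical:

.. code-block:: python

   class Example(AutotoolsPackage):
       """Hypothetical package that must regenerate its build system."""

       version('1.2.3', sha256='...')

       @property
       def force_autoreconf(self):
           # Force autoreconf only for the release that shipped a
           # broken configure script
           return self.version == Version('1.2.3')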

^^^^^^^^^^^^^^^^^^^^^^^
Finding configure flags
@@ -130,8 +130,8 @@ Adding flags to cmake

To add additional flags to the ``cmake`` call, simply override the
``cmake_args`` function. The following example defines values for the flags
``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with
and without the :meth:`~spack.build_systems.cmake.CMakePackage.define` and
:meth:`~spack.build_systems.cmake.CMakePackage.define_from_variant` helper functions:
and without the :py:meth:`~.CMakePackage.define` and
:py:meth:`~.CMakePackage.define_from_variant` helper functions:

.. code-block:: python
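
   # Illustrative sketch only: the flag names come from the text above;
   # the 'hdf5' variant used by define_from_variant is an assumption.
   def cmake_args(self):
       args = [
           '-DWHATEVER:STRING=somevalue',
           self.define('ENABLE_BROKEN_FEATURE', False),
           self.define_from_variant('DETECT_HDF5', 'hdf5'),
           self.define_from_variant('THREADS'),
       ]
       return args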
@@ -10,7 +10,7 @@ CudaPackage
-----------

Different from other packages, ``CudaPackage`` does not represent a build system.
Instead its goal is to simplify and unify usage of ``CUDA`` in other packages by providing a `mixin-class <https://en.wikipedia.org/wiki/Mixin>`_.
Instead its goal is to simplify and unify usage of ``CUDA`` in other packages by providing a `mixin-class <https://en.wikipedia.org/wiki/Mixin>`__.

You can find source for the package at
`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/cuda.py>`__.
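
As a mixin, it is used by inheriting from it alongside the package's build
system class. A minimal sketch, where the ``MySolver`` package and the
``USE_CUDA`` CMake option are hypothetical:

.. code-block:: python

   class MySolver(CMakePackage, CudaPackage):
       """Hypothetical package that opts into the CudaPackage mixin."""

       def cmake_args(self):
           args = []
           # The '+cuda' variant is contributed by the CudaPackage mixin
           if self.spec.satisfies('+cuda'):
               args.append(self.define('USE_CUDA', True))
           return args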
@@ -9,7 +9,7 @@
Custom Build Systems
--------------------

While the built-in build systems should meet your needs for the
While the build systems listed above should meet your needs for the
vast majority of packages, some packages provide custom build scripts.
This guide is intended for the following use cases:

@@ -31,7 +31,7 @@ installation. Both of these packages require custom build systems.
Base class
^^^^^^^^^^

If your package does not belong to any of the built-in build
If your package does not belong to any of the aforementioned build
systems that Spack already supports, you should inherit from the
``Package`` base class. ``Package`` is a simple base class with a
single phase: ``install``. If your package is simple, you may be able
@@ -168,8 +168,7 @@ if and only if this flag is set, we would use the following line:
Testing
^^^^^^^

Let's put everything together and add unit tests to be optionally run
during the installation of our package.
Let's put everything together and add unit tests to our package.
In the ``perl`` package, we can see:

.. code-block:: python
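
   # Illustrative sketch (assumed shape): run `make test` after the
   # build, but only when the user requests tests with `--test=root`.
   @run_after('build')
   @on_package_attributes(run_tests=True)
   def check_build(self):
       make('test')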

@@ -183,6 +182,12 @@ As you can guess, this runs ``make test`` *after* building the package,
if and only if testing is requested. Again, this is not specific to
custom build systems; it can be added to existing build systems as well.

Ideally, every package in Spack will have some sort of test to ensure
that it was built correctly. It is up to the package authors to make
sure this happens. If you are adding a package for some software and
the developers list commands to test the installation, please add these
tests to your ``package.py``.

.. warning::

   The order of decorators matters. The following ordering:

@@ -202,12 +207,3 @@ custom build systems, it can be added to existing build systems as well.
   the tests will always be run regardless of whether or not
   ``--test=root`` is requested. See https://github.com/spack/spack/issues/3833
   for more information.

Ideally, every package in Spack will have some sort of test to ensure
that it was built correctly. It is up to the package authors to make
sure this happens. If you are adding a package for some software and
the developers list commands to test the installation, please add these
tests to your ``package.py``.

For more information on other forms of package testing, refer to
:ref:`Checking an installation <checking_an_installation>`.
@@ -25,7 +25,7 @@ use Spack to build packages with the tools.

The Spack Python class ``IntelOneapiPackage`` is a base class that is
used by ``IntelOneapiCompilers``, ``IntelOneapiMkl``,
``IntelOneapiTbb`` and other classes to implement the oneAPI
packages. See the :ref:`package-list` for the full list of available
packages. See the :ref:<package-list> for the full list of available
oneAPI packages or use::

   spack list -d oneAPI

@@ -35,28 +35,24 @@ For more information on a specific package, do::

   spack info <package-name>

Intel no longer releases new versions of Parallel Studio, which can be
used in Spack via the :ref:`intelpackage`. All of its components can
used in Spack via the :ref:<intelpackage>. All of its components can
now be found in oneAPI.

Examples
========
Example
=======

Building a Package With icx
---------------------------

In this example, we build patchelf with ``icc`` and ``icx``. The
compilers are installed with spack.

Install the oneAPI compilers::
We start with a simple example that will be sufficient for most
users. Install the oneAPI compilers::

   spack install intel-oneapi-compilers

Add the compilers to your ``compilers.yaml`` so spack can use them::
Add the oneAPI compilers to the set of compilers that Spack can use::

   spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
   spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin

Verify that the compilers are available::
This adds the compilers to your ``compilers.yaml``. Verify that the
compilers are available::

   spack compiler list

@@ -76,11 +72,9 @@ To build with ``icx``, do ::

   spack install patchelf%oneapi

Using oneAPI MPI to Satisfy a Virtual Dependence
------------------------------------------------------

The ``hdf5`` package works with any compatible MPI implementation. To
build ``hdf5`` with Intel oneAPI MPI do::
In addition to compilers, oneAPI contains many libraries. The ``hdf5``
package works with any compatible MPI implementation. To build
``hdf5`` with Intel oneAPI MPI do::

   spack install hdf5 +mpi ^intel-oneapi-mpi

@@ -101,23 +95,11 @@ To use the compilers, add some information about the installation to

   spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin

Adapt the paths above if you did not install the tools in the default
location. After adding the compilers, using them is the same
as if you had installed the ``intel-oneapi-compilers`` package.
Another option is to manually add the configuration to
``compilers.yaml`` as described in :ref:`Compiler configuration
<compiler-config>`.

Libraries
---------

If you want Spack to use MKL that you have installed without Spack in
the default location, then add the following to
``~/.spack/packages.yaml``, adjusting the version as appropriate::

   intel-oneapi-mkl:
     externals:
     - spec: intel-oneapi-mkl@2021.1.1
       prefix: /opt/intel/oneapi/

location. After adding the compilers, using them in Spack will be
exactly the same as if you had installed the
``intel-oneapi-compilers`` package. Another option is to manually add
the configuration to ``compilers.yaml`` as described in :ref:`Compiler
configuration <compiler-config>`.


Using oneAPI Tools Installed by Spack
@@ -145,11 +127,11 @@ More information
================

This section describes basic use of oneAPI, especially if it has
changed compared to Parallel Studio. See :ref:`intelpackage` for more
information on :ref:`intel-virtual-packages`,
:ref:`intel-unrelated-packages`,
:ref:`intel-integrating-external-libraries`, and
:ref:`using-mkl-tips`.
changed compared to Parallel Studio. See :ref:<intelpackage> for more
information on :ref:<intel-virtual-packages>,
:ref:<intel-unrelated-packages>,
:ref:<intel-integrating-external-libraries>, and
:ref:<using-mkl-tips>.


.. _`Intel installers`: https://software.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top.html

@@ -561,29 +561,43 @@ follow `the next section <intel-install-libs_>`_ instead.
       modules: []
       spec: intel@18.0.3
       paths:
         cc: /usr/bin/true
         cxx: /usr/bin/true
         f77: /usr/bin/true
         fc: /usr/bin/true
         cc: stub
         cxx: stub
         f77: stub
         fc: stub

Replace ``18.0.3`` with the version that you determined in the preceding
step. The exact contents under ``paths:`` do not matter yet, but the paths must exist.
step. The contents under ``paths:`` do not matter yet.

This temporary stub is required such that the ``intel-parallel-studio`` package
can be installed for the ``intel`` compiler (which the package itself is going
to provide after the installation) rather than an arbitrary system compiler.
The paths given in ``cc``, ``cxx``, ``f77``, ``fc`` must exist, but will
never be used to build anything during the installation of ``intel-parallel-studio``.
You are right to ask: "Why on earth is that necessary?" [fn8]_
The answer lies in Spack striving for strict compiler consistency.
Consider what happens without such a pre-declared compiler stub:
Say, you ask Spack to install a particular version
``intel-parallel-studio@edition.V``. Spack will apply an unrelated compiler
spec to concretize and install your request, resulting in
``intel-parallel-studio@edition.V %X``. That compiler ``%X`` is not going to
be the version that this new package itself provides. Rather, it would
typically be ``%gcc@...`` in a default Spack installation or possibly indeed
``%intel@...``, but at a version that precedes ``V``.

The reason for this stub is that ``intel-parallel-studio`` also provides the
``mpi`` and ``mkl`` packages and, when concretizing a spec, Spack ensures
strong consistency of the used compiler across all dependencies [fn8]_.
Installing a package ``foo +mkl %intel`` will make Spack look for a package
``mkl %intel``, which can be provided by ``intel-parallel-studio+mkl %intel``,
but not by ``intel-parallel-studio+mkl %gcc``.
The problem comes to the fore as soon as you try to use any virtual ``mkl``
or ``mpi`` packages that you would expect to now be provided by
``intel-parallel-studio@edition.V``. Spack will indeed see those virtual
packages, but only as being tied to the compiler that the package
``intel-parallel-studio@edition.V`` was concretized with *at installation*.
If you were to install a client package with the new compilers now available
to you, you would naturally run ``spack install foo +mkl %intel@V``, yet
Spack will either complain about ``mkl%intel@V`` being missing (because it
only knows about ``mkl%X``) or it will go and attempt to install *another
instance* of ``intel-parallel-studio@edition.V %intel@V`` so as to match the
compiler spec ``%intel@V`` that you gave for your client package ``foo``.
This will be unexpected and will quickly get annoying because each
reinstallation takes up time and extra disk space.

Failure to do so may result in additional installations of ``mkl``, ``intel-mpi`` or
even ``intel-parallel-studio`` as dependencies for other packages.
To escape this trap, put the compiler stub declaration shown here in place,
then use that pre-declared compiler spec to install the actual package, as
shown next. This approach works because during installation only the
package's own self-sufficient installer will be used, not any compiler.

.. _`verify-compiler-anticipated`:

@@ -634,25 +648,11 @@ follow `the next section <intel-install-libs_>`_ instead.
  want to use the ``intel64`` variant. The ``icpc`` and ``ifort`` compilers
  will be located in the same directory as ``icc``.

* Make sure to specify ``modules: ['intel-parallel-studio-cluster2018.3-intel-18.0.3-HASH']``
  (with ``HASH`` being the short hash as displayed when running
  ``spack find -l intel-parallel-studio@cluster.2018.3``, and the versions adapted accordingly)
  to ensure that the correct and complete environment for the Intel compilers gets
  loaded when running them. With modern versions of the Intel compiler you may otherwise see
  issues about missing libraries. Please also note that the module name must exactly match
  the name as returned by ``module avail`` (and shown in the example above).

* Use the ``modules:`` and/or ``cflags:`` tokens to further specify a suitable accompanying
* Use the ``modules:`` and/or ``cflags:`` tokens to specify a suitable accompanying
  ``gcc`` version to help pacify picky client packages that ask for C++
  standards more recent than supported by your system-provided ``gcc`` and its
  ``libstdc++.so``.

* If you specified a custom variant (for example ``+vtune``) you may want to add this as your
  preferred variant in the packages configuration for the ``intel-parallel-studio`` package
  as described in :ref:`concretization-preferences`. Otherwise you will have to specify
  the variant every time ``intel-parallel-studio`` is being used as ``mkl``, ``fftw`` or ``mpi``
  implementation to avoid pulling in a different variant.

* To set the Intel compilers for default use in Spack, instead of the usual ``%gcc``,
  follow section `Selecting Intel compilers`_.

@@ -147,10 +147,8 @@ and a ``filter_file`` method to help with this. For example:

   def edit(self, spec, prefix):
       makefile = FileFilter('Makefile')

       makefile.filter(r'^\s*CC\s*=.*',  'CC = '  + spack_cc)
       makefile.filter(r'^\s*CXX\s*=.*', 'CXX = ' + spack_cxx)
       makefile.filter(r'^\s*F77\s*=.*', 'F77 = ' + spack_f77)
       makefile.filter(r'^\s*FC\s*=.*',  'FC = '  + spack_fc)
       makefile.filter('CC = gcc', 'CC = cc')
       makefile.filter('CXX = g++', 'CC = c++')


`stream <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/stream/package.py>`_
@@ -121,15 +121,11 @@ override the ``meson_args`` method like so:

.. code-block:: python

   def meson_args(self):
       return ['--warnlevel=3']
       return ['--default-library=both']


This method can be used to pass flags as well as variables.

Note that the ``MesonPackage`` base class already defines variants for
``buildtype``, ``default_library`` and ``strip``, which are mapped to default
Meson arguments, meaning that you don't have to specify these.
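
Because these variants map to Meson arguments automatically, they can also be
toggled on the spec rather than in ``meson_args``. A hypothetical example (the
``mypackage`` name is an assumption):

.. code-block:: console

   $ spack install mypackage buildtype=debugoptimized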

^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
@@ -1,350 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _multiplepackage:

----------------------
Multiple Build Systems
----------------------

Quite frequently, a package will change build systems from one version to the
next. For example, a small project that once used a single Makefile to build
may now require Autotools to handle the increased number of files that need to
be compiled. Or, a package that once used Autotools may switch to CMake for
Windows support. In this case, it becomes a bit more challenging to write a
single build recipe for this package in Spack.

There are several ways that this can be handled in Spack:

#. Subclass the new build system, and override phases as needed (preferred)
#. Subclass ``Package`` and implement ``install`` as needed
#. Create separate ``*-cmake``, ``*-autotools``, etc. packages for each build system
#. Rename the old package to ``*-legacy`` and create a new package
#. Move the old package to a ``legacy`` repository and create a new package
#. Drop older versions that only support the older build system

Of these options, 1 is preferred, and will be demonstrated in this
documentation. Options 3-5 have issues with concretization, so they shouldn't
be used. Options 4-5 also don't support more than two build systems. Option 6
only works if the old versions are no longer needed. Option 1 is preferred over
2 because it makes it easier to drop the old build system entirely.

The exact syntax of the package depends on which build systems you need to
support. Below are a couple of common examples.

^^^^^^^^^^^^^^^^^^^^^
Makefile -> Autotools
^^^^^^^^^^^^^^^^^^^^^

Let's say we have the following package:

.. code-block:: python

   class Foo(MakefilePackage):
       version("1.2.0", sha256="...")

       def edit(self, spec, prefix):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       def install(self, spec, prefix):
           install_tree(".", prefix)


The package subclasses from :ref:`makefilepackage`, which has three phases:

#. ``edit`` (does nothing by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

In this case, the ``install`` phase needed to be overridden because the
Makefile did not have an install target. We also modify the Makefile to use
Spack's compiler wrappers. The default ``build`` phase is not changed.

Starting with version 1.3.0, we want to use Autotools to build instead.
:ref:`autotoolspackage` has four phases:

#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

If the only version we need to support is 1.3.0, the package would look as
simple as:

.. code-block:: python

   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]


In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.

If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:

.. code-block:: python

   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def configure_args(self):
           return ["--enable-shared"]

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def autoreconf(self, spec, prefix):
           pass

       @when("@:1.2")
       def configure(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)


There are a few interesting things to note here:

* We added ``deprecated=True`` to version 1.2.0. This signifies that version
  1.2.0 is deprecated and shouldn't be used. However, if a user still relies
  on version 1.2.0, it's still there and builds just fine.
* We moved the contents of the ``edit`` phase to the ``patch`` function. Since
  ``AutotoolsPackage`` doesn't have an ``edit`` phase, the only way for this
  step to be executed is to move it to the ``patch`` function, which always
  gets run.
* The ``autoreconf`` and ``configure`` phases become no-ops. Since the old
  Makefile-based build system doesn't use these, we ignore these phases when
  building ``foo@1.2.0``.
* The ``@when`` decorator is used to override these phases only for older
  versions. The default methods are used for ``foo@1.3:``.

Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.

^^^^^^^^^^^^^^^^^^
Autotools -> CMake
^^^^^^^^^^^^^^^^^^

Let's say we have the following package:

.. code-block:: python

   class Bar(AutotoolsPackage):
       version("1.2.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]


The package subclasses from :ref:`autotoolspackage`, which has four phases:

#. ``autoreconf`` (does nothing if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.

Starting with version 1.3.0, we want to use CMake to build instead.
:ref:`cmakepackage` has three phases:

#. ``cmake`` (runs ``cmake ...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

If the only version we need to support is 1.3.0, the package would look as
simple as:

.. code-block:: python

   class Bar(CMakePackage):
       version("1.3.0", sha256="...")

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]


In this case, we use the default methods for each phase and only override
``cmake_args`` to specify additional flags to pass to ``cmake``.

If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:

.. code-block:: python

   class Bar(CMakePackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.2.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@:1.2")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())


There are a few interesting things to note here:

* We added ``deprecated=True`` to version 1.2.0. This signifies that version
  1.2.0 is deprecated and shouldn't be used. However, if a user still relies
  on version 1.2.0, it's still there and builds just fine.
* Since CMake and Autotools are so similar, we only need to override the
  ``cmake`` phase; we can use the default ``build`` and ``install`` phases.
* We override ``cmake`` to run ``./configure`` for older versions.
  ``configure_args`` remains the same.
* The ``@when`` decorator is used to override these phases only for older
  versions. The default methods are used for ``bar@1.3:``.

Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Multiple build systems for the same version
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

During the transition from one build system to another, developers often
support multiple build systems at the same time. Spack can only use a single
build system for a single version. To decide which build system to use for a
particular version, take the following things into account:

1. If the developers explicitly state that one build system is preferred over
   another, use that one.
2. If one build system is considered "experimental" while another is considered
   "stable", use the stable build system.
3. Otherwise, use the newer build system.

The developer preference for which build system to use can change over time as
a newer build system becomes stable/recommended.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Dropping support for old build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When older versions of a package don't support a newer build system, it can be
tempting to simply delete them from a package. This significantly reduces
package complexity and makes the build recipe much easier to maintain. However,
other packages or Spack users may rely on these older versions. The recommended
approach is to first support both build systems (as demonstrated above),
:ref:`deprecate <deprecate>` versions that rely on the old build system, and
remove those versions and any phases that needed to be overridden in the next
Spack release.

^^^^^^^^^^^^^^^^^^^^^^^^^^^
Three or more build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^

In rare cases, a package may change build systems multiple times. For example,
a package may start with Makefiles, then switch to Autotools, then switch to
CMake. The same logic used above can be extended to any number of build systems.
For example:

.. code-block:: python

   class Baz(CMakePackage):
       version("1.4.0", sha256="...")  # CMake
       version("1.3.0", sha256="...")  # Autotools
       version("1.2.0", sha256="...")  # Makefile

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.3.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@1.3")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def cmake(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)


^^^^^^^^^^^^^^^^^^^
Additional examples
^^^^^^^^^^^^^^^^^^^

When writing new packages, it often helps to see examples of existing packages.
Here is an incomplete list of existing Spack packages that have changed build
systems before:

================ ===================== ================
Package          Previous Build System New Build System
================ ===================== ================
amber            custom                CMake
arpack-ng        Autotools             CMake
atk              Autotools             Meson
blast            None                  Autotools
dyninst          Autotools             CMake
evtgen           Autotools             CMake
fish             Autotools             CMake
gdk-pixbuf       Autotools             Meson
glib             Autotools             Meson
glog             Autotools             CMake
gmt              Autotools             CMake
gtkplus          Autotools             Meson
hpl              Makefile              Autotools
interproscan     Perl                  Maven
jasper           Autotools             CMake
kahip            SCons                 CMake
kokkos           Makefile              CMake
kokkos-kernels   Makefile              CMake
leveldb          Makefile              CMake
libdrm           Autotools             Meson
libjpeg-turbo    Autotools             CMake
mesa             Autotools             Meson
metis            None                  CMake
mpifileutils     Autotools             CMake
muparser         Autotools             CMake
mxnet            Makefile              CMake
nest             Autotools             CMake
neuron           Autotools             CMake
nsimd            CMake                 nsconfig
opennurbs        Makefile              CMake
optional-lite    None                  CMake
plasma           Makefile              CMake
preseq           Makefile              Autotools
protobuf         Autotools             CMake
py-pygobject     Autotools             Python
singularity      Autotools             Makefile
span-lite        None                  CMake
ssht             Makefile              CMake
string-view-lite None                  CMake
superlu          Makefile              CMake
superlu-dist     Makefile              CMake
uncrustify       Autotools             CMake
================ ===================== ================

Packages that support multiple build systems can be a bit confusing to write.
Don't hesitate to open an issue or draft pull request and ask for advice from
other Spack developers!
@@ -627,8 +627,7 @@ adds:
Testing
^^^^^^^

``PythonPackage`` provides a couple of options for testing packages
both during and after the installation process.
``PythonPackage`` provides a couple of options for testing packages.

""""""""""""
Import tests
@@ -697,20 +696,16 @@ libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed.

Import tests can be run during the installation using ``spack install
--test=root`` or at any time after the installation using
``spack test run``.
These tests can be triggered by running ``spack install --test=root``
or by running ``spack test run`` after the installation has finished.
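
For instance (using ``py-numpy``, which appears below, as the example
package):

.. code-block:: console

   $ spack install --test=root py-numpy
   $ spack test run py-numpy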

""""""""""
Unit tests
""""""""""

The package may have its own unit or regression tests. Spack can
run these tests during the installation by adding phase-appropriate
test methods.

For example, ``py-numpy`` adds the following as a check to run
after the ``install`` phase:
The package you want to install may come with additional unit tests.
You can add additional build-time or install-time tests by adding
additional testing functions. For example, ``py-numpy`` adds:

.. code-block:: python

@@ -721,13 +716,7 @@ after the ``install`` phase:
       python('-c', 'import numpy; numpy.test("full", verbose=2)')


when testing is enabled during the installation (i.e., ``spack install
--test=root``).

.. note::

   Additional information is available on :ref:`install phase tests
   <install_phase-tests>`.
These tests can be triggered by running ``spack install --test=root``.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setup file in a sub-directory
@@ -17,10 +17,10 @@
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import re
import subprocess
import sys
from glob import glob

from sphinx.ext.apidoc import main as sphinx_apidoc

@@ -82,8 +82,6 @@
# Disable duplicate cross-reference warnings.
#
from sphinx.domains.python import PythonDomain


class PatchedPythonDomain(PythonDomain):
    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        if 'refspecific' in node:
@@ -97,19 +95,15 @@ def setup(sphinx):
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '3.4'
needs_sphinx = '1.8'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.graphviz',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinxcontrib.programoutput',
]
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.graphviz',
              'sphinx.ext.napoleon',
              'sphinx.ext.todo',
              'sphinxcontrib.programoutput']

# Set default graphviz options
graphviz_dot_args = [
@@ -142,7 +136,6 @@ def setup(sphinx):
#
# The short X.Y version.
import spack

version = '.'.join(str(s) for s in spack.spack_version_info[:2])
# The full version, including alpha/beta/rc tags.
release = spack.spack_version
@@ -168,19 +161,6 @@ def setup(sphinx):
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_spack_root', '.spack-env']

nitpicky = True
nitpick_ignore = [
    # Python classes that intersphinx is unable to resolve
    ('py:class', 'argparse.HelpFormatter'),
    ('py:class', 'contextlib.contextmanager'),
    ('py:class', 'module'),
    ('py:class', '_io.BufferedReader'),
    ('py:class', 'unittest.case.TestCase'),
    ('py:class', '_frozen_importlib_external.SourceFileLoader'),
    # Spack classes that are private and we don't want to expose
    ('py:class', 'spack.provider_index._IndexBase'),
]

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

@@ -199,8 +179,7 @@ def setup(sphinx):
# We use our own extension of the default style with a few modifications
from pygments.style import Style
from pygments.styles.default import DefaultStyle
from pygments.token import Comment, Generic, Text

from pygments.token import Generic, Comment, Text

class SpackStyle(DefaultStyle):
    styles = DefaultStyle.styles.copy()
@@ -209,7 +188,6 @@ class SpackStyle(DefaultStyle):
    styles[Generic.Prompt] = "bold #346ec9"

import pkg_resources

dist = pkg_resources.Distribution(__file__)
sys.path.append('.')  # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse('spack = conf:SpackStyle', dist=dist)
@@ -375,11 +353,3 @@ class SpackStyle(DefaultStyle):

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


# -- Extension configuration -------------------------------------------------

# sphinx.ext.intersphinx
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
}

@@ -202,23 +202,21 @@ of builds.

Unless overridden in a package or on the command line, Spack builds all
packages in parallel. The default parallelism is equal to the number of
cores available to the process, up to 16 (the default of ``build_jobs``).
For a build system that uses Makefiles, this ``spack install`` runs:
cores on your machine, up to 16. Parallelism cannot exceed the number of
cores available on the host. For a build system that uses Makefiles, this
means running:

- ``make -j<build_jobs>``, when ``build_jobs`` is less than the number of
  cores available
  cores on the machine
- ``make -j<ncores>``, when ``build_jobs`` is greater or equal to the
  number of cores available
  number of cores on the machine

If you work on a shared login node or have a strict ulimit, it may be
necessary to set the default to a lower value. By setting ``build_jobs``
to 4, for example, commands like ``spack install`` will run ``make -j4``
instead of hogging every core. To build all software in serial,
set ``build_jobs`` to 1.
instead of hogging every core.

Note that specifying the number of jobs on the command line always takes
priority, so that ``spack install -j<n>`` always runs ``make -j<n>``, even
when that exceeds the number of cores available.
To build all software in serial, set ``build_jobs`` to 1.
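
For instance, a sketch of the corresponding ``config.yaml`` entry (the
surrounding configuration is illustrative):

.. code-block:: yaml

   config:
     # Cap parallel builds at 4 jobs; spack install -j<n> still overrides this
     build_jobs: 4
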
--------------------
``ccache``

File diff suppressed because it is too large
@@ -248,9 +248,9 @@ Users can add abstract specs to an Environment using the ``spack add``
command. The most important component of an Environment is a list of
abstract specs.

Adding a spec adds to the manifest (the ``spack.yaml`` file), which is
used to define the roots of the Environment, but does not affect the
concrete specs in the lockfile, nor does it install the spec.
Adding a spec adds to the manifest (the ``spack.yaml`` file) and to
the roots of the Environment, but does not affect the concrete specs
in the lockfile, nor does it install the spec.

The ``spack add`` command is environment aware. It adds to the
currently active environment. All environment aware commands can also
@@ -356,18 +356,6 @@ command also stores a Spack repo containing the ``package.py`` file
used at install time for each package in the ``repos/`` directory in
the Environment.

The ``--no-add`` option can be used in a concrete environment to tell
spack to install specs already present in the environment but not to
add any new root specs to the environment. For root specs provided
to ``spack install`` on the command line, ``--no-add`` is the default,
while for dependency specs on the other hand, it is optional. In other
words, if there is an unambiguous match in the active concrete environment
for a root spec provided to ``spack install`` on the command line, spack
does not require you to specify the ``--no-add`` option to prevent the spec
from being added again. At the same time, a spec that already exists in the
environment, but only as a dependency, will be added to the environment as a
root spec without the ``--no-add`` option.
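
As a sketch of that behavior (assuming an active concrete environment with
``hdf5`` among its roots and ``zlib`` present only as a dependency):

.. code-block:: console

   $ spack install hdf5            # matches an existing root; not added again
   $ spack install --no-add zlib   # installs the dependency without adding a new root
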
^^^^^^^
Loading
^^^^^^^
@@ -723,8 +711,6 @@ Spack Environment managed views are updated every time the environment
is written out to the lock file ``spack.lock``, so the concrete
environment and the view are always compatible.

.. _configuring_environment_views:

"""""""""""""""""""""""""""""
Configuring environment views
"""""""""""""""""""""""""""""
@@ -732,17 +718,13 @@ Configuring environment views

The Spack Environment manifest file has a top-level keyword
``view``. Each entry under that heading is a view descriptor, headed
by a name. The view descriptor contains the root of the view, and
optionally the projections for the view, ``select`` and
``exclude`` lists for the view and link information via ``link`` and
``link_type``. For example, in the following manifest
optionally the projections for the view, and ``select`` and
``exclude`` lists for the view. For example, in the following manifest
file snippet we define a view named ``mpis``, rooted at
``/path/to/view`` in which all projections use the package name,
version, and compiler name to determine the path for a given
package. This view selects all packages that depend on MPI, and
excludes those built with the PGI compiler at version 18.5.
All the dependencies of each root spec in the environment will be linked
in the view due to the command ``link: all`` and the files in the view will
be symlinks to the spack install directories.

.. code-block:: yaml

@@ -755,16 +737,11 @@ be symlinks to the spack install directories.
       exclude: ['%pgi@18.5']
       projections:
         all: {name}/{version}-{compiler.name}
       link: all
       link_type: symlink

For more information on using view projections, see the section on
:ref:`adding_projections_to_views`. The default for the ``select`` and
``exclude`` values is to select everything and exclude nothing. The
default projection is the default view projection (``{}``). The ``link``
defaults to ``all`` but can also be ``roots`` when only the root specs
in the environment are desired in the view. The ``link_type`` defaults
to ``symlink`` but can also take the value of ``hardlink`` or ``copy``.
default projection is the default view projection (``{}``).

Any number of views may be defined under the ``view`` heading in a
Spack Environment.
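
For instance, a sketch of two named view descriptors under one ``view``
heading (names, paths, and spec choices here are illustrative):

.. code-block:: yaml

   spack:
     # ...
     view:
       default:
         root: /path/to/default/view
       mpi_only:
         root: /path/to/mpi/view
         select: [^mpi]
         link: roots
         link_type: hardlink
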
@@ -9,16 +9,21 @@
Getting Started
===============

--------------------
System Prerequisites
--------------------
-------------
Prerequisites
-------------

Spack has the following minimum system requirements, which are assumed to
be present on the machine where Spack is run:
Spack has the following minimum requirements, which must be installed
before Spack is run:

.. csv-table:: System prerequisites for Spack
   :file: tables/system_prerequisites.csv
   :header-rows: 1
#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.9) to run Spack
#. A C/C++ compiler for building
#. The ``make`` executable for building
#. The ``tar``, ``gzip``, ``bzip2``, ``xz`` and optionally ``zstd``
   executables for extracting source code
#. The ``patch`` command to apply patches
#. The ``git`` and ``curl`` commands for fetching
#. If using the ``gpg`` subcommand, ``gnupg2`` is required

These requirements can be easily installed on most modern Linux systems;
on macOS, Xcode is required. Spack is designed to run on HPC
@@ -65,13 +70,7 @@ Sourcing these files will put the ``spack`` command in your ``PATH``, set
up your ``MODULEPATH`` to use Spack's packages, and add other useful
shell integration for :ref:`certain commands <packaging-shell-support>`,
:ref:`environments <environments>`, and :ref:`modules <modules>`. For
``bash`` and ``zsh``, it also sets up tab completion.

In order to know which directory to add to your ``MODULEPATH``, these scripts
query the ``spack`` command. On shared filesystems, this can be a bit slow,
especially if you log in frequently. If you don't use modules, or want to set
``MODULEPATH`` manually instead, you can set the ``SPACK_SKIP_MODULES``
environment variable to skip this step and speed up sourcing the file.
``bash``, it also sets up tab completion.
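
A minimal sketch of that shortcut (assuming any non-empty value is honored):

.. code-block:: console

   $ export SPACK_SKIP_MODULES=1
   $ . spack/share/spack/setup-env.sh   # skips the slow MODULEPATH query
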
If you do not want to use Spack's shell support, you can always just run
the ``spack`` command directly from ``spack/bin/spack``.
@@ -84,151 +83,6 @@ sourcing time, ensuring future invocations of the ``spack`` command will
continue to use the same consistent python version regardless of changes in
the environment.

^^^^^^^^^^^^^^^^^^^^
Bootstrapping clingo
^^^^^^^^^^^^^^^^^^^^

Spack supports using ``clingo`` as an external solver to compute which software
needs to be installed. The default configuration allows Spack to install
``clingo`` from a public buildcache, created by a Github Action workflow. In this
case the bootstrapping procedure is transparent to the user, except for a
slightly long waiting time on the first concretization of a spec:

.. code-block:: console

   $ spack find -b
   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
   ==> 0 installed packages

   $ time spack solve zlib
   ==> Best of 2 considered solutions.
   ==> Optimization Criteria:
     Priority  Criterion                                    Value
       1       deprecated versions used                     0
       2       version weight                               0
       3       number of non-default variants (roots)       0
       4       multi-valued variants                        0
       5       preferred providers for roots                0
       6       number of non-default variants (non-roots)   0
       7       preferred providers (non-roots)              0
       8       compiler mismatches                          0
       9       version badness                              0
      10       count of non-root multi-valued variants      0
      11       non-preferred compilers                      0
      12       target mismatches                            0
      13       non-preferred targets                        0

   zlib@1.2.11%gcc@11.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell

   real    0m30,618s
   user    0m27,278s
   sys     0m1,549s

After this command you'll see that ``clingo`` has been installed for Spack's own use:

.. code-block:: console

   $ spack find -b
   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
   ==> 2 installed packages
   -- linux-rhel5-x86_64 / gcc@9.3.0 -------------------------------
   clingo-bootstrap@spack  python@3.6

Subsequent calls to the concretizer will then be much faster:

.. code-block:: console

   $ time spack solve zlib
   [ ... ]
   real    0m1,222s
   user    0m1,146s
   sys     0m0,059s

If for security or for other reasons you don't want to or can't install precompiled
binaries, Spack can fall back to bootstrapping ``clingo`` from source files. To forbid
Spack from retrieving binaries from the bootstrapping buildcache, the following
command must be given:

.. code-block:: console

   $ spack bootstrap untrust github-actions
   ==> "github-actions" is now untrusted and will not be used for bootstrapping

since an "untrusted" way of bootstrapping software will not be considered
by Spack. You can verify the new settings are effective with:

.. code-block:: console

   $ spack bootstrap list
   Name: github-actions UNTRUSTED

       Type: buildcache

       Info:
           url: https://mirror.spack.io/bootstrap/github-actions/v0.1
           homepage: https://github.com/alalazo/spack-bootstrap-mirrors
           releases: https://github.com/alalazo/spack-bootstrap-mirrors/releases

       Description:
           Buildcache generated from a public workflow using Github Actions.
           The sha256 checksum of binaries is checked before installation.


   Name: spack-install TRUSTED

       Type: install

       Description:
           Specs built from sources by Spack. May take a long time.

When bootstrapping from sources, Spack requires a compiler with support
for C++14 (GCC on ``linux``, Apple Clang on ``darwin``) and static C++
standard libraries on ``linux``. Spack will build the required software
on the first request to concretize a spec:

.. code-block:: console

   $ spack solve zlib
   [+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
   [+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
   [+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
   ==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
   [ ... ]
   ==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
   zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell

.. tip::

   If you want to speed up bootstrapping ``clingo`` from sources, you may try to
   search for ``cmake`` and ``bison`` on your system:

   .. code-block:: console

      $ spack external find cmake bison
      ==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
      bison@3.0.4  cmake@3.19.4

"""""""""""""""""""
The Bootstrap Store
"""""""""""""""""""

All the tools Spack needs for its own functioning are installed in a separate store, which lives
under the ``${HOME}/.spack`` directory. The software installed there can be queried with:

.. code-block:: console

   $ spack find --bootstrap
   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
   ==> 3 installed packages
   -- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
   clingo-bootstrap@spack  python@3.6.9  re2c@1.2.1

In case it's needed, the bootstrap store can also be cleaned with:

.. code-block:: console

   $ spack clean -b
   ==> Removing software in "/home/spack/.spack/bootstrap/store"

^^^^^^^^^^^^^^^^^^
Check Installation
@@ -257,6 +111,53 @@ environment*, especially for ``PATH``. Only software that comes with
the system, or that you know you wish to use with Spack, should be
included. This procedure will avoid many strange build errors.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Bootstrapping clingo
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Spack supports using clingo as an external solver to compute which software
needs to be installed. If you have a default compiler supporting C++14 Spack
can automatically bootstrap this tool from sources the first time it is
needed:

.. code-block:: console

   $ spack solve zlib
   [+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
   [+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
   [+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
   ==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
   [ ... ]
   ==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
   zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell

If you want to speed up bootstrapping, you may try to search for ``cmake`` and ``bison``
on your system:

.. code-block:: console

   $ spack external find cmake bison
   ==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
   bison@3.0.4  cmake@3.19.4

All the tools Spack needs for its own functioning are installed in a separate store, which lives
under the ``${HOME}/.spack`` directory. The software installed there can be queried with:

.. code-block:: console

   $ spack find --bootstrap
   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
   ==> 3 installed packages
   -- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
   clingo-bootstrap@spack  python@3.6.9  re2c@1.2.1

In case it's needed, the bootstrap store can also be cleaned with:

.. code-block:: console

   $ spack clean -b
   ==> Removing software in "/home/spack/.spack/bootstrap/store"

^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Alternate Prefix
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -460,34 +361,6 @@ then inject those flags into the compiler command. Compiler flags
entered from the command line will be discussed in more detail in the
following section.

Some compilers also require additional environment configuration.
Examples include Intel's oneAPI and AMD's AOCC compiler suites,
which have custom scripts for loading environment variables and setting paths.
These variables should be specified in the ``environment`` section of the compiler
specification. The operations available to modify the environment are ``set``, ``unset``,
``prepend_path``, ``append_path``, and ``remove_path``. For example:

.. code-block:: yaml

   compilers:
   - compiler:
       modules: []
       operating_system: centos6
       paths:
         cc: /opt/intel/oneapi/compiler/latest/linux/bin/icx
         cxx: /opt/intel/oneapi/compiler/latest/linux/bin/icpx
         f77: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
         fc: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
       spec: oneapi@latest
       environment:
         set:
           MKL_ROOT: "/path/to/mkl/root"
         unset: # A list of environment variables to unset
         - CC
         prepend_path: # Similar for append|remove_path
           LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh


^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler
^^^^^^^^^^^^^^^^^^^^^^^
@@ -642,9 +515,8 @@ Fortran.
#. Run ``spack compiler find`` to locate Clang.

#. There are different ways to get ``gfortran`` on macOS. For example, you can
   install GCC with Spack (``spack install gcc``), with Homebrew (``brew install
   gcc``), or from a `DMG installer
   <https://github.com/fxcoudert/gfortran-for-macOS/releases>`_.
   install GCC with Spack (``spack install gcc``) or with Homebrew
   (``brew install gcc``).

#. The only thing left to do is to edit ``~/.spack/darwin/compilers.yaml`` to provide
   the path to ``gfortran``:
@@ -665,8 +537,7 @@ Fortran.

If you used Spack to install GCC, you can get the installation prefix by
``spack location -i gcc`` (this will only work if you have a single version
of GCC installed). Whereas for Homebrew, GCC is installed in
``/usr/local/Cellar/gcc/x.y.z``. With the DMG installer, the correct path
will be ``/usr/local/gfortran``.
``/usr/local/Cellar/gcc/x.y.z``.

^^^^^^^^^^^^^^^^^^^^^
Compiler Verification
@@ -900,7 +771,7 @@ an OpenMPI installed in /opt/local, one would use:

   buildable: False

In general, Spack is easier to use and more reliable if it builds all of
its own dependencies. However, there are several packages for which one
its own dependencies. However, there are two packages for which one
commonly needs to use system versions:

^^^
@@ -1248,33 +1119,6 @@ Secret keys may also be later exported using the
<https://www.digitalocean.com/community/tutorials/how-to-setup-additional-entropy-for-cloud-servers-using-haveged>`_
provides a good overview of sources of randomness.

Here is an example of creating a key. Note that we provide a name for the key first
(which we can use to reference the key later) and an email address:

.. code-block:: console

   $ spack gpg create dinosaur dinosaur@thedinosaurthings.com


If you want to export the key as you create it:

.. code-block:: console

   $ spack gpg create --export key.pub dinosaur dinosaur@thedinosaurthings.com

Or the private key:

.. code-block:: console

   $ spack gpg create --export-secret key.priv dinosaur dinosaur@thedinosaurthings.com


You can include both ``--export`` and ``--export-secret``, each with
an output file of choice, to export both.


^^^^^^^^^^^^
Listing keys
^^^^^^^^^^^^
@@ -1283,22 +1127,7 @@ In order to list the keys available in the keyring, the
``spack gpg list`` command will list trusted keys with the ``--trusted`` flag
and keys available for signing using ``--signing``. If you would like to
remove keys from your keyring, use ``spack gpg untrust <keyid>``. Key IDs can be
email addresses, names, or (best) fingerprints. Here is an example of listing
the key that we just created:

.. code-block:: console

   gpgconf: socketdir is '/run/user/1000/gnupg'
   /home/spackuser/spack/opt/spack/gpg/pubring.kbx
   ----------------------------------------------------------
   pub   rsa4096 2021-03-25 [SC]
         60D2685DAB647AD4DB54125961E09BB6F2A0ADCB
   uid           [ultimate] dinosaur (GPG created for Spack) <dinosaur@thedinosaurthings.com>


Note that the name "dinosaur" can be seen under the uid, which is the unique
id. We might need this reference if we want to export or otherwise reference the key.

email addresses, names, or (best) fingerprints.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Signing and Verifying Packages
@@ -1313,38 +1142,6 @@ may also be used to create a signed file which contains the contents, but it
is not recommended. Signed packages may be verified by using
``spack gpg verify <file>``.

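As a sketch of that round trip (the file name is hypothetical, and the
``.asc`` output name is an assumption about the default detached signature):

.. code-block:: console

   $ spack gpg sign mypackage.tar.gz      # assumed to write mypackage.tar.gz.asc
   $ spack gpg verify mypackage.tar.gz
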
^^^^^^^^^^^^^^
Exporting Keys
^^^^^^^^^^^^^^

You will likely want to export a public key at some point, and that looks like this. Let's
use the previous example and ask spack to export the key with uid "dinosaur."
We will provide an output location (typically a `*.pub` file) and the name of
the key.

.. code-block:: console

   $ spack gpg export dinosaur.pub dinosaur

You can then look at the created file, `dinosaur.pub`, to see the exported key.
If you want to include the private key, then just add `--secret`:

.. code-block:: console

   $ spack gpg export --secret dinosaur.priv dinosaur

This will write the private key to the file `dinosaur.priv`.

.. warning::

   You should be very careful about exporting private keys. You likely would
   only want to do this in the context of moving your spack installation to
   a different server, and wanting to preserve keys for a buildcache. If you
   are unsure about exporting, you can ask your local system administrator
   or for help on an issue or the Spack slack.


.. _cray-support:

-------------
@@ -67,7 +67,6 @@ or refer to the full manual below.
   build_settings
   environments
   containers
   monitoring
   mirrors
   module_file_support
   repositories
@@ -78,12 +77,6 @@ or refer to the full manual below.
   extensions
   pipelines

.. toctree::
   :maxdepth: 2
   :caption: Research

   analyze

.. toctree::
   :maxdepth: 2
   :caption: Contributing

@@ -159,27 +159,6 @@ can supply a file with specs in it, one per line:

This is useful if there is a specific suite of software managed by
your site.

^^^^^^^^^^^^^^^^^^
Mirror environment
^^^^^^^^^^^^^^^^^^

To create a mirror of all packages required by a concrete environment, activate the environment and call ``spack mirror create -a``.
This is especially useful to create a mirror of an environment concretized on another machine.

.. code-block:: console

   [remote] $ spack env create myenv
   [remote] $ spack env activate myenv
   [remote] $ spack add ...
   [remote] $ spack concretize

   $ sftp remote:/spack/var/environment/myenv/spack.lock
   $ spack env create myenv spack.lock
   $ spack env activate myenv
   $ spack mirror create -a


.. _cmd-spack-mirror-add:

--------------------
@@ -71,24 +71,9 @@ Module file customization
-------------------------

Module files are generated by post-install hooks after the successful
installation of a package.

.. note::

   Spack only generates modulefiles when a package is installed. If
   you attempt to install a package and it is already installed, Spack
   will not regenerate modulefiles for the package. This may lead to
   inconsistent modulefiles if the Spack module configuration has
   changed since the package was installed, either by editing a file
   or changing scopes or environments.

   Later in this section there is a subsection on :ref:`regenerating
   modules <cmd-spack-module-refresh>` that will allow you to bring
   your modules to a consistent state.

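As a sketch of the regeneration step referenced above (flags assumed from
the linked subsection; ``--delete-tree`` discards the existing tree first):

.. code-block:: console

   $ spack module tcl refresh --delete-tree -y
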
The table below summarizes the essential information associated with
the different file formats that can be generated by Spack:

installation of a package. The table below summarizes the essential
information associated with the different file formats
that can be generated by Spack:

+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
|                             | **Hook name**      | **Default root directory**    | **Default template file**                    | **Compatible tools** |
@@ -145,8 +130,9 @@ list of environment modifications.
to the corresponding environment variables:

================== =================================
LIBRARY_PATH       ``self.prefix/rlib/R/lib``
LD_LIBRARY_PATH    ``self.prefix/rlib/R/lib``
PKG_CONFIG_PATH    ``self.prefix/rlib/pkgconfig``
CPATH              ``self.prefix/rlib/R/include``
================== =================================

with the following snippet:
@@ -178,46 +164,6 @@ the installation folder of each package for the presence of a set of subdirector
(``bin``, ``man``, ``share/man``, etc.). If any is found its full path is prepended
to the environment variables listed below the folder name.

Spack modules can be configured for multiple module sets. The default
module set is named ``default``. All Spack commands which operate on
modules default to apply the ``default`` module set, but can be
applied to any module set in the configuration. Settings applied at
the root of the configuration (e.g. ``modules:enable`` rather than
``modules:default:enable``) are applied to the default module set for
backwards compatibility.
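
A sketch of that equivalence (both snippets enable the ``tcl`` generator for
the ``default`` module set):

.. code-block:: yaml

   # Root-level setting, kept for backwards compatibility
   modules:
     enable:
       - tcl

   # Equivalent explicit per-set form
   modules:
     default:
       enable:
         - tcl
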
"""""""""""""""""""""""""
|
||||
Changing the modules root
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
As shown in the table above, the default module root for ``lmod`` is
|
||||
``$spack/share/spack/lmod`` and the default root for ``tcl`` is
|
||||
``$spack/share/spack/modules``. This can be overridden for any module
|
||||
set by changing the ``roots`` key of the configuration.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
modules:
|
||||
default:
|
||||
roots:
|
||||
tcl: /path/to/install/tcl/modules
|
||||
my_custom_lmod_modules:
|
||||
roots:
|
||||
lmod: /path/to/install/custom/lmod/modules
|
||||
...
|
||||
|
||||
This configuration will create two module sets. The default module set
|
||||
will install its ``tcl`` modules to ``/path/to/install/tcl/modules``
|
||||
(and still install its lmod modules, if any, to the default
|
||||
location). The set ``my_custom_lmod_modules`` will install its lmod
|
||||
modules to ``/path/to/install/custom/lmod/modules`` (and still install
|
||||
its tcl modules, if any, to the default location).
|
||||
|
||||
Obviously, having multiple module sets install modules to the default
|
||||
location could be confusing to users of your modules. In the next
|
||||
section, we will discuss enabling and disabling module types (module
|
||||
file generators) for each module set.
|
||||
|
||||
""""""""""""""""""""
|
||||
Activate other hooks
|
||||
""""""""""""""""""""
|
||||
@@ -233,14 +179,13 @@ to the generator being customized:
|
||||
.. code-block:: yaml
|
||||
|
||||
modules:
|
||||
default:
|
||||
enable:
|
||||
- tcl
|
||||
- lmod
|
||||
tcl:
|
||||
# contains environment modules specific customizations
|
||||
lmod:
|
||||
# contains lmod specific customizations
|
||||
enable:
|
||||
- tcl
|
||||
- lmod
|
||||
tcl:
|
||||
# contains environment modules specific customizations
|
||||
lmod:
|
||||
# contains lmod specific customizations
|
||||
|
||||
In general, the configuration options that you can use in ``modules.yaml`` will
|
||||
either change the layout of the module files on the filesystem, or they will affect
|
||||
@@ -455,16 +400,10 @@ that are already in the LMod hierarchy.
|
||||
Customize environment modifications
|
||||
"""""""""""""""""""""""""""""""""""
|
||||
|
||||
You can control which prefixes in a Spack package are added to
|
||||
environment variables with the ``prefix_inspections`` section; this
|
||||
section maps relative prefixes to the list of environment variables
|
||||
which should be updated with those prefixes.
|
||||
|
||||
The ``prefix_inspections`` configuration is different from other
|
||||
settings in that a ``prefix_inspections`` configuration at the
|
||||
``modules`` level of the configuration file applies to all module
|
||||
sets. This allows users to make general overrides to the default
|
||||
inspections and customize them per-module-set.
|
||||
You can control which prefixes in a Spack package are added to environment
|
||||
variables with the ``prefix_inspections`` section; this section maps relative
|
||||
prefixes to the list of environment variables which should be updated with
|
||||
those prefixes.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@@ -477,66 +416,10 @@ inspections and customize them per-module-set.
      '':
        - CMAKE_PREFIX_PATH

Prefix inspections are only applied if the relative path inside the
installation prefix exists. In this case, for a Spack package ``foo``
installed to ``/spack/prefix/foo``, if ``foo`` installs executables to
``bin`` but no libraries in ``lib``, the generated module file for
``foo`` would update ``PATH`` to contain ``/spack/prefix/foo/bin`` and
``CMAKE_PREFIX_PATH`` to contain ``/spack/prefix/foo``, but would not
update ``LIBRARY_PATH``.

There is a special case for prefix inspections relative to environment
views. If all of the following conditions hold for a module set
configuration:

#. The configuration is for an :ref:`environment <environments>` and
   will never be applied outside the environment,
#. The environment in question is configured to use a :ref:`view
   <filesystem-views>`,
#. The :ref:`environment view is configured
   <configuring_environment_views>` with a projection that ensures
   every package is linked to a unique directory,

then the module set may be configured to create modules relative to
the environment view. This is specified by the ``use_view``
configuration option in the module set. If ``True``, the module set is
constructed relative to the default view of the
environment. Otherwise, the value must be the name of the environment
view relative to which to construct modules, or ``False-ish`` to
disable the feature explicitly (the default is ``False``).

If the ``use_view`` value is set in the config, then the prefix
inspections for the package are done relative to the package's path in
the view.

.. code-block:: yaml

   spack:
     modules:
       view_relative_modules:
         use_view: my_view
         prefix_inspections:
           bin:
             - PATH
     view:
       my_view:
         root: /path/to/my/view
         projections:
           all: '{name}-{hash}'

The ``spack`` key is relevant to :ref:`environment <environments>`
configuration, and the view key is discussed in detail in the section
on :ref:`Configuring environment views
<configuring_environment_views>`. With this configuration the
generated module for package ``foo`` would set ``PATH`` to include
``/path/to/my/view/foo-<hash>/bin`` instead of
In this case, for a Spack package ``foo`` installed to ``/spack/prefix/foo``,
the generated module file for ``foo`` would update ``PATH`` to contain
``/spack/prefix/foo/bin``.

The ``use_view`` option is useful when deploying a large software
stack to users who are likely to inspect the modules to find full
paths to software, when it is desirable to present the users with a
simpler set of paths than those generated by the Spack install tree.

""""""""""""""""""""""""""""""""""""
Filter out environment modifications
""""""""""""""""""""""""""""""""""""
@@ -1,265 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _monitoring:

==========
Monitoring
==========

You can use a `spack monitor <https://github.com/spack/spack-monitor>`_ "Spackmon"
server to store a database of your packages, builds, and associated metadata
for provenance, research, or some other kind of development. You should
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
to first create a server along with a username and token for yourself.
You can then use this guide to interact with the server.

-------------------
Analysis Monitoring
-------------------

To read about how to monitor an analysis (meaning you want to send analysis results
to a server) see :ref:`analyze_monitoring`.

---------------------
Monitoring An Install
---------------------

Since an install is typically when you build packages, we logically want
to tell spack to monitor during this step. Let's start with an example
where we want to monitor the install of hdf5. Unless you have disabled authentication
for the server, we first want to export our spack monitor token and username to the environment:

.. code-block:: console

   $ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
   $ export SPACKMON_USER=spacky


By default, the host for your server is expected to be at ``http://127.0.0.1``
with a prefix of ``ms1``, and if this is the case, you can simply add the
``--monitor`` flag to the install command:

.. code-block:: console

   $ spack install --monitor hdf5


If you need to customize the host or the prefix, you can do that as well:

.. code-block:: console

   $ spack install --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io hdf5


As a precaution, the spack client exits early if you have not provided
authentication credentials. For example, if you run the command above without
exporting your username or token, you'll see:

.. code-block:: console

   ==> Error: You are required to export SPACKMON_TOKEN and SPACKMON_USER

This extra check is to ensure that we don't start any builds,
and then discover that you forgot to export your token. However, if
your monitoring server has authentication disabled, you can tell this to
the client to skip this step:

.. code-block:: console

   $ spack install --monitor --monitor-disable-auth hdf5

If the service is not running, you'll cleanly exit early - the install will
not continue if you've asked it to monitor and there is no service.
For example, here is what you'll see if the monitoring service is not running:

.. code-block:: console

   [Errno 111] Connection refused


If you want to continue builds (and stop monitoring) you can set the ``--monitor-keep-going``
flag.

.. code-block:: console

   $ spack install --monitor --monitor-keep-going hdf5

This could mean that if a request fails, you only have partial or no data
added to your monitoring database. This setting will not be applied to the
first request to check if the server is running, but to subsequent requests.
If you don't have a monitor server running and you want to build, simply
don't provide the ``--monitor`` flag! Finally, if you want to provide one or
more tags to your build, you can do:

.. code-block:: console

   # Add one tag, "pizza"
   $ spack install --monitor --monitor-tags pizza hdf5

   # Add two tags, "pizza" and "pasta"
   $ spack install --monitor --monitor-tags pizza,pasta hdf5

----------------------------
Monitoring with Containerize
----------------------------

The same argument group is available to add to a containerize command.

^^^^^^
Docker
^^^^^^

To add monitoring to a Docker container recipe generation using the defaults,
and assuming a monitor server running on localhost, you would
start with a spack.yaml in your present working directory:

.. code-block:: yaml

   spack:
     specs:
       - samtools

And then do:

.. code-block:: console

   # preview first
   spack containerize --monitor

   # and then write to a Dockerfile
   spack containerize --monitor > Dockerfile


The install command will be edited to include commands for enabling monitoring.
However, getting secrets into the container for your monitor server is something
that should be done carefully. Specifically you should:

- Never try to define secrets as ENV, ARG, or using ``--build-arg``
- Do not try to get the secret into the container via a "temporary" file that you remove (it in fact will still exist in a layer)

Instead, it's recommended to use buildkit `as explained here <https://pythonspeed.com/articles/docker-build-secrets/>`_.
You'll need to again export environment variables for your spack monitor server:

.. code-block:: console

   $ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
   $ export SPACKMON_USER=spacky

And then use buildkit along with your build and identifying the name of the secret:

.. code-block:: console

   $ DOCKER_BUILDKIT=1 docker build --secret id=st,env=SPACKMON_TOKEN --secret id=su,env=SPACKMON_USER -t spack/container .

The secrets are expected to come from your environment, and then will be temporarily mounted and available
at ``/run/secrets/<name>``. If you forget to supply them (and authentication is required) the build
will fail. If you need to build on your host (and interact with a spack monitor at localhost) you'll
need to tell Docker to use the host network:

.. code-block:: console

   $ DOCKER_BUILDKIT=1 docker build --network="host" --secret id=st,env=SPACKMON_TOKEN --secret id=su,env=SPACKMON_USER -t spack/container .


^^^^^^^^^^^
Singularity
^^^^^^^^^^^

To add monitoring to a Singularity container build, the spack.yaml needs to
be modified slightly to specify wanting a different format:


.. code-block:: yaml

   spack:
     specs:
       - samtools
     container:
       format: singularity


Again, generate the recipe:


.. code-block:: console

   # preview first
   $ spack containerize --monitor

   # then write to a Singularity recipe
   $ spack containerize --monitor > Singularity


Singularity doesn't have a direct way to define secrets at build time, so we have
to do a bit of a manual command to add a file, source secrets in it, and remove it.
Since Singularity doesn't have layers like Docker, deleting a file will truly
remove it from the container and history. So let's say we have this file,
``secrets.sh``:

.. code-block:: console

   # secrets.sh
   export SPACKMON_USER=spack
   export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438


We would then generate the Singularity recipe, and add a files section,
a source of that file at the start of ``%post``, and **importantly**
a removal of the file at the end of that same section.

.. code-block::

   Bootstrap: docker
   From: spack/ubuntu-bionic:latest
   Stage: build

   %files
     secrets.sh /opt/secrets.sh

   %post
     . /opt/secrets.sh

     # spack install commands are here
     ...

     # Don't forget to remove here!
     rm /opt/secrets.sh


You can then build the container as you normally would.

.. code-block:: console

   $ sudo singularity build container.sif Singularity


------------------
Monitoring Offline
------------------

In the case that you want to save monitor results to your filesystem
and then upload them later (perhaps you are in an environment where you don't
have credentials or it isn't safe to use them) you can use the ``--monitor-save-local``
flag.

.. code-block:: console

   $ spack install --monitor --monitor-save-local hdf5

This will save results in a subfolder, "monitor", in your designated spack
reports folder, which defaults to ``$HOME/.spack/reports/monitor``. When
you are ready to upload them to a spack monitor server:


.. code-block:: console

   $ spack monitor upload ~/.spack/reports/monitor


You can choose the root directory of results as shown above, or a specific
subdirectory. The command accepts other arguments to specify configuration
for the monitor.

File diff suppressed because it is too large
@@ -30,18 +30,52 @@ at least one `runner <https://docs.gitlab.com/runner/>`_. Then the basic steps
for setting up a build pipeline are as follows:

#. Create a repository on your gitlab instance
#. Add a ``spack.yaml`` at the root containing your pipeline environment
#. Add a ``spack.yaml`` at the root containing your pipeline environment (see
   below for details)
#. Add a ``.gitlab-ci.yml`` at the root containing two jobs (one to generate
   the pipeline dynamically, and one to run the generated jobs).
   the pipeline dynamically, and one to run the generated jobs), similar to
   this one:

   .. code-block:: yaml

      stages: [generate, build]

      generate-pipeline:
        stage: generate
        tags:
          - <custom-tag>
        script:
          - spack env activate --without-view .
          - spack ci generate
            --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
        artifacts:
          paths:
            - "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"

      build-jobs:
        stage: build
        trigger:
          include:
            - artifact: "jobs_scratch_dir/pipeline.yml"
              job: generate-pipeline
          strategy: depend


#. Add any secrets required by the CI process to environment variables using the
   CI web UI
#. Push a commit containing the ``spack.yaml`` and ``.gitlab-ci.yml`` mentioned above
   to the gitlab repository

See the :ref:`functional_example` section for a minimal working example. See also
the :ref:`custom_Workflow` section for a link to an example of a custom workflow
based on spack pipelines.
The ``<custom-tag>``, above, is used to pick one of your configured runners to
run the pipeline generation phase (this is implemented in the ``spack ci generate``
command, which assumes the runner has an appropriate version of spack installed
and configured for use). Of course, there are many ways to customize the process.
You can configure CDash reporting on the progress of your builds, set up S3 buckets
to mirror binaries built by the pipeline, clone a custom spack repository/ref for
use by the pipeline, and more.

While it is possible to set up pipelines on gitlab.com, as illustrated above, the
builds there are limited to 60 minutes and generic hardware. It is also possible to
While it is possible to set up pipelines on gitlab.com, the builds there are
limited to 60 minutes and generic hardware. It is also possible to
`hook up <https://about.gitlab.com/blog/2018/04/24/getting-started-gitlab-ci-gcp>`_
Gitlab to Google Kubernetes Engine (`GKE <https://cloud.google.com/kubernetes-engine/>`_)
or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though those
@@ -54,144 +88,21 @@ dynamically generated
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.

.. _functional_example:

------------------
Functional Example
------------------

The simplest fully functional standalone example of a working pipeline can be
examined live at this example `project <https://gitlab.com/scott.wittenburg/spack-pipeline-demo>`_
on gitlab.com.

Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the
pipeline:

.. code-block:: yaml

   stages: [generate, build]

   variables:
     SPACK_REPO: https://github.com/scottwittenburg/spack.git
     SPACK_REF: pipelines-reproducible-builds

   generate-pipeline:
     stage: generate
     tags:
       - docker
     image:
       name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
       entrypoint: [""]
     before_script:
       - git clone ${SPACK_REPO}
       - pushd spack && git checkout ${SPACK_REF} && popd
       - . "./spack/share/spack/setup-env.sh"
     script:
       - spack env activate --without-view .
       - spack -d ci generate
         --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
         --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
     artifacts:
       paths:
         - "${CI_PROJECT_DIR}/jobs_scratch_dir"

   build-jobs:
     stage: build
     trigger:
       include:
         - artifact: "jobs_scratch_dir/pipeline.yml"
           job: generate-pipeline
       strategy: depend

The key thing to note above is that there are two jobs: The first job to run,
``generate-pipeline``, runs the ``spack ci generate`` command to generate a
dynamic child pipeline and write it to a yaml file, which is then picked up
by the second job, ``build-jobs``, and used to trigger the downstream pipeline.

And here's the spack environment built by the pipeline represented as a
``spack.yaml`` file:

.. code-block:: yaml

   spack:
     view: false
     concretization: separately

     definitions:
     - pkgs:
       - zlib
       - bzip2
     - arch:
       - '%gcc@7.5.0 arch=linux-ubuntu18.04-x86_64'

     specs:
     - matrix:
       - - $pkgs
       - - $arch

     mirrors: { "mirror": "s3://spack-public/mirror" }

     gitlab-ci:
       before_script:
         - git clone ${SPACK_REPO}
         - pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
         - . "./spack/share/spack/setup-env.sh"
       script:
         - pushd ${SPACK_CONCRETE_ENV_DIR} && spack env activate --without-view . && popd
         - spack -d ci rebuild
       mappings:
         - match: ["os=ubuntu18.04"]
           runner-attributes:
             image:
               name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
               entrypoint: [""]
             tags:
               - docker
       enable-artifacts-buildcache: True
       rebuild-index: False

The elements of this file important to spack ci pipelines are described in more
detail below, but there are a couple of things to note about the above working
example:

Normally ``enable-artifacts-buildcache`` is not recommended in production as it
results in large binary artifacts getting transferred back and forth between
gitlab and the runners. But in this example on gitlab.com where there is no
shared, persistent file system, and where no secrets are stored for giving
permission to write to an S3 bucket, ``enable-artifacts-buildcache`` is the only
way to propagate binaries from jobs to their dependents.

Also, it is usually a good idea to let the pipeline generate a final "rebuild the
buildcache index" job, so that subsequent pipeline generation can quickly determine
which specs are up to date and which need to be rebuilt (it's a good idea for other
reasons as well, but those are out of scope for this discussion). In this case we
have disabled it (using ``rebuild-index: False``) because the index would only be
generated in the artifacts mirror anyway, and consequently would not be available
during subsequent pipeline runs.

.. note::
   With the addition of reproducible builds (#22887) a previously working
   pipeline will require some changes:

   * In the build jobs (``runner-attributes``), the environment location changed.
     This will typically show as a ``KeyError`` in the failing job. Be sure to
     point to ``${SPACK_CONCRETE_ENV_DIR}``.

   * When using ``include`` in your environment, be sure to make the included
     files available in the build jobs. This means adding those files to the
     artifact directory. Those files will also be missing in the reproducibility
     artifact.

   * Because the location of the environment changed, including files with
     relative path may have to be adapted to work both in the project context
     (generation job) and in the concrete env dir context (build job).

-----------------------------------
Spack commands supporting pipelines
-----------------------------------

Spack provides a ``ci`` command with a few sub-commands supporting spack
ci pipelines. These commands are covered in more detail in this section.
Spack provides a command ``ci`` with two sub-commands: ``spack ci generate`` generates
a pipeline (a .gitlab-ci.yml file) from a spack environment, and ``spack ci rebuild``
checks a spec against a remote mirror and possibly rebuilds it from source and updates
the binary mirror with the latest built package. Both ``spack ci ...`` commands must
be run from within the same environment, as each one makes use of the environment for
different purposes. Additionally, some options to the commands (or conditions present
in the spack environment file) may require particular environment variables to be
set in order to function properly. Examples of these are typically secrets
needed for pipeline operation that should not be visible in a spack environment
file. These environment variables are described in more detail
:ref:`ci_environment_variables`.

.. _cmd-spack-ci:

@@ -210,17 +121,6 @@ pipeline jobs.
|
||||
|
||||
Concretizes the specs in the active environment, stages them (as described in
|
||||
:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk.
|
||||
During concretization of the environment, ``spack ci generate`` also writes a
|
||||
``spack.lock`` file which is then provided to generated child jobs and made
|
||||
available in all generated job artifacts to aid in reproducing failed builds
|
||||
in a local environment. This means there are two artifacts that need to be
|
||||
exported in your pipeline generation job (defined in your ``.gitlab-ci.yml``).
|
||||
The first is the output yaml file of ``spack ci generate``, and the other is
|
||||
the directory containing the concrete environment files. In the
|
||||
:ref:`functional_example` section, we only mentioned one path in the
|
||||
``artifacts`` ``paths`` list because we used ``--artifacts-root`` as the
|
||||
top level directory containing both the generated pipeline yaml and the
|
||||
concrete environment.

Using ``--prune-dag`` or ``--no-prune-dag`` configures whether or not jobs are
generated for specs that are already up to date on the mirror. If enabling
@@ -228,16 +128,6 @@ DAG pruning using ``--prune-dag``, more information may be required in your
``spack.yaml`` file; see the :ref:`noop_jobs` section below regarding
``service-job-attributes``.

The optional ``--check-index-only`` argument can be used to speed up pipeline
generation by telling spack to consider only remote buildcache indices when
checking the remote mirror to determine if each spec in the DAG is up to date
or not. The default behavior is for spack to fetch the index and check it,
but if the spec is not found in the index, to also perform a direct check for
the spec on the mirror. If the remote buildcache index is out of date, which
can easily happen if it is not updated frequently, this behavior ensures that
spack has a way to know for certain about the status of any concrete spec on
the remote mirror, but can slow down pipeline generation significantly.
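
For example, a hedged sketch of an index-only generation run (the artifacts
root is illustrative):

.. code-block:: console

   $ spack ci generate --check-index-only \
       --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"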

The ``--optimize`` argument is experimental and runs the generated pipeline
document through a series of optimization passes designed to reduce the size
of the generated file.
@@ -253,64 +143,19 @@ The optional ``--output-file`` argument should be an absolute path (including
file name) to the generated pipeline, and if not given, the default is
``./.gitlab-ci.yml``.

While optional, the ``--artifacts-root`` argument is used to determine where
the concretized environment directory should be located. This directory will
be created by ``spack ci generate`` and will contain the ``spack.yaml`` and
generated ``spack.lock`` which are then passed to all child jobs as an
artifact. This directory will also be the root directory for all artifacts
generated by jobs in the pipeline.

.. _cmd-spack-ci-rebuild:

^^^^^^^^^^^^^^^^^^^^
``spack ci rebuild``
^^^^^^^^^^^^^^^^^^^^

The purpose of ``spack ci rebuild`` is straightforward: take its assigned
spec job, check whether the target mirror already has a binary for that spec,
and if not, build the spec from source and push the binary to the mirror. To
accomplish this in a reproducible way, the sub-command prepares a ``spack install``
command line to build a single spec in the DAG, saves that command in a
shell script, ``install.sh``, in the current working directory, and then runs
it to install the spec. The shell script is also exported as an artifact to
aid in reproducing the build outside of the CI environment.
This sub-command is responsible for ensuring a single spec from the release
environment is up to date on the remote mirror configured in the environment,
and as such, corresponds to a single job in the ``.gitlab-ci.yml`` file.

If it was necessary to install the spec from source, ``spack ci rebuild`` will
also subsequently create a binary package for the spec and try to push it to the
mirror.

The ``spack ci rebuild`` sub-command mainly expects its "input" to come either
from environment variables or from the ``gitlab-ci`` section of the ``spack.yaml``
environment file. There are two main sources of the environment variables: some
are written into ``.gitlab-ci.yml`` by ``spack ci generate``, and some are
provided by the GitLab CI runtime.
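
As a sketch of what a generated rebuild job (or a local reproduction of one)
runs, assuming the concrete environment directory was communicated in
``SPACK_CONCRETE_ENV_DIR``:

.. code-block:: console

   $ spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
   $ spack -d ci rebuild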

.. _cmd-spack-ci-rebuild-index:

^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack ci rebuild-index``
^^^^^^^^^^^^^^^^^^^^^^^^^^

This is a convenience command to rebuild the buildcache index associated with
the mirror in the active, gitlab-enabled environment (specifying the mirror
url or name is not required).
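
For example, from within the active, gitlab-enabled environment:

.. code-block:: console

   $ spack ci rebuild-index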

.. _cmd-spack-ci-reproduce-build:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack ci reproduce-build``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Given the url to a gitlab pipeline rebuild job, downloads and unzips the
artifacts into a local directory (which can be specified with the optional
``--working-dir`` argument), then finds the target job in the generated
pipeline to extract details about how it was run. Assuming the job used a
docker image, the command prints a ``docker run`` command line and some basic
instructions on how to reproduce the build locally.
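
A hedged invocation sketch (the job url and working directory are
placeholders):

.. code-block:: console

   $ spack ci reproduce-build https://gitlab.example.com/<project>/-/jobs/<job-id> \
       --working-dir /tmp/repro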

Note that jobs failing in the pipeline will print messages giving the
arguments you can pass to ``spack ci reproduce-build`` in order to reproduce
a particular build locally.
Rather than taking command-line arguments, this sub-command expects information
to be communicated via environment variables, which will typically come via the
``.gitlab-ci.yml`` job as ``variables``.

------------------------------------
A pipeline-enabled spack environment
@@ -395,13 +240,6 @@ takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``).

The optional ``broken-specs-url`` key tells Spack to check against a list of
specs that are known to be currently broken in ``develop``. If any such specs
are found, the ``spack ci generate`` command will fail with an error message
informing the user which broken specs were encountered. This allows the pipeline
to fail early and avoid wasting compute resources attempting to build packages
that will not succeed.

The optional ``cdash`` section provides information that will be used by the
``spack ci generate`` command (invoked by ``spack ci start``) for reporting
to CDash. All the jobs generated from this environment will belong to a
@@ -519,9 +357,8 @@ scheduled on that runner. This allows users to do any custom preparation or
cleanup tasks that fit their particular workflow, as well as completely
customize the rebuilding of a spec if they so choose. Spack will not generate
a ``before_script`` or ``after_script`` for jobs, but if you do not provide
a custom ``script``, spack will generate one for you that assumes the concrete
environment directory is located within your ``--artifacts_root`` (or, if not
provided, within your ``$CI_PROJECT_DIR``), activates that environment for
a custom ``script``, spack will generate one for you that assumes your
``spack.yaml`` is at the root of the repository, activates that environment for
you, and invokes ``spack ci rebuild``.

.. _staging_algorithm:

@@ -646,15 +483,14 @@ Using a custom spack in your pipeline

If your runners will not have a version of spack ready to invoke, or if for some
other reason you want to use a custom version of spack to run your pipelines,
this section provides an example of how you could take advantage of
user-provided pipeline scripts to accomplish this fairly simply. First, consider
specifying the source and version of spack you want to use with variables, either
written directly into your ``.gitlab-ci.yml``, or provided by CI variables defined
in the gitlab UI or from some upstream pipeline. Let's say you choose the variable
names ``SPACK_REPO`` and ``SPACK_REF`` to refer to the particular fork of spack
and branch you want for running your pipeline. You can then refer to those in a
custom shell script invoked both from your pipeline generation job and your rebuild
user-provided pipeline scripts to accomplish this fairly simply. First, you
could use the GitLab user interface to create CI environment variables
containing the url and branch or tag you want to use (calling them, for
example, ``SPACK_REPO`` and ``SPACK_REF``), then refer to those in a custom shell
script invoked both from your pipeline generation job, as well as in your rebuild
jobs. Here's the ``generate-pipeline`` job from the top of this document,
updated to clone and source a custom spack:
updated to invoke a custom shell script that will clone and source a custom
spack:

.. code-block:: yaml

@@ -662,24 +498,34 @@ updated to clone and source a custom spack:
     tags:
       - <some-other-tag>
     before_script:
       - git clone ${SPACK_REPO}
       - pushd spack && git checkout ${SPACK_REF} && popd
       - . "./spack/share/spack/setup-env.sh"
       - ./cloneSpack.sh
     script:
       - spack env activate --without-view .
       - spack ci generate --check-index-only
         --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
       - spack ci generate
         --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
     after_script:
       - rm -rf ./spack
     artifacts:
       paths:
         - "${CI_PROJECT_DIR}/jobs_scratch_dir"
         - "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"

That takes care of getting the desired version of spack when your pipeline is
generated by ``spack ci generate``. You also want your generated rebuild jobs
(all of them) to clone that version of spack, so next you would update your
``spack.yaml`` from above as follows:
And the ``cloneSpack.sh`` script could contain:

.. code-block:: bash

   #!/bin/bash

   git clone ${SPACK_REPO}
   pushd ./spack
   git checkout ${SPACK_REF}
   popd

   . "./spack/share/spack/setup-env.sh"

   spack --version

Finally, you would also want your generated rebuild jobs to clone that version
of spack, so you would update your ``spack.yaml`` from above as follows:

.. code-block:: yaml

@@ -694,21 +540,21 @@ generated by ``spack ci generate``. You also want your generated rebuild jobs
         - spack-kube
       image: spack/ubuntu-bionic
       before_script:
         - git clone ${SPACK_REPO}
         - pushd spack && git checkout ${SPACK_REF} && popd
         - . "./spack/share/spack/setup-env.sh"
         - ./cloneSpack.sh
       script:
         - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
         - spack env activate --without-view .
         - spack -d ci rebuild
       after_script:
         - rm -rf ./spack

Now all of the generated rebuild jobs will use the same shell script to clone
spack before running their actual workload.
spack before running their actual workload. Note in the above example the
provision of a custom ``script`` section. The reason for this is to run
``spack ci rebuild`` in debug mode to get more information when builds fail.

Now imagine you have long pipelines with many specs to be built, and you
are pointing to a spack repository and branch that has a tendency to change
frequently, such as the main repo and its ``develop`` branch. If each child
job checks out the ``develop`` branch, that could result in some jobs running
with one SHA of spack, while later jobs run with another. To help avoid this
issue, the pipeline generation process saves global variables called
@@ -718,32 +564,13 @@ simply contains the human-readable value produced by ``spack -V`` at pipeline
generation time, the ``SPACK_CHECKOUT_VERSION`` variable can be used in a
``git checkout`` command to make sure all child jobs checkout the same version
of spack used to generate the pipeline. To take advantage of this, you could
simply replace ``git checkout ${SPACK_REF}`` in the example ``spack.yaml``
above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
simply replace ``git checkout ${SPACK_REF}`` in the example ``cloneSpack.sh``
script above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
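
Applying that substitution, the ``cloneSpack.sh`` script from above would
become:

.. code-block:: bash

   #!/bin/bash

   git clone ${SPACK_REPO}
   pushd ./spack
   git checkout ${SPACK_CHECKOUT_VERSION}
   popd

   . "./spack/share/spack/setup-env.sh"

   spack --version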

On the other hand, if you're pointing to a spack repository and branch under your
control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``,
and you can instead just clone using the variables you define (``SPACK_REPO``
and ``SPACK_REF`` in the examples above).

.. _custom_workflow:

---------------
Custom Workflow
---------------

There are many ways to take advantage of spack CI pipelines to achieve custom
workflows for building packages or other resources. One example of a custom
pipelines workflow is the spack tutorial container
`repo <https://github.com/spack/spack-tutorial-container>`_. This project uses
GitHub (for source control), GitLab (for automated spack ci pipelines), and
DockerHub automated builds to build Docker images (complete with a fully populated
binary mirror) used by instructors and participants of a spack tutorial.

Take a look at the repo to see how it is accomplished using spack CI pipelines,
and see the following markdown files at the root of the repository for
descriptions and documentation describing the workflow: ``DESCRIPTION.md``,
``DOCKERHUB_SETUP.md``, ``GITLAB_SETUP.md``, and ``UPDATING.md``.
and you can instead just clone using the project CI variables you set (in the
earlier example these were ``SPACK_REPO`` and ``SPACK_REF``).

.. _ci_environment_variables:

@@ -760,33 +587,28 @@ environment variables used by the pipeline infrastructure are described here.

AWS_ACCESS_KEY_ID
^^^^^^^^^^^^^^^^^

Optional. Only needed when the binary mirror is an S3 bucket.
Needed when the binary mirror is an S3 bucket.

^^^^^^^^^^^^^^^^^^^^^
AWS_SECRET_ACCESS_KEY
^^^^^^^^^^^^^^^^^^^^^

Optional. Only needed when the binary mirror is an S3 bucket.
Needed when the binary mirror is an S3 bucket.

^^^^^^^^^^^^^^^
S3_ENDPOINT_URL
^^^^^^^^^^^^^^^

Optional. Only needed when the binary mirror is an S3 bucket that is *not* on AWS.
Needed when the binary mirror is an S3 bucket that is *not* on AWS.

^^^^^^^^^^^^^^^^
CDASH_AUTH_TOKEN
^^^^^^^^^^^^^^^^

Optional. Only needed in order to report build groups to CDash.
Needed in order to report build groups to CDash.

^^^^^^^^^^^^^^^^^
SPACK_SIGNING_KEY
^^^^^^^^^^^^^^^^^

Optional. Only needed if you want ``spack ci rebuild`` to trust the key you
store in this variable, in which case it will subsequently be used to sign and
verify binary packages (when installing or creating buildcaches). You could
also have already trusted a key spack knows about, or, if no key is present anywhere,
spack will install specs using ``--no-check-signature`` and create buildcaches
using ``-u`` (for unsigned binaries).
Needed to sign/verify binary packages from the remote binary mirror.
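
When reproducing a rebuild outside of gitlab, the same secrets can be
exported manually before invoking spack; a hedged sketch with placeholder
values:

.. code-block:: console

   $ export AWS_ACCESS_KEY_ID="<access-key-id>"
   $ export AWS_SECRET_ACCESS_KEY="<secret-key>"
   $ export SPACK_SIGNING_KEY="<exported-signing-key>"
   $ spack -d ci rebuild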

@@ -1,7 +1,7 @@
# These dependencies should be installed using pip in order
# to build the documentation.

sphinx>=3.4,!=4.1.2
sphinx
sphinxcontrib-programoutput
sphinx-rtd-theme
python-levenshtein

@@ -8,20 +8,12 @@
# these commands in this directory to install Sphinx and its plugins,
# then build the docs:
#
#     spack env activate .
#     spack install
#     spack env activate .
#     make
#
spack:
  specs:
    # Sphinx
    - "py-sphinx@3.4:4.1.1,4.1.3:"
    - py-sphinx
    - py-sphinxcontrib-programoutput
    - py-sphinx-rtd-theme
    # VCS
    - git
    - mercurial
    - subversion
    # Plotting
    - graphviz
  concretization: together

@@ -1,17 +0,0 @@
Name, Supported Versions, Notes, Requirement Reason
Python, 2.6/2.7/3.5-3.9, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
bash, , , Compiler wrappers
tar, , , Extract/create archives
gzip, , , Compress/Decompress archives
unzip, , , Compress/Decompress archives
bzip, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories
hg, , Optional, Manage Software Repositories

@@ -387,7 +387,7 @@ some nice features:
   Spack-built compiler can be given to an IDE without requiring the
   IDE to load that compiler's module.

Unfortunately, Spack's RPATH support does not work in every case. For example:
Unfortunately, Spack's RPATH support does not work in all cases. For example:

#. Software comes in many forms --- not just compiled ELF binaries,
   but also as interpreted code in Python, R, JVM bytecode, etc.
@@ -543,8 +543,7 @@ specified from the command line using the ``--projection-file`` option
to the ``spack view`` command.

The projections configuration file is a mapping of partial specs to
spec format strings, defined by the :meth:`~spack.spec.Spec.format`
function, as shown in the example below.
spec format strings, as shown in the example below.

.. code-block:: yaml


lib/spack/env/cc (vendored)
@@ -40,14 +40,6 @@ parameters=(
    SPACK_SYSTEM_DIRS
)

# Optional parameters that aren't required to be set

# Boolean (true/false/custom) if we want to add debug flags
# SPACK_ADD_DEBUG_FLAGS

# If a custom flag is requested, it will be defined
# SPACK_DEBUG_FLAGS

# The compiler input variables are checked for sanity later:
#   SPACK_CC, SPACK_CXX, SPACK_F77, SPACK_FC
# The default compiler flags are passed from these variables:
@@ -95,25 +87,6 @@ for param in "${parameters[@]}"; do
    fi
done

# Check if optional parameters are defined
# If we aren't asking for debug flags, don't add them
if [[ -z ${SPACK_ADD_DEBUG_FLAGS+x} ]]; then
    SPACK_ADD_DEBUG_FLAGS="false"
fi

# SPACK_ADD_DEBUG_FLAGS must be true/false/custom
is_valid="false"
for param in "true" "false" "custom"; do
    if [ "$param" == "$SPACK_ADD_DEBUG_FLAGS" ]; then
        is_valid="true"
    fi
done

# Exit with error if we are given an incorrect value
if [ "$is_valid" == "false" ]; then
    die "SPACK_ADD_DEBUG_FLAGS, if defined, must be one of 'true' 'false' or 'custom'"
fi

# Figure out the type of compiler, the language, and the mode so that
# the compiler script knows what to do.
#
@@ -133,37 +106,32 @@ comp="CC"
case "$command" in
    cpp)
        mode=cpp
        debug_flags="-g"
        ;;
    cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc)
        command="$SPACK_CC"
        language="C"
        comp="CC"
        lang_flags=C
        debug_flags="-g"
        ;;
    c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC)
        command="$SPACK_CXX"
        language="C++"
        comp="CXX"
        lang_flags=CXX
        debug_flags="-g"
        ;;
    ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt)
        command="$SPACK_FC"
        language="Fortran 90"
        comp="FC"
        lang_flags=F
        debug_flags="-g"
        ;;
    f77|xlf|xlf_r|pgf77)
        command="$SPACK_F77"
        language="Fortran 77"
        comp="F77"
        lang_flags=F
        debug_flags="-g"
        ;;
    ld|ld.gold|ld.lld)
    ld)
        mode=ld
        ;;
    *)
@@ -247,7 +215,7 @@ export PATH=""
for dir in "${env_path[@]}"; do
    addpath=true
    for env_dir in "${spack_env_dirs[@]}"; do
        if [[ "${dir%%/}" == "$env_dir" ]]; then
        if [[ "$dir" == "$env_dir" ]]; then
            addpath=false
            break
        fi
@@ -309,18 +277,10 @@ other_args=()
isystem_system_includes=()
isystem_includes=()

while [ $# -ne 0 ]; do

while [ -n "$1" ]; do
    # an RPATH to be added after the case statement.
    rp=""

    # Multiple consecutive spaces in the command line can
    # result in blank arguments
    if [ -z "$1" ]; then
        shift
        continue
    fi

    case "$1" in
        -isystem*)
            arg="${1#-isystem}"
@@ -351,16 +311,6 @@ while [ $# -ne 0 ]; do
            fi
            ;;
        -l*)
            # -loopopt=0 is generated erroneously in autoconf <= 2.69,
            # and passed by ifx to the linker, which confuses it with a
            # library. Filter it out.
            # TODO: generalize filtering of args with an env var, so that
            # TODO: we do not have to special case this here.
            if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
                && [ "$1" != "${1#-loopopt}" ]; then
                shift
                continue
            fi
            arg="${1#-l}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            other_args+=("-l$arg")
@@ -447,16 +397,6 @@ done
#
flags=()

# Add debug flags
if [ "${SPACK_ADD_DEBUG_FLAGS}" == "true" ]; then
    flags=("${flags[@]}" "${debug_flags}")

# If a custom flag is requested, derive from environment
elif [ "$SPACK_ADD_DEBUG_FLAGS" == "custom" ]; then
    IFS=' ' read -ra SPACK_DEBUG_FLAGS <<< "$SPACK_DEBUG_FLAGS"
    flags=("${flags[@]}" "${SPACK_DEBUG_FLAGS[@]}")
fi

# Fortran flags come before CPPFLAGS
case "$mode" in
    cc|ccld)
@@ -616,9 +556,6 @@ if [[ $SPACK_TEST_COMMAND == dump-args ]]; then
    IFS="
" && echo "${full_command[*]}"
    exit
elif [[ $SPACK_TEST_COMMAND =~ dump-env-* ]]; then
    var=${SPACK_TEST_COMMAND#dump-env-}
    echo "$0: $var: ${!var}"
elif [[ -n $SPACK_TEST_COMMAND ]]; then
    die "ERROR: Unknown test command"
fi
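
Aside: the ``SPACK_TEST_COMMAND`` hook shown in this hunk can be used to
inspect what the wrapper would execute without running the real compiler; a
hedged sketch, assuming the required ``SPACK_*`` wrapper variables are
already set in the environment:

.. code-block:: console

   $ SPACK_TEST_COMMAND=dump-args ./lib/spack/env/cc -c foo.c -o foo.o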

lib/spack/env/ld.gold (vendored)
@@ -1 +0,0 @@
cc

lib/spack/env/ld.lld (vendored)
@@ -1 +0,0 @@
cc

lib/spack/external/__init__.py (vendored)
@@ -11,7 +11,7 @@

* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.2 (commit 4dbf253daf37e4a008e4beb6489f347b4a35aed4)
* Version: 0.1.2 (commit 0389e83e87d3dc5043a7ac08172bd970706524d6)

argparse
--------

lib/spack/external/archspec/cpu/detect.py (vendored)
@@ -99,29 +99,17 @@ def sysctl_info_dict():
    def sysctl(*args):
        return _check_output(["sysctl"] + list(args), env=child_environment).strip()

    if platform.machine() == "x86_64":
        flags = (
            sysctl("-n", "machdep.cpu.features").lower()
            + " "
            + sysctl("-n", "machdep.cpu.leaf7_features").lower()
        )
        info = {
            "vendor_id": sysctl("-n", "machdep.cpu.vendor"),
            "flags": flags,
            "model": sysctl("-n", "machdep.cpu.model"),
            "model name": sysctl("-n", "machdep.cpu.brand_string"),
        }
    else:
        model = (
            "m1" if "Apple" in sysctl("-n", "machdep.cpu.brand_string") else "unknown"
        )
        info = {
            "vendor_id": "Apple",
            "flags": [],
            "model": model,
            "CPU implementer": "Apple",
            "model name": sysctl("-n", "machdep.cpu.brand_string"),
        }
    flags = (
        sysctl("-n", "machdep.cpu.features").lower()
        + " "
        + sysctl("-n", "machdep.cpu.leaf7_features").lower()
    )
    info = {
        "vendor_id": sysctl("-n", "machdep.cpu.vendor"),
        "flags": flags,
        "model": sysctl("-n", "machdep.cpu.model"),
        "model name": sysctl("-n", "machdep.cpu.brand_string"),
    }
    return info


@@ -185,11 +173,6 @@ def compatible_microarchitectures(info):
        info (dict): dictionary containing information on the host cpu
    """
    architecture_family = platform.machine()
    # On Apple M1 platform.machine() returns "arm64" instead of "aarch64"
    # so we should normalize the name here
    if architecture_family == "arm64":
        architecture_family = "aarch64"

    # If a tester is not registered, be conservative and assume no known
    # target is compatible with the host
    tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)

@@ -91,166 +91,6 @@
      ]
    }
  },
  "x86_64_v2": {
    "from": ["x86_64"],
    "vendor": "generic",
    "features": [
      "cx16",
      "lahf_lm",
      "mmx",
      "sse",
      "sse2",
      "ssse3",
      "sse4_1",
      "sse4_2",
      "popcnt"
    ],
    "compilers": {
      "gcc": [
        {
          "versions": "11.1:",
          "name": "x86-64-v2",
          "flags": "-march={name} -mtune=generic"
        },
        {
          "versions": "4.6:11.0",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
        }
      ],
      "clang": [
        {
          "versions": "12.0:",
          "name": "x86-64-v2",
          "flags": "-march={name} -mtune=generic"
        },
        {
          "versions": "3.9:11.1",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
        }
      ]
    }
  },
  "x86_64_v3": {
    "from": ["x86_64_v2"],
    "vendor": "generic",
    "features": [
      "cx16",
      "lahf_lm",
      "mmx",
      "sse",
      "sse2",
      "ssse3",
      "sse4_1",
      "sse4_2",
      "popcnt",
      "avx",
      "avx2",
      "bmi1",
      "bmi2",
      "f16c",
      "fma",
      "abm",
      "movbe",
      "xsave"
    ],
    "compilers": {
      "gcc": [
        {
          "versions": "11.1:",
          "name": "x86-64-v3",
          "flags": "-march={name} -mtune=generic"
        },
        {
          "versions": "4.8:11.0",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
        }
      ],
      "clang": [
        {
          "versions": "12.0:",
          "name": "x86-64-v3",
          "flags": "-march={name} -mtune=generic"
        },
        {
          "versions": "3.9:11.1",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
        }
      ],
      "apple-clang": [
        {
          "versions": "8.0:",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
        }
      ]
    }
  },
  "x86_64_v4": {
    "from": ["x86_64_v3"],
    "vendor": "generic",
    "features": [
      "cx16",
      "lahf_lm",
      "mmx",
      "sse",
      "sse2",
      "ssse3",
      "sse4_1",
      "sse4_2",
      "popcnt",
      "avx",
      "avx2",
      "bmi1",
      "bmi2",
      "f16c",
      "fma",
      "abm",
      "movbe",
      "xsave",
      "avx512f",
      "avx512bw",
      "avx512cd",
      "avx512dq",
      "avx512vl"
    ],
    "compilers": {
      "gcc": [
        {
          "versions": "11.1:",
          "name": "x86-64-v4",
          "flags": "-march={name} -mtune=generic"
        },
        {
          "versions": "6.0:11.0",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
        }
      ],
      "clang": [
        {
          "versions": "12.0:",
          "name": "x86-64-v4",
          "flags": "-march={name} -mtune=generic"
        },
        {
          "versions": "3.9:11.1",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
        }
      ],
      "apple-clang": [
        {
          "versions": "8.0:",
          "name": "x86-64",
          "flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
        }
      ]
    }
  },
  "nocona": {
    "from": ["x86_64"],
    "vendor": "GenuineIntel",
@@ -337,7 +177,7 @@
    }
  },
  "nehalem": {
    "from": ["core2", "x86_64_v2"],
    "from": ["core2"],
    "vendor": "GenuineIntel",
    "features": [
      "mmx",
@@ -554,7 +394,7 @@
    }
  },
  "haswell": {
    "from": ["ivybridge", "x86_64_v3"],
    "from": ["ivybridge"],
    "vendor": "GenuineIntel",
    "features": [
      "mmx",
@@ -802,7 +642,7 @@
    }
  },
  "skylake_avx512": {
    "from": ["skylake", "x86_64_v4"],
    "from": ["skylake"],
    "vendor": "GenuineIntel",
    "features": [
      "mmx",
@@ -1146,7 +986,7 @@
    }
  },
  "bulldozer": {
    "from": ["x86_64_v2"],
    "from": ["x86_64"],
    "vendor": "AuthenticAMD",
    "features": [
      "mmx",
@@ -1305,7 +1145,7 @@
    }
  },
  "excavator": {
    "from": ["steamroller", "x86_64_v3"],
    "from": ["steamroller"],
    "vendor": "AuthenticAMD",
    "features": [
      "mmx",
@@ -1364,7 +1204,7 @@
    }
  },
  "zen": {
    "from": ["x86_64_v3"],
    "from": ["x86_64"],
    "vendor": "AuthenticAMD",
    "features": [
      "bmi1",
@@ -1519,24 +1359,9 @@
      "popcnt",
      "clwb",
      "vaes",
      "vpclmulqdq",
      "pku"
      "vpclmulqdq"
    ],
    "compilers": {
      "gcc": [
        {
          "versions": "10.3:",
          "name": "znver3",
          "flags": "-march={name} -mtune={name}"
        }
      ],
      "clang": [
        {
          "versions": "12.0:",
          "name": "znver3",
          "flags": "-march={name} -mtune={name}"
        }
      ],
      "aocc": [
        {
          "versions": "3.0:",
@@ -1719,18 +1544,6 @@
          "versions": ":",
          "flags": "-march=armv8-a -mtune=generic"
        }
      ],
      "apple-clang": [
        {
          "versions": ":",
          "flags": "-march=armv8-a -mtune=generic"
        }
      ],
      "arm": [
        {
          "versions": ":",
          "flags": "-march=armv8-a -mtune=generic"
        }
      ]
    }
  },
@@ -1834,12 +1647,6 @@
          "versions": "5:",
          "flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
        }
      ],
      "arm": [
        {
          "versions": "20:",
          "flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
        }
      ]
    }
  },
@@ -1942,40 +1749,9 @@
          "versions": "5:",
          "flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto"
        }
      ],
      "arm" : [
        {
          "versions": "20:",
          "flags" : "-march=armv8.2-a+fp16+rcpc+dotprod+crypto"
        }
      ]
    }
  },
  "m1": {
    "from": ["aarch64"],
    "vendor": "Apple",
    "features": [],
    "compilers": {
      "gcc": [
        {
          "versions": "8.0:",
          "flags" : "-march=armv8.4-a -mtune=generic"
        }
      ],
      "clang" : [
        {
          "versions": "9.0:",
          "flags" : "-march=armv8.4-a"
        }
      ],
      "apple-clang": [
        {
          "versions": "11.0:",
          "flags" : "-march=armv8.4-a"
        }
      ]
    }
  },
  "arm": {
    "from": [],
    "vendor": "generic",

lib/spack/external/py2/typing.py (vendored)
@@ -1,4 +1,4 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -5,9 +5,9 @@

from __future__ import print_function

import re
import argparse
import errno
import re
import sys

from six import StringIO

@@ -326,7 +326,7 @@ def end_function(self, prog=None):
    """Returns the syntax needed to end a function definition.

    Parameters:
        prog (str or None): the command name
        prog (str, optional): the command name

    Returns:
        str: the function definition ending
@@ -4,9 +4,9 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import errno
import hashlib
import glob
import grp
import hashlib
import itertools
import numbers
import os
@@ -19,12 +19,11 @@
from contextlib import contextmanager

import six

from llnl.util import tty
from llnl.util.lang import dedupe, memoized

from spack.util.executable import Executable


if sys.version_info >= (3, 3):
    from collections.abc import Sequence  # novm
else:
@@ -444,7 +443,7 @@ def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
        src (str): the directory to copy
        dest (str): the destination directory
        symlinks (bool): whether or not to preserve symlinks
        ignore (typing.Callable): function indicating which files to ignore
        ignore (function): function indicating which files to ignore
        _permissions (bool): for internal use only

    Raises:
@@ -518,7 +517,7 @@ def install_tree(src, dest, symlinks=True, ignore=None):
        src (str): the directory to install
        dest (str): the destination directory
        symlinks (bool): whether or not to preserve symlinks
        ignore (typing.Callable): function indicating which files to ignore
        ignore (function): function indicating which files to ignore

    Raises:
        IOError: if *src* does not match any files or directories
@@ -557,12 +556,12 @@ def mkdirp(*paths, **kwargs):
        paths (str): paths to create with mkdirp

    Keyword Arguments:
        mode (permission bits or None): optional permissions to set
        mode (permission bits or None, optional): optional permissions to set
            on the created directory -- use OS default if not provided
        group (group name or None): optional group for permissions of
        group (group name or None, optional): optional group for permissions of
            final created directory -- use OS default if not provided. Only
            used if world write permissions are not set
        default_perms (str or None): one of 'parents' or 'args'. The default permissions
        default_perms ('parents' or 'args', optional): The default permissions
            that are set for directories that are not themselves an argument
            for mkdirp. 'parents' means intermediate directories get the
            permissions of their direct parent directory, 'args' means
@@ -692,7 +691,7 @@ def replace_directory_transaction(directory_name, tmp_root=None):

    try:
        yield tmp_dir
    except (Exception, KeyboardInterrupt, SystemExit):
    except (Exception, KeyboardInterrupt, SystemExit) as e:
        # Delete what was there, before copying back the original content
        if os.path.exists(directory_name):
            shutil.rmtree(directory_name)
@@ -701,7 +700,10 @@
            dst=os.path.dirname(directory_name)
        )
        tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))
        raise

        msg = 'the transactional move of "{0}" failed.'
        msg += '\n    ' + str(e)
        raise RuntimeError(msg.format(directory_name))
    else:
        # Otherwise delete the temporary directory
        shutil.rmtree(tmp_dir)
@@ -863,7 +865,7 @@ def traverse_tree(source_root, dest_root, rel_path='', **kwargs):
    Keyword Arguments:
        order (str): Whether to do pre- or post-order traversal. Accepted
            values are 'pre' and 'post'
        ignore (typing.Callable): function indicating which files to ignore
        ignore (function): function indicating which files to ignore
        follow_nonexisting (bool): Whether to descend into directories in
            ``src`` that do not exit in ``dest``. Default is True
        follow_links (bool): Whether to descend into symlinks in ``src``
@@ -1099,23 +1101,23 @@ def find(root, files, recursive=True):

    Accepts any glob characters accepted by fnmatch:

    ==========  ====================================
    Pattern     Meaning
    ==========  ====================================
    ``*``       matches everything
    ``?``       matches any single character
    ``[seq]``   matches any character in ``seq``
    ``[!seq]``  matches any character not in ``seq``
    ==========  ====================================
    =======  ====================================
    Pattern  Meaning
    =======  ====================================
    *        matches everything
    ?        matches any single character
    [seq]    matches any character in ``seq``
    [!seq]   matches any character not in ``seq``
    =======  ====================================

    Parameters:
        root (str): The root directory to start searching from
        files (str or Sequence): Library name(s) to search for
        recursive (bool): if False search only root folder,
        recurse (bool, optional): if False search only root folder,
            if True descends top-down from the root. Defaults to True.

    Returns:
        list: The files that have been found
        list of strings: The files that have been found
    """
    if isinstance(files, six.string_types):
        files = [files]
@@ -1197,7 +1199,7 @@ def directories(self):
        ['/dir1', '/dir2']

        Returns:
            list: A list of directories
            list of strings: A list of directories
        """
        return list(dedupe(
            os.path.dirname(x) for x in self.files if os.path.dirname(x)
@@ -1215,7 +1217,7 @@ def basenames(self):
        ['a.h', 'b.h']

        Returns:
            list: A list of base-names
            list of strings: A list of base-names
        """
        return list(dedupe(os.path.basename(x) for x in self.files))

@@ -1302,7 +1304,7 @@ def headers(self):
        """Stable de-duplication of the headers.

        Returns:
            list: A list of header files
            list of strings: A list of header files
        """
        return self.files

@@ -1315,7 +1317,7 @@ def names(self):
        ['a', 'b']

        Returns:
            list: A list of files without extensions
            list of strings: A list of files without extensions
        """
        names = []

@@ -1406,9 +1408,9 @@ def find_headers(headers, root, recursive=False):
    =======  ====================================

    Parameters:
        headers (str or list): Header name(s) to search for
        headers (str or list of str): Header name(s) to search for
        root (str): The root directory to start searching from
        recursive (bool): if False search only root folder,
        recursive (bool, optional): if False search only root folder,
            if True descends top-down from the root. Defaults to False.

    Returns:
@@ -1444,7 +1446,7 @@ def find_all_headers(root):
    in the directory passed as argument.

    Args:
        root (str): directory where to look recursively for header files
        root (path): directory where to look recursively for header files

    Returns:
        List of all headers found in ``root`` and subdirectories.
@@ -1464,7 +1466,7 @@ def libraries(self):
        """Stable de-duplication of library files.

        Returns:
            list: A list of library files
            list of strings: A list of library files
        """
        return self.files

@@ -1477,7 +1479,7 @@ def names(self):
        ['a', 'b']

        Returns:
            list: A list of library names
            list of strings: A list of library names
        """
        names = []

@@ -1562,8 +1564,8 @@ def find_system_libraries(libraries, shared=True):
    =======  ====================================

    Parameters:
        libraries (str or list): Library name(s) to search for
        shared (bool): if True searches for shared libraries,
        libraries (str or list of str): Library name(s) to search for
        shared (bool, optional): if True searches for shared libraries,
            otherwise for static. Defaults to True.

    Returns:
@@ -1613,11 +1615,11 @@ def find_libraries(libraries, root, shared=True, recursive=False):
    =======  ====================================

    Parameters:
        libraries (str or list): Library name(s) to search for
        libraries (str or list of str): Library name(s) to search for
        root (str): The root directory to start searching from
        shared (bool): if True searches for shared libraries,
        shared (bool, optional): if True searches for shared libraries,
            otherwise for static. Defaults to True.
        recursive (bool): if False search only root folder,
        recursive (bool, optional): if False search only root folder,
            if True descends top-down from the root. Defaults to False.

    Returns:
@@ -5,20 +5,15 @@

from __future__ import division

import functools
import inspect
import multiprocessing
import os
import re
import sys
import functools
import inspect
from datetime import datetime, timedelta

from six import string_types
import sys

if sys.version_info < (3, 0):
    from itertools import izip_longest  # novm
    zip_longest = izip_longest
else:
    from itertools import zip_longest  # novm

if sys.version_info >= (3, 3):
    from collections.abc import Hashable, MutableMapping  # novm
@@ -30,6 +25,23 @@

ignore_modules = [r'^\.#', '~$']


# On macOS, Python 3.8 multiprocessing now defaults to the 'spawn' start
# method. Spack cannot currently handle this, so force the process to start
# using the 'fork' start method.
#
# TODO: This solution is not ideal, as the 'fork' start method can lead to
# crashes of the subprocess. Figure out how to make 'spawn' work.
#
# See:
# * https://github.com/spack/spack/pull/18124
# * https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods  # noqa: E501
# * https://bugs.python.org/issue33725
if sys.version_info >= (3,):  # novm
    fork_context = multiprocessing.get_context('fork')
else:
    fork_context = multiprocessing


def index_by(objects, *funcs):
    """Create a hierarchy of dictionaries by splitting the supplied
    set of objects on unique values of the supplied functions.
@@ -215,31 +227,6 @@ def list_modules(directory, **kwargs):
            yield re.sub('.py$', '', name)


def decorator_with_or_without_args(decorator):
    """Allows a decorator to be used with or without arguments, e.g.::

        # Calls the decorator function some args
        @decorator(with, arguments, and=kwargs)

    or::

        # Calls the decorator function with zero arguments
        @decorator

    """
    # See https://stackoverflow.com/questions/653368 for more on this
    @functools.wraps(decorator)
    def new_dec(*args, **kwargs):
        if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
            # actual decorated function
            return decorator(args[0])
        else:
            # decorator arguments
            return lambda realf: decorator(realf, *args, **kwargs)

    return new_dec


def key_ordering(cls):
    """Decorates a class with extra methods that implement rich comparison
    operations and ``__hash__``. The decorator assumes that the class
@@ -281,197 +268,7 @@ def setter(name, value):
    return cls


#: sentinel for testing that iterators are done in lazy_lexicographic_ordering
done = object()


def tuplify(seq):
    """Helper for lazy_lexicographic_ordering()."""
    return tuple((tuplify(x) if callable(x) else x) for x in seq())


def lazy_eq(lseq, rseq):
    """Equality comparison for two lazily generated sequences.

    See ``lazy_lexicographic_ordering``.
    """
    liter = lseq()  # call generators
    riter = rseq()

    # zip_longest is implemented in native code, so use it for speed.
    # use zip_longest instead of zip because it allows us to tell
    # which iterator was longer.
    for left, right in zip_longest(liter, riter, fillvalue=done):
        if (left is done) or (right is done):
            return False

        # recursively enumerate any generators, otherwise compare
        equal = lazy_eq(left, right) if callable(left) else left == right
        if not equal:
            return False

    return True


def lazy_lt(lseq, rseq):
    """Less-than comparison for two lazily generated sequences.

    See ``lazy_lexicographic_ordering``.
    """
    liter = lseq()
    riter = rseq()

    for left, right in zip_longest(liter, riter, fillvalue=done):
        if (left is done) or (right is done):
            return left is done  # left was shorter than right

        sequence = callable(left)
        equal = lazy_eq(left, right) if sequence else left == right
        if equal:
            continue

        if sequence:
            return lazy_lt(left, right)
        if left is None:
            return True
        if right is None:
            return False

        return left < right

    return False  # if equal, return False


@decorator_with_or_without_args
def lazy_lexicographic_ordering(cls, set_hash=True):
    """Decorates a class with extra methods that implement rich comparison.

    This is a lazy version of the tuple comparison used frequently to
    implement comparison in Python. Given some objects with fields, you
    might use tuple keys to implement comparison, e.g.::

        class Widget:
            def _cmp_key(self):
                return (
                    self.a,
                    self.b,
                    (self.c, self.d),
                    self.e
                )

            def __eq__(self, other):
                return self._cmp_key() == other._cmp_key()

            def __lt__(self):
                return self._cmp_key() < other._cmp_key()

            # etc.

    Python would compare ``Widgets`` lexicographically based on their
    tuples. The issue there for simple comparators is that we have to
    build the tuples *and* we have to generate all the values in them up
    front. When implementing comparisons for large data structures, this
    can be costly.

    Lazy lexicographic comparison maps the tuple comparison shown above
    to generator functions. Instead of comparing based on pre-constructed
    tuple keys, users of this decorator can compare using elements from a
    generator. So, you'd write::

        @lazy_lexicographic_ordering
        class Widget:
            def _cmp_iter(self):
                yield a
                yield b
                def cd_fun():
                    yield c
                    yield d
                yield cd_fun
                yield e

        # operators are added by decorator

    There are no tuples preconstructed, and the generator does not have
    to complete. Instead of tuples, we simply make functions that lazily
    yield what would've been in the tuple. The
    ``@lazy_lexicographic_ordering`` decorator handles the details of
    implementing comparison operators, and the ``Widget`` implementor
    only has to worry about writing ``_cmp_iter``, and making sure the
    elements in it are also comparable.

    Some things to note:

    * If a class already has ``__eq__``, ``__ne__``, ``__lt__``,
      ``__le__``, ``__gt__``, ``__ge__``, or ``__hash__`` defined, this
      decorator will overwrite them.

    * If ``set_hash`` is ``False``, this will not overwrite
      ``__hash__``.

    * This class uses Python 2 None-comparison semantics. If you yield
      None and it is compared to a non-None type, None will always be
      less than the other object.

    Raises:
        TypeError: If the class does not have a ``_cmp_iter`` method

    """
    if not has_method(cls, "_cmp_iter"):
        raise TypeError("'%s' doesn't define _cmp_iter()." % cls.__name__)

    # comparison operators are implemented in terms of lazy_eq and lazy_lt
    def eq(self, other):
        if self is other:
            return True
        return (other is not None) and lazy_eq(self._cmp_iter, other._cmp_iter)

    def lt(self, other):
        if self is other:
            return False
        return (other is not None) and lazy_lt(self._cmp_iter, other._cmp_iter)

    def ne(self, other):
        if self is other:
            return False
        return (other is None) or not lazy_eq(self._cmp_iter, other._cmp_iter)

    def gt(self, other):
        if self is other:
            return False
        return (other is None) or lazy_lt(other._cmp_iter, self._cmp_iter)

    def le(self, other):
        if self is other:
            return True
        return (other is not None) and not lazy_lt(other._cmp_iter,
                                                   self._cmp_iter)

    def ge(self, other):
        if self is other:
            return True
        return (other is None) or not lazy_lt(self._cmp_iter, other._cmp_iter)

    def h(self):
        return hash(tuplify(self._cmp_iter))

    def add_func_to_class(name, func):
        """Add a function to a class with a particular name."""
        func.__name__ = name
        setattr(cls, name, func)

    add_func_to_class("__eq__", eq)
    add_func_to_class("__ne__", ne)
    add_func_to_class("__lt__", lt)
    add_func_to_class("__le__", le)
    add_func_to_class("__gt__", gt)
    add_func_to_class("__ge__", ge)
    if set_hash:
        add_func_to_class("__hash__", h)

    return cls


@lazy_lexicographic_ordering
@key_ordering
class HashableMap(MutableMapping):
    """This is a hashable, comparable dictionary. Hash is performed on
    a tuple of the values in the dictionary."""
@@ -494,9 +291,8 @@ def __len__(self):
    def __delitem__(self, key):
        del self.dict[key]

    def _cmp_iter(self):
        for _, v in sorted(self.items()):
            yield v
    def _cmp_key(self):
        return tuple(sorted(self.values()))

    def copy(self):
        """Type-agnostic clone method. Preserves subclass type."""
@@ -596,8 +392,8 @@ def pretty_date(time, now=None):
    """Convert a datetime or timestamp to a pretty, relative date.

    Args:
        time (datetime.datetime or int): date to print prettily
        now (datetime.datetime): datetime for 'now', i.e. the date the pretty date
        time (datetime or int): date to print prettily
        now (datetime): datetime for 'now', i.e. the date the pretty date
            is relative to (default is datetime.now())

    Returns:
@@ -671,7 +467,7 @@ def pretty_string_to_date(date_str, now=None):
    or be a *pretty date* (like ``yesterday`` or ``two months ago``)

    Returns:
        (datetime.datetime): datetime object corresponding to ``date_str``
        (datetime): datetime object corresponding to ``date_str``
    """

    pattern = {}
@@ -828,9 +624,6 @@ def __repr__(self):
def load_module_from_file(module_name, module_path):
    """Loads a python module from the path of the corresponding file.

    If the module is already in ``sys.modules`` it will be returned as
    is and not reloaded.

    Args:
        module_name (str): namespace where the python module will be loaded,
            e.g. ``foo.bar``
@@ -843,28 +636,12 @@ def load_module_from_file(module_name, module_path):
        ImportError: when the module can't be loaded
        FileNotFoundError: when module_path doesn't exist
    """
    if module_name in sys.modules:
        return sys.modules[module_name]

    # This recipe is adapted from https://stackoverflow.com/a/67692/771663
    if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
        import importlib.util
        spec = importlib.util.spec_from_file_location(  # novm
            module_name, module_path)
        module = importlib.util.module_from_spec(spec)  # novm
        # The module object needs to exist in sys.modules before the
        # loader executes the module code.
        #
        # See https://docs.python.org/3/reference/import.html#loading
        sys.modules[spec.name] = module
        try:
            spec.loader.exec_module(module)
        except BaseException:
            try:
                del sys.modules[spec.name]
            except KeyError:
                pass
            raise
        spec.loader.exec_module(module)
    elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
        import importlib.machinery
        loader = importlib.machinery.SourceFileLoader(  # novm
@@ -915,19 +692,3 @@ class Devnull(object):
    """
    def write(self, *_):
        pass


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
    replacing intervening elements with '...'. For example::

        elide_list([1,2,3,4,5,6], 4)

    gives::

        [1, 2, 3, '...', 6]
    """
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list
@@ -7,12 +7,12 @@

from __future__ import print_function

import filecmp
import os
import shutil
import filecmp

from llnl.util.filesystem import traverse_tree, mkdirp, touch
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, touch, traverse_tree

__all__ = ['LinkTree']
@@ -3,31 +3,20 @@
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import errno
|
||||
import fcntl
|
||||
import os
|
||||
import socket
|
||||
import fcntl
|
||||
import errno
|
||||
import time
|
||||
import socket
|
||||
from datetime import datetime
|
||||
from typing import Dict, Tuple # novm
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
import spack.util.string
|
||||
|
||||
__all__ = [
|
||||
'Lock',
|
||||
'LockDowngradeError',
|
||||
'LockUpgradeError',
|
||||
'LockTransaction',
|
||||
'WriteTransaction',
|
||||
'ReadTransaction',
|
||||
'LockError',
|
||||
'LockTimeoutError',
|
||||
'LockPermissionError',
|
||||
'LockROFileError',
|
||||
'CantCreateLockError'
|
||||
]
|
||||
|
||||
__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
|
||||
'LockError', 'LockTimeoutError',
|
||||
'LockPermissionError', 'LockROFileError', 'CantCreateLockError']
|
||||
|
||||
#: Mapping of supported locks to description
|
||||
lock_type = {fcntl.LOCK_SH: 'read', fcntl.LOCK_EX: 'write'}
|
||||
@@ -37,126 +26,6 @@
true_fn = lambda: True


class OpenFile(object):
"""Record for keeping track of open lockfiles (with reference counting).

There's really only one ``OpenFile`` per inode, per process, but we record the
filehandle here as it's the thing we end up using in python code. You can get
the file descriptor from the file handle if needed -- or we could make this track
file descriptors as well in the future.
"""
def __init__(self, fh):
self.fh = fh
self.refs = 0


class OpenFileTracker(object):
"""Track open lockfiles, to minimize number of open file descriptors.

The ``fcntl`` locks that Spack uses are associated with an inode and a process.
This is convenient, because if a process exits, it releases its locks.
Unfortunately, this also means that if you close a file, *all* locks associated
with that file's inode are released, regardless of whether the process has any
other open file descriptors on it.

Because of this, we need to track open lock files so that we only close them when
a process no longer needs them. We do this by tracking each lockfile by its
inode and process id. This has several nice properties:

1. Tracking by pid ensures that, if we fork, we don't inadvertently track the parent
process's lockfiles. ``fcntl`` locks are not inherited across forks, so we'll
just track new lockfiles in the child.
2. Tracking by inode ensures that references are counted per inode, and that we don't
inadvertently close a file whose inode still has open locks.
3. Tracking by both pid and inode ensures that we only open lockfiles the minimum
number of times necessary for the locks we have.

Note: as mentioned elsewhere, these locks aren't thread safe -- they're designed to
work in Python and assume the GIL.
"""

def __init__(self):
"""Create a new ``OpenFileTracker``."""
self._descriptors = {} # type: Dict[Tuple[int, int], OpenFile]

def get_fh(self, path):
"""Get a filehandle for a lockfile.

This routine will open writable files for read/write even if you're asking
for a shared (read-only) lock. This is so that we can upgrade to an exclusive
(write) lock later if requested.

Arguments:
path (str): path to lock file we want a filehandle for
"""
# Open writable files as 'r+' so we can upgrade to write later
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'

pid = os.getpid()
open_file = None # OpenFile object, if there is one
stat = None # stat result for the lockfile, if it exists

try:
# see whether we've seen this inode/pid before
stat = os.stat(path)
key = (stat.st_ino, pid)
open_file = self._descriptors.get(key)

except OSError as e:
if e.errno != errno.ENOENT: # only handle file not found
raise

# path does not exist -- fail if we won't be able to create it
parent = os.path.dirname(path) or '.'
if not os.access(parent, os.W_OK):
raise CantCreateLockError(path)

# if there was no already open file, we'll need to open one
if not open_file:
if stat and not os.access(path, os.W_OK):
# we know path exists but not if it's writable. If it's read-only,
# only open the file for reading (and fail if we're trying to get
# an exclusive (write) lock on it)
os_mode, fh_mode = os.O_RDONLY, 'r'

fd = os.open(path, os_mode)
fh = os.fdopen(fd, fh_mode)
open_file = OpenFile(fh)

# if we just created the file, we'll need to get its inode here
if not stat:
inode = os.fstat(fd).st_ino
key = (inode, pid)

self._descriptors[key] = open_file

open_file.refs += 1
return open_file.fh

def release_fh(self, path):
"""Release a filehandle, only closing it if there are no more references."""
try:
inode = os.stat(path).st_ino
except OSError as e:
if e.errno != errno.ENOENT: # only handle file not found
raise
inode = None # this will not be in self._descriptors

key = (inode, os.getpid())
open_file = self._descriptors.get(key)
assert open_file, "Attempted to close non-existing lock path: %s" % path

open_file.refs -= 1
if not open_file.refs:
del self._descriptors[key]
open_file.fh.close()


#: Open file descriptors for locks in this process. Used to prevent one process
#: from opening the same file many times for different byte range locks
file_tracker = OpenFileTracker()


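To make the reference counting above concrete, a short sketch of the intended balance of ``get_fh``/``release_fh`` calls (the path is hypothetical, and assumed writable):

```python
fh1 = file_tracker.get_fh('/tmp/example.lock')  # opens the file, refs == 1
fh2 = file_tracker.get_fh('/tmp/example.lock')  # same inode/pid, refs == 2
assert fh1 is fh2                               # one handle per inode, per process

file_tracker.release_fh('/tmp/example.lock')    # refs == 1, handle stays open
file_tracker.release_fh('/tmp/example.lock')    # refs == 0, handle is closed
```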
def _attempts_str(wait_time, nattempts):
# Don't print anything if we succeeded on the first try
if nattempts <= 1:
@@ -177,8 +46,7 @@ class Lock(object):
Note that this is for managing contention over resources *between*
processes and not for managing contention between threads in a process: the
functions of this object are not thread-safe. A process also must not
maintain multiple locks on the same file (or, more specifically, on
overlapping byte ranges in the same file).
maintain multiple locks on the same file.
"""

def __init__(self, path, start=0, length=0, default_timeout=None,
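As a rough orientation, typical use of ``Lock`` pairs each release with a matching acquire; ``acquire_read``/``acquire_write`` are the counterparts of the ``release_read``/``release_write`` methods shown further down. A sketch only, with a hypothetical path:

```python
lock = Lock('/tmp/example.lock', default_timeout=1.0)

lock.acquire_read()       # shared (read) lock
try:
    pass                  # ... read the protected resource ...
finally:
    lock.release_read()

lock.acquire_write()      # exclusive (write) lock on the same byte range
try:
    pass                  # ... modify the protected resource ...
finally:
    lock.release_write()
```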
@@ -283,10 +151,25 @@ def _lock(self, op, timeout=None):

# Create file and parent directories if they don't exist.
if self._file is None:
self._ensure_parent_directory()
self._file = file_tracker.get_fh(self.path)
parent = self._ensure_parent_directory()

if op == fcntl.LOCK_EX and self._file.mode == 'r':
# Open writable files as 'r+' so we can upgrade to write later
os_mode, fd_mode = (os.O_RDWR | os.O_CREAT), 'r+'
if os.path.exists(self.path):
if not os.access(self.path, os.W_OK):
if op == fcntl.LOCK_SH:
# can still lock read-only files if we open 'r'
os_mode, fd_mode = os.O_RDONLY, 'r'
else:
raise LockROFileError(self.path)

elif not os.access(parent, os.W_OK):
raise CantCreateLockError(self.path)

fd = os.open(self.path, os_mode)
self._file = os.fdopen(fd, fd_mode)

elif op == fcntl.LOCK_EX and self._file.mode == 'r':
# Attempt to upgrade to write lock w/a read-only file.
# If the file were writable, we'd have opened it 'r+'
raise LockROFileError(self.path)
@@ -381,7 +264,7 @@ def _write_log_debug_data(self):
self.old_host = self.host

self.pid = os.getpid()
self.host = socket.gethostname()
self.host = socket.getfqdn()

# write pid, host to disk to sync over FS
self._file.seek(0)
@@ -399,8 +282,7 @@ def _unlock(self):
"""
fcntl.lockf(self._file, fcntl.LOCK_UN,
self._length, self._start, os.SEEK_SET)

file_tracker.release_fh(self.path)
self._file.close()
self._file = None
self._reads = 0
self._writes = 0
@@ -519,7 +401,7 @@ def release_read(self, release_fn=None):
"""Releases a read lock.

Arguments:
release_fn (typing.Callable): function to call *before* the last recursive
release_fn (callable): function to call *before* the last recursive
lock (read or write) is released.

If the last recursive lock will be released, then this will call
@@ -555,7 +437,7 @@ def release_write(self, release_fn=None):
"""Releases a write lock.

Arguments:
release_fn (typing.Callable): function to call before the last recursive
release_fn (callable): function to call before the last recursive
write is released.

If the last recursive *write* lock will be released, then this
@@ -651,10 +533,10 @@ class LockTransaction(object):
Arguments:
lock (Lock): underlying lock for this transaction to be acquired on
enter and released on exit
acquire (typing.Callable or contextlib.contextmanager): function to be called
after lock is acquired, or contextmanager to enter after acquire and leave
acquire (callable or contextmanager): function to be called after lock
is acquired, or contextmanager to enter after acquire and leave
before release.
release (typing.Callable): function to be called before release. If
release (callable): function to be called before release. If
``acquire`` is a contextmanager, this will be called *after*
exiting the nested context and before the lock is released.
timeout (float): number of seconds to set for the timeout when

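A minimal sketch of how the ``acquire``/``release`` hooks documented above are meant to be used with one of the transaction classes from ``__all__`` (the callable names and path are illustrative):

```python
def refresh_cache():
    pass  # runs after the lock is acquired

def flush_cache():
    pass  # runs before the lock is released

lock = Lock('/tmp/example.lock')
with ReadTransaction(lock, acquire=refresh_cache, release=flush_cache):
    pass  # ... the lock is held for the duration of this block ...
```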
@@ -5,7 +5,6 @@

from __future__ import unicode_literals

import contextlib
import fcntl
import os
import struct
@@ -13,13 +12,12 @@
import termios
import textwrap
import traceback
from datetime import datetime

import six
from datetime import datetime
from six import StringIO
from six.moves import input

from llnl.util.tty.color import cescape, clen, cprint, cwrite
from llnl.util.tty.color import cprint, cwrite, cescape, clen

# Globals
_debug = 0
@@ -29,7 +27,6 @@
_msg_enabled = True
_warn_enabled = True
_error_enabled = True
_output_filter = lambda s: s
indent = " "


@@ -92,18 +89,6 @@ def error_enabled():
return _error_enabled


@contextlib.contextmanager
def output_filter(filter_fn):
"""Context manager that applies a filter to all output."""
global _output_filter
saved_filter = _output_filter
try:
_output_filter = filter_fn
yield
finally:
_output_filter = saved_filter


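For example, ``output_filter`` can be used to redact sensitive strings from everything printed inside the block (a sketch; ``msg`` is the tty helper shown below):

```python
with output_filter(lambda s: s.replace('hunter2', '******')):
    msg('the password is hunter2')  # printed with the secret masked
```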
class SuppressOutput:
"""Class for disabling output in a scope using 'with' keyword"""

@@ -180,23 +165,13 @@ def msg(message, *args, **kwargs):
if _stacktrace:
st_text = process_stacktrace(2)
if newline:
cprint(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
cprint("@*b{%s==>} %s%s" % (
st_text, get_timestamp(), cescape(message)))
else:
cwrite(
"@*b{%s==>} %s%s" % (
st_text,
get_timestamp(),
cescape(_output_filter(message))
)
)
cwrite("@*b{%s==>} %s%s" % (
st_text, get_timestamp(), cescape(message)))
for arg in args:
print(indent + _output_filter(six.text_type(arg)))
print(indent + six.text_type(arg))


def info(message, *args, **kwargs):
@@ -212,29 +187,18 @@ def info(message, *args, **kwargs):
st_text = ""
if _stacktrace:
st_text = process_stacktrace(st_countback)
cprint(
"@%s{%s==>} %s%s" % (
format,
st_text,
get_timestamp(),
cescape(_output_filter(six.text_type(message)))
),
stream=stream
)
cprint("@%s{%s==>} %s%s" % (
format, st_text, get_timestamp(), cescape(six.text_type(message))
), stream=stream)
for arg in args:
if wrap:
lines = textwrap.wrap(
_output_filter(six.text_type(arg)),
initial_indent=indent,
subsequent_indent=indent,
break_long_words=break_long_words
)
six.text_type(arg), initial_indent=indent,
subsequent_indent=indent, break_long_words=break_long_words)
for line in lines:
stream.write(line + '\n')
else:
stream.write(
indent + _output_filter(six.text_type(arg)) + '\n'
)
stream.write(indent + six.text_type(arg) + '\n')


def verbose(message, *args, **kwargs):

@@ -10,11 +10,10 @@

import os
import sys

from six import StringIO, text_type

from llnl.util.tty import terminal_size
from llnl.util.tty.color import cextra, clen
from llnl.util.tty.color import clen, cextra


class ColumnConfig:
@@ -109,17 +108,19 @@ def colify(elts, **options):
using ``str()``.

Keyword Arguments:
output (typing.IO): A file object to write to. Default is ``sys.stdout``
indent (int): Optionally indent all columns by some number of spaces
padding (int): Spaces between columns. Default is 2
width (int): Width of the output. Default is 80 if tty not detected
cols (int): Force number of columns. Default is to size to terminal, or
single-column if no tty
tty (bool): Whether to attempt to write to a tty. Default is to autodetect a
tty. Set to False to force single-column output
method (str): Method to use to fit columns. Options are variable or uniform.
Variable-width columns are tighter, uniform columns are all the same width
and fit less data on the screen
output (stream): A file object to write to. Default is ``sys.stdout``
indent (int): Optionally indent all columns by some number of spaces
padding (int): Spaces between columns. Default is 2
width (int): Width of the output. Default is 80 if tty not detected
cols (int): Force number of columns. Default is to size to
terminal, or single-column if no tty
tty (bool): Whether to attempt to write to a tty. Default is to
autodetect a tty. Set to False to force single-column
output
method (str): Method to use to fit columns. Options are variable or
uniform. Variable-width columns are tighter, uniform
columns are all the same width and fit less data on
the screen
"""
# Get keyword arguments or set defaults
cols = options.pop("cols", 0)

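An illustrative call matching the keyword arguments documented above (output, width, and tty are left at their defaults; the element list is made up):

```python
colify(['libelf', 'zlib', 'openssl', 'python', 'cmake'],
       indent=4, padding=2, cols=0, method='variable')
```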
@@ -60,9 +60,9 @@
To output an @, use '@@'. To output a } inside braces, use '}}'.
"""
from __future__ import unicode_literals

import re
import sys

from contextlib import contextmanager

import six

@@ -13,14 +13,15 @@
import os
import re
import select
import signal
import sys
import traceback
import signal
from contextlib import contextmanager
from types import ModuleType # novm
from typing import Optional # novm
from six import string_types
from six import StringIO

from six import StringIO, string_types
from typing import Optional # novm
from types import ModuleType # novm

import llnl.util.tty as tty

@@ -320,10 +321,7 @@ def __init__(self, file_like):
def unwrap(self):
if self.open:
if self.file_like:
if sys.version_info < (3,):
self.file = open(self.file_like, 'w')
else:
self.file = open(self.file_like, 'w', encoding='utf-8')
self.file = open(self.file_like, 'w')
else:
self.file = StringIO()
return self.file
@@ -436,7 +434,7 @@ class log_output(object):
"""

def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
env=None, filter_fn=None):
env=None):
"""Create a new output log context manager.

Args:
@@ -446,8 +444,6 @@ def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
debug (int): positive to enable tty debug mode during logging
buffer (bool): pass buffer=True to skip unbuffering output; note
this doesn't set up any *new* buffering
filter_fn (callable, optional): Callable[str] -> str to filter each
line of output

log_output can take either a file object or a filename. If a
filename is passed, the file will be opened and closed entirely
@@ -467,7 +463,6 @@ def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
self.debug = debug
self.buffer = buffer
self.env = env # the environment to use for _writer_daemon
self.filter_fn = filter_fn

self._active = False # used to prevent re-entry

@@ -533,22 +528,20 @@ def __enter__(self):
# Sets a daemon that writes to file what it reads from a pipe
try:
# need to pass this b/c multiprocessing closes stdin in child.
input_multiprocess_fd = None
try:
if sys.stdin.isatty():
input_multiprocess_fd = MultiProcessFd(
os.dup(sys.stdin.fileno())
)
input_multiprocess_fd = MultiProcessFd(
os.dup(sys.stdin.fileno())
)
except BaseException:
# just don't forward input if this fails
pass
input_multiprocess_fd = None

with replace_environment(self.env):
self.process = multiprocessing.Process(
target=_writer_daemon,
args=(
input_multiprocess_fd, read_multiprocess_fd, write_fd,
self.echo, self.log_file, child_pipe, self.filter_fn
self.echo, self.log_file, child_pipe
)
)
self.process.daemon = True # must set before start()
@@ -672,7 +665,7 @@ def force_echo(self):


def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
log_file_wrapper, control_pipe, filter_fn):
log_file_wrapper, control_pipe):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.

The daemon receives output from the parent process and writes it both
@@ -717,7 +710,6 @@ def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
log_file_wrapper (FileWrapper): file to log all output
control_pipe (Pipe): multiprocessing pipe on which to send control
information to the parent
filter_fn (callable, optional): function to filter each line of output

"""
# If this process was forked, then it will inherit file descriptors from
@@ -730,11 +722,7 @@ def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,

# Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O.
if sys.version_info < (3,):
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
else:
# Python 3.x before 3.7 does not open with UTF-8 encoding by default
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1, encoding='utf-8')
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)

if stdin_multiprocess_fd:
stdin = os.fdopen(stdin_multiprocess_fd.fd)
@@ -776,42 +764,28 @@ def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
raise

if in_pipe in rlist:
line_count = 0
try:
while line_count < 100:
# Handle output from the calling process.
line = _retry(in_pipe.readline)()
if not line:
return
line_count += 1
# Handle output from the calling process.
line = _retry(in_pipe.readline)()
if not line:
break

# find control characters and strip them.
clean_line, num_controls = control.subn('', line)
# find control characters and strip them.
controls = control.findall(line)
line = control.sub('', line)

# Echo to stdout if requested or forced.
if echo or force_echo:
output_line = clean_line
if filter_fn:
output_line = filter_fn(clean_line)
sys.stdout.write(output_line)
# Echo to stdout if requested or forced.
if echo or force_echo:
sys.stdout.write(line)
sys.stdout.flush()

# Stripped output to log file.
log_file.write(_strip(clean_line))
# Stripped output to log file.
log_file.write(_strip(line))
log_file.flush()

if num_controls > 0:
controls = control.findall(line)
if xon in controls:
force_echo = True
if xoff in controls:
force_echo = False

if not _input_available(in_pipe):
break
finally:
if line_count > 0:
if echo or force_echo:
sys.stdout.flush()
log_file.flush()
if xon in controls:
force_echo = True
if xoff in controls:
force_echo = False

except BaseException:
tty.error("Exception occurred in writer daemon!")
@@ -863,7 +837,3 @@ def wrapped(*args, **kwargs):
continue
raise
return wrapped


def _input_available(f):
return f in select.select([f], [], [], 0)[0]

@@ -14,10 +14,10 @@
"""
from __future__ import print_function

import multiprocessing
import os
import re
import signal
import multiprocessing
import re
import sys
import termios
import time

@@ -1,10 +1,11 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


#: major, minor, patch version for Spack, in a tuple
spack_version_info = (0, 16, 2)
spack_version_info = (0, 16, 1)

#: String containing Spack version joined with .'s
spack_version = '.'.join(str(v) for v in spack_version_info)

@@ -8,9 +8,9 @@
from llnl.util.lang import memoized

import spack.spec
from spack.compilers.clang import Clang
from spack.spec import CompilerSpec
from spack.util.executable import Executable, ProcessError
from spack.compilers.clang import Clang


class ABI(object):

@@ -1,42 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""This package contains code for creating analyzers to extract Application
Binary Interface (ABI) information, along with simple analyses that just load
existing metadata.
"""

from __future__ import absolute_import

import llnl.util.tty as tty

import spack.paths
import spack.util.classes

mod_path = spack.paths.analyzers_path
analyzers = spack.util.classes.list_classes("spack.analyzers", mod_path)

# The base analyzer does not have a name, and cannot do dict comprehension
analyzer_types = {}
for a in analyzers:
if not hasattr(a, "name"):
continue
analyzer_types[a.name] = a


def list_all():
"""A helper function to list all analyzers and their descriptions
"""
for name, analyzer in analyzer_types.items():
print("%-25s: %-35s" % (name, analyzer.description))


def get_analyzer(name):
"""Courtesy function to retrieve an analyzer, and exit on error if it
does not exist.
"""
if name in analyzer_types:
return analyzer_types[name]
tty.die("Analyzer %s does not exist" % name)
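Putting the registry above to work, a hypothetical end-to-end flow (assuming ``spec`` is a concrete, installed spec with an associated package):

```python
analyzer_cls = get_analyzer("install_files")  # exits via tty.die if unknown
analyzer = analyzer_cls(spec)                 # spec must have a package
result = analyzer.run()                       # {"install_files": {...}}
analyzer.save_result(result)
```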
@@ -1,116 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""An analyzer base provides basic functions to run the analysis, save results,
and (optionally) interact with a Spack Monitor
"""

import os

import llnl.util.tty as tty

import spack.config
import spack.hooks
import spack.monitor
import spack.util.path


def get_analyzer_dir(spec, analyzer_dir=None):
"""
Given a spec, return the directory to save analyzer results.

We create the directory if it does not exist. We also check that the
spec has an associated package. An analyzer cannot be run if the spec isn't
associated with a package. If the user provides a custom analyzer_dir,
we use it over checking the config and the default at ~/.spack/analyzers
"""
# An analyzer cannot be run if the spec isn't associated with a package
if not hasattr(spec, "package") or not spec.package:
tty.die("A spec can only be analyzed with an associated package.")

# The top level directory is in the user home, or a custom location
if not analyzer_dir:
analyzer_dir = spack.util.path.canonicalize_path(
spack.config.get('config:analyzers_dir', '~/.spack/analyzers'))

# We follow the same convention as the spec install (this could be better)
package_prefix = os.sep.join(spec.package.prefix.split('/')[-3:])
meta_dir = os.path.join(analyzer_dir, package_prefix)
return meta_dir


class AnalyzerBase(object):

def __init__(self, spec, dirname=None):
"""
Verify that the analyzer has correct metadata.

An Analyzer is intended to run on one spec install, so the spec
with its associated package is required on init. The child analyzer
class should define an init function that super's the init here, and
also check that the analyzer has all dependencies that it
needs. If an analyzer subclass does not have dependencies, it does not
need to define an init. An Analyzer should not be allowed to proceed
if one or more dependencies are missing. The dirname, if defined,
is an optional directory name to save to (instead of the default meta
spack directory).
"""
self.spec = spec
self.dirname = dirname
self.meta_dir = os.path.dirname(spec.package.install_log_path)

for required in ["name", "outfile", "description"]:
if not hasattr(self, required):
tty.die("Please add a %s attribute on the analyzer." % required)

def run(self):
"""
Given a spec with an installed package, run the analyzer on it.
"""
raise NotImplementedError

@property
def output_dir(self):
"""
The full path to the output directory.

This includes the nested analyzer directory structure. This function
does not create anything.
"""
if not hasattr(self, "_output_dir"):
output_dir = get_analyzer_dir(self.spec, self.dirname)
self._output_dir = os.path.join(output_dir, self.name)

return self._output_dir

def save_result(self, result, overwrite=False):
"""
Save a result to the associated spack monitor, if defined.

This function is on the level of the analyzer because it might be
the case that the result is large (appropriate for a single request)
or that the data is organized differently (e.g., more than one
request per result). If an analyzer subclass needs to over-write
this function with a custom save, that is appropriate to do (see abi).
"""
# We maintain the structure in json with the analyzer as key so
# that in the future, we could upload to a monitor server
if result[self.name]:

outfile = os.path.join(self.output_dir, self.outfile)

# Only try to create the results directory if we have a result
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)

# Don't overwrite an existing result if overwrite is False
if os.path.exists(outfile) and not overwrite:
tty.info("%s exists and overwrite is False, skipping." % outfile)
else:
tty.info("Writing result to %s" % outfile)
spack.monitor.write_json(result[self.name], outfile)

# This hook runs after a save result
spack.hooks.on_analyzer_save(self.spec.package, result)
@@ -1,33 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""A configargs analyzer is a class of analyzer that typically just uploads
already existing metadata about config args from a package spec install
directory."""


import os

import spack.monitor

from .analyzer_base import AnalyzerBase


class ConfigArgs(AnalyzerBase):

name = "config_args"
outfile = "spack-analyzer-config-args.json"
description = "config args loaded from spack-configure-args.txt"

def run(self):
"""
Load the configure-args.txt and save in json.

The run function will find the spack-configure-args.txt file in the
package install directory, and read it into a json structure that has
the name of the analyzer as the key.
"""
config_file = os.path.join(self.meta_dir, "spack-configure-args.txt")
return {self.name: spack.monitor.read_file(config_file)}
@@ -1,51 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""An environment analyzer will read and parse the environment variables
file in the installed package directory, generating a json file that has
an index of key, value pairs for environment variables."""


import os

from spack.util.environment import EnvironmentModifications

from .analyzer_base import AnalyzerBase


class EnvironmentVariables(AnalyzerBase):

name = "environment_variables"
outfile = "spack-analyzer-environment-variables.json"
description = "environment variables parsed from spack-build-env.txt"

def run(self):
"""
Load, parse, and save spack-build-env.txt to analyzers.

Read in the spack-build-env.txt file from the package install
directory and parse the environment variables into key value pairs.
The result should have the key for the analyzer, the name.
"""
env_file = os.path.join(self.meta_dir, "spack-build-env.txt")
return {self.name: self._read_environment_file(env_file)}

def _read_environment_file(self, filename):
"""
Read and parse the environment file.

Given an environment file, we want to read it, split by semicolons
and new lines, and then parse down to the subset of SPACK_* variables.
We assume that all spack prefix variables are not secrets, and unlike
the install_manifest.json, we don't (at least to start) parse the values
to remove path prefixes specific to user systems.
"""
if not os.path.exists(filename):
return

mods = EnvironmentModifications.from_sourcing_file(filename)
env = {}
mods.apply_modifications(env)
return env
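The parsing step above, in isolation: ``EnvironmentModifications`` replays the effect of sourcing the file into a plain dict, from which the SPACK_* subset mentioned in the docstring could be selected. A sketch; the filter line is illustrative rather than part of the method above:

```python
mods = EnvironmentModifications.from_sourcing_file('spack-build-env.txt')
env = {}
mods.apply_modifications(env)  # env now maps variable names to values
spack_vars = {k: v for k, v in env.items() if k.startswith('SPACK_')}
```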
@@ -1,31 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""The install files json file (install_manifest.json) already exists in
the package install folder, so this analyzer simply moves it to the user
analyzer folder for further processing."""


import os

import spack.monitor

from .analyzer_base import AnalyzerBase


class InstallFiles(AnalyzerBase):

name = "install_files"
outfile = "spack-analyzer-install-files.json"
description = "install file listing read from install_manifest.json"

def run(self):
"""
Load in the install_manifest.json and save to analyzers.

We write it out to the analyzers folder, with key as the analyzer name.
"""
manifest_file = os.path.join(self.meta_dir, "install_manifest.json")
return {self.name: spack.monitor.read_json(manifest_file)}
@@ -1,116 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


import os

import llnl.util.tty as tty

import spack
import spack.binary_distribution
import spack.bootstrap
import spack.error
import spack.hooks
import spack.monitor
import spack.package
import spack.repo

from .analyzer_base import AnalyzerBase


class Libabigail(AnalyzerBase):

name = "libabigail"
outfile = "spack-analyzer-libabigail.json"
description = "Application Binary Interface (ABI) features for objects"

def __init__(self, spec, dirname=None):
"""
init for an analyzer ensures we have all needed dependencies.

For the libabigail analyzer, this means Libabigail.
Since the output for libabigail is one file per object, we communicate
with the monitor multiple times.
"""
super(Libabigail, self).__init__(spec, dirname)

# This doesn't seem to work to import on the module level
tty.debug("Preparing to use Libabigail, will install if missing.")

with spack.bootstrap.ensure_bootstrap_configuration():

# libabigail won't install lib/bin/share without docs
spec = spack.spec.Spec("libabigail+docs")
spec.concretize()

self.abidw = spack.bootstrap.get_executable(
"abidw", spec=spec, install=True)

def run(self):
"""
Run libabigail, and save results to filename.

This run function differs in that we write as we generate and then
return a dict with the analyzer name as the key, and the value of a
dict of results, where the key is the object name, and the value is
the output file written to.
"""
manifest = spack.binary_distribution.get_buildfile_manifest(self.spec)

# This result will store a path to each file
result = {}

# Generate an output file for each binary or object
for obj in manifest.get("binary_to_relocate_fullpath", []):

# We want to preserve the path in the install directory in case
# a library has an equivalently named lib or executable, for example
outdir = os.path.dirname(obj.replace(self.spec.package.prefix,
'').strip(os.path.sep))
outfile = "spack-analyzer-libabigail-%s.xml" % os.path.basename(obj)
outfile = os.path.join(self.output_dir, outdir, outfile)
outdir = os.path.dirname(outfile)

# Create the output directory
if not os.path.exists(outdir):
os.makedirs(outdir)

# Sometimes libabigail segfaults and dumps
try:
self.abidw(obj, "--out-file", outfile)
result[obj] = outfile
tty.info("Writing result to %s" % outfile)
except spack.error.SpackError:
tty.warn("Issue running abidw for %s" % obj)

return {self.name: result}

def save_result(self, result, overwrite=False):
"""
Read saved ABI results and upload to monitor server.

ABI results are saved to individual files, so each one needs to be
read and uploaded. Result here should be the lookup generated in run(),
the key is the analyzer name, and each value is the result file.
We currently upload the entire xml as text because libabigail can't
easily read gzipped xml, but this will be updated when it can.
"""
if not spack.monitor.cli:
return

name = self.spec.package.name

for obj, filename in result.get(self.name, {}).items():

# Don't include the prefix
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")

# We've already saved the results to file during run
content = spack.monitor.read_file(filename)

# A result needs an analyzer, value or binary_value, and name
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
spack.hooks.on_analyzer_save(self.spec.package, {"libabigail": [data]})
@@ -2,24 +2,28 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Aggregate the target processor, the operating system and the target
platform into an architecture object.

"""
This module contains all the elements that are required to create an
architecture object. These include the target processor, the operating system,
and the architecture platform (i.e. cray, darwin, linux, etc) classes.

On a multiple architecture machine, the architecture spec field can be set to
build a package against any target and operating system that is present on the
platform. On Cray platforms or any other architecture that has different front
and back end environments, the operating system will determine the method of
compiler detection.
compiler
detection.

There are two different types of compiler detection:

1. Through the $PATH env variable (front-end detection)
2. Through the module system. (back-end detection)
2. Through the tcl module system. (back-end detection)

Depending on which operating system is specified, the compiler will be detected
using one of those methods.

For platforms such as linux and darwin, the operating system is autodetected.
For platforms such as linux and darwin, the operating system is autodetected
and the target is set to be x86_64.

The command line syntax for specifying an architecture is as follows:

@@ -29,8 +33,10 @@
the command line and Spack will concretize using the default. These defaults
are set in the 'platforms/' directory which contains the different subclasses
for platforms. If the machine has multiple architectures, the user can
also enter frontend, or fe or backend or be. These settings will concretize
to their respective frontend and backend targets and operating systems.
also enter front-end, or fe or back-end or be. These settings will concretize
to their respective front-end and back-end targets and operating systems.
Additional platforms can be added by creating a subclass of Platform
and adding it inside the platform directory.

Platforms are an abstract class that are extended by subclasses. If the user
wants to add a new type of platform (such as cray_xe), they can create a
@@ -41,33 +47,333 @@
new platform is added and the user wants that to be detected first.

Targets are created inside the platform subclasses. Most architecture
(like linux, and darwin) will have only one target family (x86_64) but in the case of
(like linux, and darwin) will have only one target (x86_64) but in the case of
Cray machines, there is both a frontend and backend processor. The user can
specify which targets are present on front-end and back-end architecture

Depending on the platform, operating systems are either autodetected or are
set. The user can set the frontend and backend operating setting by the class
Depending on the platform, operating systems are either auto-detected or are
set. The user can set the front-end and back-end operating setting by the class
attributes front_os and back_os. The operating system as described earlier,
will be responsible for compiler detection.
"""
import contextlib
import functools
import inspect
import warnings

import archspec.cpu
import six

import llnl.util.lang as lang
import llnl.util.tty as tty
from llnl.util.lang import memoized, list_modules, key_ordering

import spack.compiler
import spack.compilers
import spack.config
import spack.operating_systems
import spack.platforms
import spack.spec
import spack.target
import spack.util.spack_yaml as syaml
import spack.paths
import spack.error as serr
import spack.util.executable
import spack.version
from spack.util.naming import mod_to_class
from spack.util.spack_yaml import syaml_dict


@lang.lazy_lexicographic_ordering
class NoPlatformError(serr.SpackError):
def __init__(self):
super(NoPlatformError, self).__init__(
"Could not determine a platform for this machine.")


def _ensure_other_is_target(method):
"""Decorator to be used in dunder methods taking a single argument to
ensure that the argument is an instance of ``Target`` too.
"""
@functools.wraps(method)
def _impl(self, other):
if isinstance(other, six.string_types):
other = Target(other)

if not isinstance(other, Target):
return NotImplemented

return method(self, other)

return _impl

class Target(object):
def __init__(self, name, module_name=None):
"""Target models microarchitectures and their compatibility.

Args:
name (str or Microarchitecture): micro-architecture of the
target
module_name (str): optional module name to get access to the
current target. This is typically used on machines
like Cray (e.g. craype-compiler)
"""
if not isinstance(name, archspec.cpu.Microarchitecture):
name = archspec.cpu.TARGETS.get(
name, archspec.cpu.generic_microarchitecture(name)
)
self.microarchitecture = name
self.module_name = module_name

@property
def name(self):
return self.microarchitecture.name

@_ensure_other_is_target
def __eq__(self, other):
return self.microarchitecture == other.microarchitecture and \
self.module_name == other.module_name

def __ne__(self, other):
# This method is necessary as long as we support Python 2. In Python 3
# __ne__ defaults to the implementation below
return not self == other

@_ensure_other_is_target
def __lt__(self, other):
# TODO: In the future it would be convenient to say
# TODO: `spec.architecture.target < other.architecture.target`
# TODO: and change the semantic of the comparison operators

# This is needed to sort deterministically specs in a list.
# It doesn't implement a total ordering semantic.
return self.microarchitecture.name < other.microarchitecture.name

def __hash__(self):
return hash((self.name, self.module_name))

@staticmethod
def from_dict_or_value(dict_or_value):
# A string here represents a generic target (like x86_64 or ppc64) or
# a custom micro-architecture
if isinstance(dict_or_value, six.string_types):
return Target(dict_or_value)

# TODO: From a dict we actually retrieve much more information than
# TODO: just the name. We can use that information to reconstruct an
# TODO: "old" micro-architecture or check the current definition.
target_info = dict_or_value
return Target(target_info['name'])

def to_dict_or_value(self):
"""Returns a dict or a value representing the current target.

String values are used to keep backward compatibility with generic
targets, like e.g. x86_64 or ppc64. More specific micro-architectures
will return a dictionary which contains information on the name,
features, vendor, generation and parents of the current target.
"""
# Generic targets represent either an architecture
# family (like x86_64) or a custom micro-architecture
if self.microarchitecture.vendor == 'generic':
return str(self)

return syaml_dict(
self.microarchitecture.to_dict(return_list_of_items=True)
)

def __repr__(self):
cls_name = self.__class__.__name__
fmt = cls_name + '({0}, {1})'
return fmt.format(repr(self.microarchitecture),
repr(self.module_name))

def __str__(self):
return str(self.microarchitecture)

def __contains__(self, cpu_flag):
return cpu_flag in self.microarchitecture

def optimization_flags(self, compiler):
"""Returns the flags needed to optimize for this target using
the compiler passed as argument.

Args:
compiler (CompilerSpec or Compiler): object that contains both the
name and the version of the compiler we want to use
"""
# Mixed toolchains are not supported yet
import spack.compilers
if isinstance(compiler, spack.compiler.Compiler):
if spack.compilers.is_mixed_toolchain(compiler):
msg = ('microarchitecture specific optimizations are not '
'supported yet on mixed compiler toolchains [check'
' {0.name}@{0.version} for further details]')
warnings.warn(msg.format(compiler))
return ''

# Try to check if the current compiler comes with a version number or
# has an unexpected suffix. If so, treat it as a compiler with a
# custom spec.
compiler_version = compiler.version
version_number, suffix = archspec.cpu.version_components(
compiler.version
)
if not version_number or suffix not in ('', 'apple'):
# Try to deduce the underlying version of the compiler, regardless
# of its name in compilers.yaml. Depending on where this function
# is called we might get either a CompilerSpec or a fully fledged
# compiler object.
import spack.spec
if isinstance(compiler, spack.spec.CompilerSpec):
compiler = spack.compilers.compilers_for_spec(compiler).pop()
try:
compiler_version = compiler.real_version
except spack.util.executable.ProcessError as e:
# log this and just return compiler.version instead
tty.debug(str(e))

return self.microarchitecture.optimization_flags(
compiler.name, str(compiler_version)
)


@key_ordering
class Platform(object):
""" Abstract class that each type of Platform will subclass.
Will return an instance of it once it is returned.
"""

# Subclass sets number. Controls detection order
priority = None # type: int

#: binary formats used on this platform; used by relocation logic
binary_formats = ['elf']

front_end = None # type: str
back_end = None # type: str
default = None # type: str # The default back end target.

front_os = None # type: str
back_os = None # type: str
default_os = None # type: str

reserved_targets = ['default_target', 'frontend', 'fe', 'backend', 'be']
reserved_oss = ['default_os', 'frontend', 'fe', 'backend', 'be']

def __init__(self, name):
self.targets = {}
self.operating_sys = {}
self.name = name

def add_target(self, name, target):
"""Used by the platform specific subclass to list available targets.
Raises an error if the platform specifies a name
that is reserved by spack as an alias.
"""
if name in Platform.reserved_targets:
raise ValueError(
"%s is a spack reserved alias "
"and cannot be the name of a target" % name)
self.targets[name] = target

def target(self, name):
"""This is a getter method for the target dictionary
that handles defaulting based on the values provided by default,
front-end, and back-end. This can be overwritten
by a subclass for which we want to provide further aliasing options.
"""
# TODO: Check if we can avoid using strings here
name = str(name)
if name == 'default_target':
name = self.default
elif name == 'frontend' or name == 'fe':
name = self.front_end
elif name == 'backend' or name == 'be':
name = self.back_end

return self.targets.get(name, None)

def add_operating_system(self, name, os_class):
""" Add the operating_system class object into the
platform.operating_sys dictionary
"""
if name in Platform.reserved_oss:
raise ValueError(
"%s is a spack reserved alias "
"and cannot be the name of an OS" % name)
self.operating_sys[name] = os_class

def operating_system(self, name):
if name == 'default_os':
name = self.default_os
if name == 'frontend' or name == "fe":
name = self.front_os
if name == 'backend' or name == 'be':
name = self.back_os

return self.operating_sys.get(name, None)

@classmethod
def setup_platform_environment(cls, pkg, env):
""" Subclass can override this method if it requires any
platform-specific build environment modifications.
"""

@classmethod
def detect(cls):
""" Subclass is responsible for implementing this method.
Returns True if the Platform class detects that
it is the current platform
and False if it's not.
"""
raise NotImplementedError()

def __repr__(self):
return self.__str__()

def __str__(self):
return self.name

def _cmp_key(self):
t_keys = ''.join(str(t._cmp_key()) for t in
sorted(self.targets.values()))
o_keys = ''.join(str(o._cmp_key()) for o in
sorted(self.operating_sys.values()))
return (self.name,
self.default,
self.front_end,
self.back_end,
self.default_os,
self.front_os,
self.back_os,
t_keys,
o_keys)


@key_ordering
class OperatingSystem(object):
""" Operating System is a class, similar to Platform, that is extended
by subclasses for the specifics. Operating System contains the
compiler-finding logic: instead of calling two separate methods to
find compilers, we call the find_compilers method for each operating system.
"""

def __init__(self, name, version):
self.name = name.replace('-', '_')
self.version = str(version).replace('-', '_')

def __str__(self):
return "%s%s" % (self.name, self.version)

def __repr__(self):
return self.__str__()

def _cmp_key(self):
return (self.name, self.version)

def to_dict(self):
return syaml_dict([
('name', self.name),
('version', self.version)
])


@key_ordering
|
||||
class Arch(object):
|
||||
"""Architecture is now a class to help with setting attributes.
|
||||
|
||||
@@ -89,13 +395,11 @@ def __init__(self, plat=None, os=None, target=None):
|
||||
|
||||
@property
|
||||
def concrete(self):
|
||||
return all(
|
||||
(self.platform is not None,
|
||||
isinstance(self.platform, spack.platforms.Platform),
|
||||
self.os is not None,
|
||||
isinstance(self.os, spack.operating_systems.OperatingSystem),
|
||||
self.target is not None, isinstance(self.target, spack.target.Target))
|
||||
)
|
||||
return all((self.platform is not None,
|
||||
isinstance(self.platform, Platform),
|
||||
self.os is not None,
|
||||
isinstance(self.os, OperatingSystem),
|
||||
self.target is not None, isinstance(self.target, Target)))
|
||||
|
||||
def __str__(self):
|
||||
if self.platform or self.os or self.target:
|
||||
@@ -119,29 +423,28 @@ def __nonzero__(self):
|
||||
self.target is not None)
|
||||
__bool__ = __nonzero__
|
||||
|
||||
def _cmp_iter(self):
|
||||
if isinstance(self.platform, spack.platforms.Platform):
|
||||
yield self.platform.name
|
||||
def _cmp_key(self):
|
||||
if isinstance(self.platform, Platform):
|
||||
platform = self.platform.name
|
||||
else:
|
||||
yield self.platform
|
||||
|
||||
if isinstance(self.os, spack.operating_systems.OperatingSystem):
|
||||
yield self.os.name
|
||||
platform = self.platform
|
||||
if isinstance(self.os, OperatingSystem):
|
||||
os = self.os.name
|
||||
else:
|
||||
yield self.os
|
||||
|
||||
if isinstance(self.target, spack.target.Target):
|
||||
yield self.target.microarchitecture
|
||||
os = self.os
|
||||
if isinstance(self.target, Target):
|
||||
target = self.target.microarchitecture
|
||||
else:
|
||||
yield self.target
|
||||
target = self.target
|
||||
return (platform, os, target)
|
||||
|
||||
def to_dict(self):
|
||||
str_or_none = lambda v: str(v) if v else None
|
||||
d = syaml.syaml_dict([
|
||||
d = syaml_dict([
|
||||
('platform', str_or_none(self.platform)),
|
||||
('platform_os', str_or_none(self.os)),
|
||||
('target', self.target.to_dict_or_value())])
|
||||
return syaml.syaml_dict([('arch', d)])
|
||||
return syaml_dict([('arch', d)])
|
||||
|
||||
def to_spec(self):
|
||||
"""Convert this Arch to an anonymous Spec with architecture defined."""
|
||||
@@ -155,25 +458,79 @@ def from_dict(d):
    return arch_for_spec(spec)


@memoized
def get_platform(platform_name):
    """Returns a platform object that corresponds to the given name."""
    platform_list = all_platforms()
    for p in platform_list:
        if platform_name.replace("_", "").lower() == p.__name__.lower():
            return p()


def verify_platform(platform_name):
    """Determines whether or not the platform with the given name is supported
    in Spack. For more information, see the 'spack.platforms' submodule.
    """
    platform_name = platform_name.replace("_", "").lower()
    platform_names = [p.__name__.lower() for p in all_platforms()]

    if platform_name not in platform_names:
        tty.die("%s is not a supported platform; supported platforms are %s" %
                (platform_name, platform_names))


def arch_for_spec(arch_spec):
    """Transforms the given architecture spec into an architecture object."""
    arch_spec = spack.spec.ArchSpec(arch_spec)
    assert arch_spec.concrete

    arch_plat = spack.platforms.by_name(arch_spec.platform)
    arch_plat = get_platform(arch_spec.platform)
    if not (arch_plat.operating_system(arch_spec.os) and
            arch_plat.target(arch_spec.target)):
        sys_type = str(default_arch())
        msg = ("Can't recreate arch for spec {0} on current arch {1}; "
               "spec architecture is too different")
        raise ValueError(msg.format(arch_spec, sys_type))
        raise ValueError(
            "Can't recreate arch for spec %s on current arch %s; "
            "spec architecture is too different" % (arch_spec, sys_type()))

    return Arch(arch_plat, arch_spec.os, arch_spec.target)


@lang.memoized
@memoized
def _all_platforms():
    classes = []
    mod_path = spack.paths.platform_path
    parent_module = "spack.platforms"

    for name in list_modules(mod_path):
        mod_name = '%s.%s' % (parent_module, name)
        class_name = mod_to_class(name)
        mod = __import__(mod_name, fromlist=[class_name])
        if not hasattr(mod, class_name):
            tty.die('No class %s defined in %s' % (class_name, mod_name))
        cls = getattr(mod, class_name)
        if not inspect.isclass(cls):
            tty.die('%s.%s is not a class' % (mod_name, class_name))

        classes.append(cls)

    return classes


@memoized
def _platform():
    return spack.platforms.host()
    """Detects the platform for this machine.

    Gathers a list of all available subclasses of platforms.
    Sorts the list according to their priority. Priority is
    an arbitrarily set number. Detects the platform either using uname or
    a file path (/opt/cray...)
    """
    # Try to create a Platform object using the config file FIRST
    platform_list = _all_platforms()
    platform_list.sort(key=lambda a: a.priority)

    for platform_cls in platform_list:
        if platform_cls.detect():
            return platform_cls()


#: The "real" platform of the host running Spack. This should not be changed
@@ -184,23 +541,44 @@ def _platform():
#: context manager.
platform = _platform

#: The list of all platform classes. May be swapped by the use_platform
#: context manager.
all_platforms = _all_platforms

@lang.memoized

@memoized
def default_arch():
    """Default ``Arch`` object for this machine"""
    """Default ``Arch`` object for this machine.

    See ``sys_type()``.
    """
    return Arch(platform(), 'default_os', 'default_target')


@lang.memoized
def compatible_sys_types():
    """Return a list of all the platform-os-target tuples compatible
    with the current host.
def sys_type():
    """Print out the "default" platform-os-target tuple for this machine.

    On machines with only one target OS/target, prints out the
    platform-os-target for the frontend. For machines with a frontend
    and a backend, prints the default backend.

    TODO: replace with use of more explicit methods to get *all* the
    backends, as client code should really be aware of cross-compiled
    architectures.

    """
    return str(default_arch())


@memoized
def compatible_sys_types():
    """Returns a list of all the systypes compatible with the current host."""
    compatible_archs = []
    current_host = archspec.cpu.host()
    compatible_targets = [current_host] + current_host.ancestors
    compatible_archs = [
        str(Arch(platform(), 'default_os', target)) for target in compatible_targets
    ]
    for target in compatible_targets:
        arch = Arch(platform(), 'default_os', target)
        compatible_archs.append(str(arch))
    return compatible_archs

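Taken together, `sys_type()` and `compatible_sys_types()` expose the default platform-os-target triplet and the set of triplets the host can run. A minimal sketch of driving them from a Spack session (assuming a Spack checkout provides the `spack` package on `sys.path`; `sys_type()` is the branch-side spelling):

```python
# Sketch: querying the architecture helpers shown above.
# Assumes `spack` is importable from a Spack checkout.
import spack.architecture as architecture

print(architecture.sys_type())        # e.g. "linux-ubuntu20.04-skylake"
for arch in architecture.compatible_sys_types():
    print(arch)                       # host target plus its generic ancestors
```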
@@ -218,25 +596,23 @@ def __call__(self):

@contextlib.contextmanager
def use_platform(new_platform):
    global platform
    global platform, all_platforms

    msg = '"{0}" must be an instance of Platform'
    assert isinstance(new_platform, spack.platforms.Platform), msg.format(new_platform)
    assert isinstance(new_platform, Platform), msg.format(new_platform)

    original_platform_fn = platform
    original_platform_fn, original_all_platforms_fn = platform, all_platforms
    platform = _PickleableCallable(new_platform)
    all_platforms = _PickleableCallable([type(new_platform)])

    try:
        platform = _PickleableCallable(new_platform)
        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
        spack.compilers._cache_config_files = []

        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
        spack.compilers._cache_config_files = []
        yield new_platform

        yield new_platform
        platform, all_platforms = original_platform_fn, original_all_platforms_fn

    finally:
        platform = original_platform_fn

        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
        spack.compilers._cache_config_files = []
        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
        spack.compilers._cache_config_files = []

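Both versions of `use_platform` follow the same pattern: swap the module-level `platform` callable (and, on the develop side, `all_platforms` too), clear the configuration and compiler caches, and restore everything on the way out. A minimal sketch of its intended use, with the `Test` platform assumed here purely for illustration:

```python
# Sketch: temporarily forcing a platform, e.g. inside a test.
# `spack.platforms.Test` is assumed for illustration.
import spack.architecture
import spack.platforms

with spack.architecture.use_platform(spack.platforms.Test()):
    # Inside the block, spack.architecture.platform() returns the
    # substituted platform and the caches have been invalidated.
    arch = spack.architecture.default_arch()
# On exit the original platform callable and caches are restored.
```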
@@ -1,438 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Classes and functions to register audit checks for various parts of
Spack and run them on-demand.

To register a new class of sanity checks (e.g. sanity checks for
compilers.yaml), the first action required is to create a new AuditClass
object:

.. code-block:: python

   audit_cfgcmp = AuditClass(
       tag='CFG-COMPILER',
       description='Sanity checks on compilers.yaml',
       kwargs=()
   )

This object is to be used as a decorator to register functions
that will each perform a single check:

.. code-block:: python

   @audit_cfgcmp
   def _search_duplicate_compilers(error_cls):
       pass

These functions need to take as arguments the keywords declared when
creating the decorator object plus an ``error_cls`` argument at the
end, acting as a factory to create Error objects. They should return a
(possibly empty) list of errors.

Calls to each of these functions are triggered by the ``run`` method of
the decorator object, which forwards the keyword arguments passed
as input.
"""
import collections
import itertools
import re

from six.moves.urllib.request import urlopen

try:
    from collections.abc import Sequence  # novm
except ImportError:
    from collections import Sequence


#: Map an audit tag to a list of callables implementing checks
CALLBACKS = {}

#: Map a group of checks to the list of related audit tags
GROUPS = collections.defaultdict(list)


class Error(object):
    """Information on an error reported in a test."""
    def __init__(self, summary, details):
        self.summary = summary
        self.details = tuple(details)

    def __str__(self):
        return self.summary + '\n' + '\n'.join([
            '    ' + detail for detail in self.details
        ])

    def __eq__(self, other):
        if self.summary != other.summary or self.details != other.details:
            return False
        return True

    def __hash__(self):
        value = (self.summary, self.details)
        return hash(value)


class AuditClass(Sequence):
    def __init__(self, group, tag, description, kwargs):
        """Return an object that acts as a decorator to register functions
        associated with a specific class of sanity checks.

        Args:
            group (str): group in which this check is to be inserted
            tag (str): tag uniquely identifying the class of sanity checks
            description (str): description of the sanity checks performed
                by this tag
            kwargs (tuple of str): keyword arguments that each registered
                function needs to accept
        """
        if tag in CALLBACKS:
            msg = 'audit class "{0}" already registered'
            raise ValueError(msg.format(tag))

        self.group = group
        self.tag = tag
        self.description = description
        self.kwargs = kwargs
        self.callbacks = []

        # Init the list of hooks
        CALLBACKS[self.tag] = self

        # Update the list of tags in the group
        GROUPS[self.group].append(self.tag)

    def __call__(self, func):
        self.callbacks.append(func)

    def __getitem__(self, item):
        return self.callbacks[item]

    def __len__(self):
        return len(self.callbacks)

    def run(self, **kwargs):
        msg = 'please pass "{0}" as keyword arguments'
        msg = msg.format(', '.join(self.kwargs))
        assert set(self.kwargs) == set(kwargs), msg

        errors = []
        kwargs['error_cls'] = Error
        for fn in self.callbacks:
            errors.extend(fn(**kwargs))

        return errors


def run_group(group, **kwargs):
    """Run the checks that are part of the group passed as argument.

    Args:
        group (str): group of checks to be run
        **kwargs: keyword arguments forwarded to the checks

    Returns:
        List of (tag, errors) that failed.
    """
    reports = []
    for check in GROUPS[group]:
        errors = run_check(check, **kwargs)
        reports.append((check, errors))
    return reports


def run_check(tag, **kwargs):
    """Run the checks associated with a single tag.

    Args:
        tag (str): tag of the check
        **kwargs: keyword arguments forwarded to the checks

    Returns:
        Errors occurred during the checks
    """
    return CALLBACKS[tag].run(**kwargs)


# TODO: For the generic check to be useful for end users,
# TODO: we need to implement hooks like described in
# TODO: https://github.com/spack/spack/pull/23053/files#r630265011
#: Generic checks relying on global state
generic = AuditClass(
    group='generic',
    tag='GENERIC',
    description='Generic checks relying on global variables',
    kwargs=()
)


#: Sanity checks on compilers.yaml
config_compiler = AuditClass(
    group='configs',
    tag='CFG-COMPILER',
    description='Sanity checks on compilers.yaml',
    kwargs=()
)


@config_compiler
def _search_duplicate_compilers(error_cls):
    """Report compilers with the same spec and two different definitions"""
    import spack.config
    errors = []

    compilers = list(sorted(
        spack.config.get('compilers'), key=lambda x: x['compiler']['spec']
    ))
    for spec, group in itertools.groupby(
            compilers, key=lambda x: x['compiler']['spec']
    ):
        group = list(group)
        if len(group) == 1:
            continue

        error_msg = 'Compiler defined multiple times: {0}'
        try:
            details = [str(x._start_mark).strip() for x in group]
        except Exception:
            details = []
        errors.append(error_cls(
            summary=error_msg.format(spec), details=details
        ))

    return errors


#: Sanity checks on packages.yaml
config_packages = AuditClass(
    group='configs',
    tag='CFG-PACKAGES',
    description='Sanity checks on packages.yaml',
    kwargs=()
)


@config_packages
def _search_duplicate_specs_in_externals(error_cls):
    """Search for duplicate specs declared as externals"""
    import spack.config

    errors, externals = [], collections.defaultdict(list)
    packages_yaml = spack.config.get('packages')

    for name, pkg_config in packages_yaml.items():
        # No externals can be declared under all
        if name == 'all' or 'externals' not in pkg_config:
            continue

        current_externals = pkg_config['externals']
        for entry in current_externals:
            # Ask for the string representation of the spec to normalize
            # aspects of the spec that may be represented in multiple ways
            # e.g. +foo or foo=true
            key = str(spack.spec.Spec(entry['spec']))
            externals[key].append(entry)

    for spec, entries in sorted(externals.items()):
        # If there's a single external for a spec we are fine
        if len(entries) < 2:
            continue

        # Otherwise we need to report an error
        error_msg = 'Multiple externals share the same spec: {0}'.format(spec)
        try:
            lines = [str(x._start_mark).strip() for x in entries]
            details = [
                'Please remove all but one of the following entries:'
            ] + lines + [
                'as they might result in non-deterministic hashes'
            ]
        except TypeError:
            details = []

        errors.append(error_cls(summary=error_msg, details=details))

    return errors


#: Sanity checks on package directives
package_directives = AuditClass(
    group='packages',
    tag='PKG-DIRECTIVES',
    description='Sanity checks on specs used in directives',
    kwargs=('pkgs',)
)

#: Sanity checks on linting
# This can take some time, so it's run separately from packages
package_https_directives = AuditClass(
    group='packages-https',
    tag='PKG-HTTPS-DIRECTIVES',
    description='Sanity checks on https checks of package urls, etc.',
    kwargs=('pkgs',)
)


@package_https_directives
def _linting_package_file(pkgs, error_cls):
    """Check for correctness of links"""
    import llnl.util.lang

    import spack.repo
    import spack.spec

    errors = []
    for pkg_name in pkgs:
        pkg = spack.repo.get(pkg_name)

        # Does the homepage have http, and if so, does https work?
        if pkg.homepage.startswith('http://'):
            https = re.sub("http", "https", pkg.homepage, 1)
            try:
                response = urlopen(https)
            except Exception as e:
                msg = 'Error with attempting https for "{0}": '
                errors.append(error_cls(msg.format(pkg.name), [str(e)]))
                continue

            if response.getcode() == 200:
                msg = 'Package "{0}" uses http but has a valid https endpoint.'
                errors.append(msg.format(pkg.name))

    return llnl.util.lang.dedupe(errors)


@package_directives
def _unknown_variants_in_directives(pkgs, error_cls):
    """Report unknown or wrong variants in directives for this package"""
    import llnl.util.lang

    import spack.repo
    import spack.spec

    errors = []
    for pkg_name in pkgs:
        pkg = spack.repo.get(pkg_name)

        # Check "conflicts" directive
        for conflict, triggers in pkg.conflicts.items():
            for trigger, _ in triggers:
                vrn = spack.spec.Spec(conflict)
                try:
                    vrn.constrain(trigger)
                except Exception as e:
                    msg = 'Generic error in conflict for package "{0}": '
                    errors.append(error_cls(msg.format(pkg.name), [str(e)]))
                    continue
                errors.extend(_analyze_variants_in_directive(
                    pkg, vrn, directive='conflicts', error_cls=error_cls
                ))

        # Check "depends_on" directive
        for _, triggers in pkg.dependencies.items():
            triggers = list(triggers)
            for trigger in list(triggers):
                vrn = spack.spec.Spec(trigger)
                errors.extend(_analyze_variants_in_directive(
                    pkg, vrn, directive='depends_on', error_cls=error_cls
                ))

        # Check "patch" directive
        for _, triggers in pkg.provided.items():
            triggers = [spack.spec.Spec(x) for x in triggers]
            for vrn in triggers:
                errors.extend(_analyze_variants_in_directive(
                    pkg, vrn, directive='patch', error_cls=error_cls
                ))

        # Check "resource" directive
        for vrn in pkg.resources:
            errors.extend(_analyze_variants_in_directive(
                pkg, vrn, directive='resource', error_cls=error_cls
            ))

    return llnl.util.lang.dedupe(errors)


@package_directives
def _unknown_variants_in_dependencies(pkgs, error_cls):
    """Report unknown dependencies and wrong variants for dependencies"""
    import spack.repo
    import spack.spec

    errors = []
    for pkg_name in pkgs:
        pkg = spack.repo.get(pkg_name)
        filename = spack.repo.path.filename_for_package_name(pkg_name)
        for dependency_name, dependency_data in pkg.dependencies.items():
            # No need to analyze virtual packages
            if spack.repo.path.is_virtual(dependency_name):
                continue

            try:
                dependency_pkg = spack.repo.get(dependency_name)
            except spack.repo.UnknownPackageError:
                # This dependency is completely missing, so report
                # and continue the analysis
                summary = (pkg_name + ": unknown package '{0}' in "
                           "'depends_on' directive".format(dependency_name))
                details = [
                    " in " + filename
                ]
                errors.append(error_cls(summary=summary, details=details))
                continue

            for _, dependency_edge in dependency_data.items():
                dependency_variants = dependency_edge.spec.variants
                for name, value in dependency_variants.items():
                    try:
                        dependency_pkg.variants[name].validate_or_raise(
                            value, pkg=dependency_pkg
                        )
                    except Exception as e:
                        summary = (pkg_name + ": wrong variant used for a "
                                   "dependency in a 'depends_on' directive")
                        error_msg = str(e).strip()
                        if isinstance(e, KeyError):
                            error_msg = ('the variant {0} does not '
                                         'exist'.format(error_msg))
                        error_msg += " in package '" + dependency_name + "'"

                        errors.append(error_cls(
                            summary=summary, details=[error_msg, 'in ' + filename]
                        ))

    return errors


def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
    import spack.variant
    variant_exceptions = (
        spack.variant.InconsistentValidationError,
        spack.variant.MultipleValuesInExclusiveVariantError,
        spack.variant.InvalidVariantValueError,
        KeyError
    )
    errors = []
    for name, v in constraint.variants.items():
        try:
            pkg.variants[name].validate_or_raise(v, pkg=pkg)
        except variant_exceptions as e:
            summary = pkg.name + ': wrong variant in "{0}" directive'
            summary = summary.format(directive)
            filename = spack.repo.path.filename_for_package_name(pkg.name)

            error_msg = str(e).strip()
            if isinstance(e, KeyError):
                error_msg = 'the variant {0} does not exist'.format(error_msg)

            err = error_cls(summary=summary, details=[
                error_msg, 'in ' + filename
            ])

            errors.append(err)

    return errors
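The deleted module's decorator pattern is easiest to see end to end. A minimal sketch, assuming the develop-side file above is importable as `spack.audit`; the tag and check body below are illustrative, not taken from the diff:

```python
# Sketch: registering and running a custom audit check against the
# AuditClass API shown above. Tag and check body are illustrative.
import spack.audit
import spack.config

audit_cfg_demo = spack.audit.AuditClass(
    group='configs',
    tag='CFG-DEMO',
    description='Demo sanity check',
    kwargs=()
)

@audit_cfg_demo
def _no_empty_compilers(error_cls):
    # error_cls is injected by AuditClass.run(); return a list of errors
    errors = []
    if not spack.config.get('compilers'):
        errors.append(error_cls('no compilers configured', details=[]))
    return errors

# run_group runs every tag registered under 'configs' and collects
# (tag, errors) pairs
for tag, errors in spack.audit.run_group('configs'):
    print(tag, [str(e) for e in errors])
```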
@@ -4,21 +4,22 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import codecs
import glob
import hashlib
import json
import os
import re
import shutil
import sys
import tarfile
import shutil
import tempfile
import traceback
from contextlib import closing

import ruamel.yaml as yaml
import hashlib
import glob
from ordereddict_backport import OrderedDict
from six.moves.urllib.error import HTTPError, URLError

from contextlib import closing
import ruamel.yaml as yaml

import json

from six.moves.urllib.error import URLError, HTTPError

import llnl.util.lang
import llnl.util.tty as tty
@@ -28,21 +29,21 @@
import spack.config as config
import spack.database as spack_db
import spack.fetch_strategy as fs
import spack.hash_types as ht
import spack.hooks.sbang
import spack.mirror
import spack.platforms
import spack.relocate as relocate
import spack.util.file_cache as file_cache
import spack.relocate as relocate
import spack.util.gpg
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
import spack.mirror
import spack.util.url as url_util
import spack.util.web as web_util
from spack.caches import misc_cache_location
from spack.spec import Spec
from spack.stage import Stage


#: default root, relative to the Spack install path
default_binary_index_root = os.path.join(spack.paths.opt_path, 'spack')

_build_cache_relative_path = 'build_cache'
_build_cache_keys_relative_path = '_pgp'

@@ -66,8 +67,9 @@ class BinaryCacheIndex(object):
        mean we should have paid the price to update the cache earlier?
    """

    def __init__(self, cache_root):
        self._index_cache_root = cache_root
    def __init__(self, cache_root=None):
        self._cache_root = cache_root or default_binary_index_root
        self._index_cache_root = os.path.join(self._cache_root, 'indices')

        # the key associated with the serialized _local_index_cache
        self._index_contents_key = 'contents.json'
@@ -157,7 +159,7 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):
            with self._index_file_cache.read_transaction(cache_key):
                db._read_from_file(cache_path)

            spec_list = db.query_local(installed=False, in_buildcache=True)
            spec_list = db.query_local(installed=False)

            for indexed_spec in spec_list:
                dag_hash = indexed_spec.dag_hash()
@@ -208,7 +210,7 @@ def find_built_spec(self, spec):
        The cache can be updated by calling ``update()`` on the cache.

        Args:
            spec (spack.spec.Spec): Concrete spec to find
            spec (Spec): Concrete spec to find

        Returns:
            A list of objects containing the found specs and mirror url where
@@ -438,15 +440,13 @@ def _fetch_and_cache_index(self, mirror_url, expect_hash=None):
        return True


def binary_index_location():
    """Set up a BinaryCacheIndex for remote buildcache dbs in the user's homedir."""
    cache_root = os.path.join(misc_cache_location(), 'indices')
    return spack.util.path.canonicalize_path(cache_root)


def _binary_index():
    """Get the singleton store instance."""
    return BinaryCacheIndex(binary_index_location())
    cache_root = spack.config.get(
        'config:binary_index_root', default_binary_index_root)
    cache_root = spack.util.path.canonicalize_path(cache_root)

    return BinaryCacheIndex(cache_root)


#: Singleton binary_index instance
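The branch side makes the index root configurable per instance: `BinaryCacheIndex` now takes an optional root and the singleton reads `config:binary_index_root` instead of deriving the path from the misc cache. A minimal sketch of the two call styles, with an illustrative path:

```python
# Sketch: per-instance binary cache index roots on the branch side.
# The explicit path below is illustrative only.
import spack.binary_distribution as bindist

default_index = bindist.BinaryCacheIndex()           # config or built-in default root
pinned_index = bindist.BinaryCacheIndex('/tmp/bc')   # explicitly pinned root
```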
@@ -551,82 +551,65 @@ def read_buildinfo_file(prefix):
    return buildinfo


def get_buildfile_manifest(spec):
    """
    Return a data structure with information about a build, including
    text_to_relocate, binary_to_relocate, binary_to_relocate_fullpath,
    link_to_relocate, and other, which means it doesn't fit any of the previous
    checks (and should not be relocated). We blacklist docs (man) and
    metadata (.spack). This can be used to find a particular kind of file
    in spack, or to generate the build metadata.
    """
    data = {"text_to_relocate": [], "binary_to_relocate": [],
            "link_to_relocate": [], "other": [],
            "binary_to_relocate_fullpath": []}

    blacklist = (".spack", "man")

    # Do this during tarball creation to save time when the tarball is unpacked.
    # Used by make_package_relative to determine binaries to change.
    for root, dirs, files in os.walk(spec.prefix, topdown=True):
        dirs[:] = [d for d in dirs if d not in blacklist]
        for filename in files:
            path_name = os.path.join(root, filename)
            m_type, m_subtype = relocate.mime_type(path_name)
            rel_path_name = os.path.relpath(path_name, spec.prefix)
            added = False

            if os.path.islink(path_name):
                link = os.readlink(path_name)
                if os.path.isabs(link):
                    # Relocate absolute links into the spack tree
                    if link.startswith(spack.store.layout.root):
                        data['link_to_relocate'].append(rel_path_name)
                    added = True

            if relocate.needs_binary_relocation(m_type, m_subtype):
                if ((m_subtype in ('x-executable', 'x-sharedlib', 'x-pie-executable')
                     and sys.platform != 'darwin') or
                    (m_subtype in ('x-mach-binary')
                     and sys.platform == 'darwin') or
                    (not filename.endswith('.o'))):
                    data['binary_to_relocate'].append(rel_path_name)
                    data['binary_to_relocate_fullpath'].append(path_name)
                    added = True

            if relocate.needs_text_relocation(m_type, m_subtype):
                data['text_to_relocate'].append(rel_path_name)
                added = True

            if not added:
                data['other'].append(path_name)
    return data


def write_buildinfo_file(spec, workdir, rel=False):
    """
    Create a cache file containing information
    required for the relocation
    """
    manifest = get_buildfile_manifest(spec)

    prefix = spec.prefix
    text_to_relocate = []
    binary_to_relocate = []
    link_to_relocate = []
    blacklist = (".spack", "man")
    prefix_to_hash = dict()
    prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
    deps = spack.build_environment.get_rpath_deps(spec.package)
    for d in deps:
        prefix_to_hash[str(d.prefix)] = d.dag_hash()
    # Do this during tarball creation to save time when the tarball is unpacked.
    # Used by make_package_relative to determine binaries to change.
    for root, dirs, files in os.walk(prefix, topdown=True):
        dirs[:] = [d for d in dirs if d not in blacklist]
        for filename in files:
            path_name = os.path.join(root, filename)
            m_type, m_subtype = relocate.mime_type(path_name)
            if os.path.islink(path_name):
                link = os.readlink(path_name)
                if os.path.isabs(link):
                    # Relocate absolute links into the spack tree
                    if link.startswith(spack.store.layout.root):
                        rel_path_name = os.path.relpath(path_name, prefix)
                        link_to_relocate.append(rel_path_name)
                    else:
                        msg = 'Absolute link %s to %s ' % (path_name, link)
                        msg += 'outside of prefix %s ' % prefix
                        msg += 'should not be relocated.'
                        tty.warn(msg)

            if relocate.needs_binary_relocation(m_type, m_subtype):
                if ((m_subtype in ('x-executable', 'x-sharedlib')
                     and sys.platform != 'darwin') or
                    (m_subtype in ('x-mach-binary')
                     and sys.platform == 'darwin') or
                    (not filename.endswith('.o'))):
                    rel_path_name = os.path.relpath(path_name, prefix)
                    binary_to_relocate.append(rel_path_name)
            if relocate.needs_text_relocation(m_type, m_subtype):
                rel_path_name = os.path.relpath(path_name, prefix)
                text_to_relocate.append(rel_path_name)

    # Create buildinfo data and write it to disk
    import spack.hooks.sbang as sbang
    buildinfo = {}
    buildinfo['sbang_install_path'] = spack.hooks.sbang.sbang_install_path()
    buildinfo['sbang_install_path'] = sbang.sbang_install_path()
    buildinfo['relative_rpaths'] = rel
    buildinfo['buildpath'] = spack.store.layout.root
    buildinfo['spackprefix'] = spack.paths.prefix
    buildinfo['relative_prefix'] = os.path.relpath(
        spec.prefix, spack.store.layout.root)
    buildinfo['relocate_textfiles'] = manifest['text_to_relocate']
    buildinfo['relocate_binaries'] = manifest['binary_to_relocate']
    buildinfo['relocate_links'] = manifest['link_to_relocate']
        prefix, spack.store.layout.root)
    buildinfo['relocate_textfiles'] = text_to_relocate
    buildinfo['relocate_binaries'] = binary_to_relocate
    buildinfo['relocate_links'] = link_to_relocate
    buildinfo['prefix_to_hash'] = prefix_to_hash
    filename = buildinfo_file_name(workdir)
    with open(filename, 'w') as outfile:
@@ -709,14 +692,20 @@ def generate_package_index(cache_prefix):
    """Create the build cache index page.

    Creates (or replaces) the "index.json" page at the location given in
    cache_prefix. This page contains a link for each binary package (.yaml or
    .json) under cache_prefix.
    cache_prefix. This page contains a link for each binary package (.yaml)
    under cache_prefix.
    """
    tmpdir = tempfile.mkdtemp()
    db_root_dir = os.path.join(tmpdir, 'db_root')
    db = spack_db.Database(None, db_dir=db_root_dir,
                           enable_transaction_locking=False,
                           record_fields=['spec', 'ref_count'])

    try:
        file_list = (
            entry
            for entry in web_util.list_url(cache_prefix)
            if entry.endswith('.yaml') or entry.endswith('spec.json'))
            if entry.endswith('.yaml'))
    except KeyError as inst:
        msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
        tty.warn(msg)
@@ -730,97 +719,23 @@ def generate_package_index(cache_prefix):
        tty.warn(msg)
        return

    tty.debug('Retrieving spec descriptor files from {0} to build index'.format(
    tty.debug('Retrieving spec.yaml files from {0} to build index'.format(
        cache_prefix))

    all_mirror_specs = {}

    for file_path in file_list:
        try:
            spec_url = url_util.join(cache_prefix, file_path)
            tty.debug('fetching {0}'.format(spec_url))
            _, _, spec_file = web_util.read_from_url(spec_url)
            spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
            # Need full spec.json name or this gets confused with index.json.
            if spec_url.endswith('.json'):
                spec_dict = sjson.load(spec_file_contents)
                s = Spec.from_json(spec_file_contents)
            elif spec_url.endswith('.yaml'):
                spec_dict = syaml.load(spec_file_contents)
                s = Spec.from_yaml(spec_file_contents)
            all_mirror_specs[s.dag_hash()] = {
                'spec_url': spec_url,
                'spec': s,
                'num_deps': len(list(s.traverse(root=False))),
                'binary_cache_checksum': spec_dict['binary_cache_checksum'],
                'buildinfo': spec_dict['buildinfo'],
            }
            yaml_url = url_util.join(cache_prefix, file_path)
            tty.debug('fetching {0}'.format(yaml_url))
            _, _, yaml_file = web_util.read_from_url(yaml_url)
            yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
            # yaml_obj = syaml.load(yaml_contents)
            # s = Spec.from_yaml(yaml_obj)
            s = Spec.from_yaml(yaml_contents)
            db.add(s, None)
        except (URLError, web_util.SpackWebError) as url_err:
            tty.error('Error reading specfile: {0}'.format(file_path))
            tty.error('Error reading spec.yaml: {0}'.format(file_path))
            tty.error(url_err)

    sorted_specs = sorted(all_mirror_specs.keys(),
                          key=lambda k: all_mirror_specs[k]['num_deps'])

    tmpdir = tempfile.mkdtemp()
    db_root_dir = os.path.join(tmpdir, 'db_root')
    db = spack_db.Database(None, db_dir=db_root_dir,
                           enable_transaction_locking=False,
                           record_fields=['spec', 'ref_count', 'in_buildcache'])

    try:
        tty.debug('Specs sorted by number of dependencies:')
        for dag_hash in sorted_specs:
            spec_record = all_mirror_specs[dag_hash]
            s = spec_record['spec']
            num_deps = spec_record['num_deps']
            tty.debug('  {0}/{1} -> {2}'.format(
                s.name, dag_hash[:7], num_deps))
            if num_deps > 0:
                # Check each of this spec's dependencies (which we have already
                # processed), as they are the source of truth for their own
                # full hash. If the full hash we have for any deps does not
                # match what those deps have themselves, then we need to splice
                # this spec with those deps, and push this spliced spec
                # (spec.json file) back to the mirror, as well as update the
                # all_mirror_specs dictionary with this spliced spec.
                to_splice = []
                for dep in s.dependencies():
                    dep_dag_hash = dep.dag_hash()
                    if dep_dag_hash in all_mirror_specs:
                        true_dep = all_mirror_specs[dep_dag_hash]['spec']
                        if true_dep.full_hash() != dep.full_hash():
                            to_splice.append(true_dep)

                if to_splice:
                    tty.debug('  needs the following deps spliced:')
                    for true_dep in to_splice:
                        tty.debug('    {0}/{1}'.format(
                            true_dep.name, true_dep.dag_hash()[:7]))
                        s = s.splice(true_dep, True)

                    # Push this spliced spec back to the mirror
                    spliced_spec_dict = s.to_dict(hash=ht.full_hash)
                    for key in ['binary_cache_checksum', 'buildinfo']:
                        spliced_spec_dict[key] = spec_record[key]

                    temp_json_path = os.path.join(tmpdir, 'spliced.spec.json')
                    with open(temp_json_path, 'w') as fd:
                        fd.write(sjson.dump(spliced_spec_dict))

                    spliced_spec_url = spec_record['spec_url']
                    web_util.push_to_url(
                        temp_json_path, spliced_spec_url, keep_original=False)
                    tty.debug('  spliced and wrote {0}'.format(
                        spliced_spec_url))
                    spec_record['spec'] = s

            db.add(s, None)
            db.mark(s, 'in_buildcache', True)

        # Now that we have fixed any old specfiles that might have had the wrong
        # full hash for their dependencies, we can generate the index, compute
        # the hash, and push those files to the mirror.
        index_json_path = os.path.join(db_root_dir, 'index.json')
        with open(index_json_path, 'w') as f:
            db._write_to_file(f)
@@ -852,7 +767,6 @@ def generate_package_index(cache_prefix):
        msg = 'Encountered problem pushing package index to {0}: {1}'.format(
            cache_prefix, err)
        tty.warn(msg)
        tty.debug('\n' + traceback.format_exc())
    finally:
        shutil.rmtree(tmpdir)
@@ -954,27 +868,19 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    # need to copy the spec file so the build cache can be downloaded
    # without concretizing with the current spack packages
    # and preferences

    spec_file = spack.store.layout.spec_file_path(spec)
    specfile_name = tarball_name(spec, '.spec.json')
    specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name))
    deprecated_specfile_path = specfile_path.replace('.spec.json', '.spec.yaml')
    spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.realpath(
        os.path.join(cache_prefix, specfile_name))

    remote_specfile_path = url_util.join(
        outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
    remote_specfile_path_deprecated = url_util.join(
        outdir, os.path.relpath(deprecated_specfile_path,
                                os.path.realpath(tmpdir)))

    # If force and exists, overwrite. Otherwise raise exception on collision.
    if force:
        if web_util.url_exists(remote_specfile_path):
    if web_util.url_exists(remote_specfile_path):
        if force:
            web_util.remove_url(remote_specfile_path)
        if web_util.url_exists(remote_specfile_path_deprecated):
            web_util.remove_url(remote_specfile_path_deprecated)
    elif (web_util.url_exists(remote_specfile_path) or
          web_util.url_exists(remote_specfile_path_deprecated)):
        raise NoOverwriteException(url_util.format(remote_specfile_path))
        else:
            raise NoOverwriteException(url_util.format(remote_specfile_path))

    # make a copy of the install directory to work with
    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
@@ -1022,23 +928,15 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # add sha256 checksum to spec.json

    # add sha256 checksum to spec.yaml
    with open(spec_file, 'r') as inputfile:
        content = inputfile.read()
        if spec_file.endswith('.yaml'):
            spec_dict = yaml.load(content)
        elif spec_file.endswith('.json'):
            spec_dict = sjson.load(content)
        else:
            raise ValueError(
                '{0} not a valid spec file type (json or yaml)'.format(
                    spec_file))
        spec_dict = yaml.load(content)
    bchecksum = {}
    bchecksum['hash_algorithm'] = 'sha256'
    bchecksum['hash'] = checksum
    spec_dict['binary_cache_checksum'] = bchecksum
    # Add original install prefix relative to layout root to spec.json.
    # Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
    buildinfo = {}
    buildinfo['relative_prefix'] = os.path.relpath(
@@ -1047,7 +945,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    spec_dict['buildinfo'] = buildinfo

    with open(specfile_path, 'w') as outfile:
        outfile.write(sjson.dump(spec_dict))
        outfile.write(syaml.dump(spec_dict))

    # sign the tarball and spec file with gpg
    if not unsigned:
@@ -1101,14 +999,14 @@ def download_tarball(spec, preferred_mirrors=None):
    path to downloaded tarball if successful, None otherwise.

    Args:
        spec (spack.spec.Spec): Concrete spec
        spec (Spec): Concrete spec
        preferred_mirrors (list): If provided, this is a list of preferred
            mirror urls. Other configured mirrors will only be used if the
            tarball can't be retrieved from one of these.
        mirror urls. Other configured mirrors will only be used if the
        tarball can't be retrieved from one of these.

    Returns:
        Path to the downloaded tarball, or ``None`` if the tarball could not
            be downloaded from any configured mirrors.
        be downloaded from any configured mirrors.
    """
    if not spack.mirror.MirrorCollection():
        tty.die("Please add a spack mirror to allow " +
@@ -1157,7 +1055,7 @@ def make_package_relative(workdir, spec, allow_root):
        orig_path_names.append(os.path.join(prefix, filename))
        cur_path_names.append(os.path.join(workdir, filename))

    platform = spack.platforms.by_name(spec.platform)
    platform = spack.architecture.get_platform(spec.platform)
    if 'macho' in platform.binary_formats:
        relocate.make_macho_binaries_relative(
            cur_path_names, orig_path_names, old_layout_root)
@@ -1191,6 +1089,8 @@ def relocate_package(spec, allow_root):
    """
    Relocate the given package
    """
    import spack.hooks.sbang as sbang

    workdir = str(spec.prefix)
    buildinfo = read_buildinfo_file(workdir)
    new_layout_root = str(spack.store.layout.root)
@@ -1229,8 +1129,7 @@ def relocate_package(spec, allow_root):
    prefix_to_prefix_bin = OrderedDict({})

    if old_sbang_install_path:
        install_path = spack.hooks.sbang.sbang_install_path()
        prefix_to_prefix_text[old_sbang_install_path] = install_path
        prefix_to_prefix_text[old_sbang_install_path] = sbang.sbang_install_path()

    prefix_to_prefix_text[old_prefix] = new_prefix
    prefix_to_prefix_bin[old_prefix] = new_prefix
@@ -1244,7 +1143,7 @@ def relocate_package(spec, allow_root):
    # now a POSIX script that lives in the install prefix. Old packages
    # will have the old sbang location in their shebangs.
    orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(old_spack_prefix)
    new_sbang = spack.hooks.sbang.sbang_shebang_line()
    new_sbang = sbang.sbang_shebang_line()
    prefix_to_prefix_text[orig_sbang] = new_sbang

    tty.debug("Relocating package from",
@@ -1262,13 +1161,13 @@ def is_backup_file(file):
            text_names.append(text_name)

    # If we are not installing back to the same install tree do the relocation
    if old_prefix != new_prefix:
    if old_layout_root != new_layout_root:
        files_to_relocate = [os.path.join(workdir, filename)
                             for filename in buildinfo.get('relocate_binaries')
                             ]
        # If the buildcache was not created with relativized rpaths
        # do the relocation of path in binaries
        platform = spack.platforms.by_name(spec.platform)
        platform = spack.architecture.get_platform(spec.platform)
        if 'macho' in platform.binary_formats:
            relocate.relocate_macho_binaries(files_to_relocate,
                                             old_layout_root,
@@ -1327,26 +1226,15 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_is_json = True
    deprecated_yaml_name = tarball_name(spec, '.spec.yaml')
    deprecated_yaml_path = os.path.join(tmpdir, deprecated_yaml_name)
    json_name = tarball_name(spec, '.spec.json')
    json_path = os.path.join(tmpdir, json_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)

    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    # some buildcache tarfiles use bzip2 compression
    if not os.path.exists(tarfile_path):
        tarfile_name = tarball_name(spec, '.tar.bz2')
        tarfile_path = os.path.join(tmpdir, tarfile_name)

    if os.path.exists(json_path):
        specfile_path = json_path
    elif os.path.exists(deprecated_yaml_path):
        specfile_is_json = False
        specfile_path = deprecated_yaml_path
    else:
        raise ValueError('Cannot find spec file for {0}.'.format(tmpdir))

    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
@@ -1369,10 +1257,7 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        if specfile_is_json:
            spec_dict = sjson.load(content)
        else:
            spec_dict = syaml.load(content)
        spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']

    # if the checksums don't match don't install
@@ -1449,39 +1334,27 @@ def try_direct_fetch(spec, full_hash_match=False, mirrors=None):
    """
    Try to find the spec directly on the configured mirrors
    """
    deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_name = tarball_name(spec, '.spec.json')
    specfile_is_json = True
    specfile_name = tarball_name(spec, '.spec.yaml')
    lenient = not full_hash_match
    found_specs = []
    spec_full_hash = spec.full_hash()

    for mirror in spack.mirror.MirrorCollection(mirrors=mirrors).values():
        buildcache_fetch_url_yaml = url_util.join(
            mirror.fetch_url, _build_cache_relative_path, deprecated_specfile_name)
        buildcache_fetch_url_json = url_util.join(
        buildcache_fetch_url = url_util.join(
            mirror.fetch_url, _build_cache_relative_path, specfile_name)

        try:
            _, _, fs = web_util.read_from_url(buildcache_fetch_url_json)
            _, _, fs = web_util.read_from_url(buildcache_fetch_url)
            fetched_spec_yaml = codecs.getreader('utf-8')(fs).read()
        except (URLError, web_util.SpackWebError, HTTPError) as url_err:
            try:
                _, _, fs = web_util.read_from_url(buildcache_fetch_url_yaml)
                specfile_is_json = False
            except (URLError, web_util.SpackWebError, HTTPError) as url_err_y:
                tty.debug('Did not find {0} on {1}'.format(
                    specfile_name, buildcache_fetch_url_json), url_err)
                tty.debug('Did not find {0} on {1}'.format(
                    specfile_name, buildcache_fetch_url_yaml), url_err_y)
                continue
        specfile_contents = codecs.getreader('utf-8')(fs).read()
            tty.debug('Did not find {0} on {1}'.format(
                specfile_name, buildcache_fetch_url), url_err)
            continue

        # read the spec from the build cache file. All specs in build caches
        # are concrete (as they are built) so we need to mark this spec
        # concrete on read-in.
        if specfile_is_json:
            fetched_spec = Spec.from_json(specfile_contents)
        else:
            fetched_spec = Spec.from_yaml(specfile_contents)
        fetched_spec = Spec.from_yaml(fetched_spec_yaml)
        fetched_spec._mark_concrete()

        # Do not recompute the full hash for the fetched spec, instead just
@@ -1502,14 +1375,14 @@ def get_mirrors_for_spec(spec=None, full_hash_match=False,
    indicating the mirrors on which it can be found

    Args:
        spec (spack.spec.Spec): The spec to look for in binary mirrors
        spec (Spec): The spec to look for in binary mirrors
        full_hash_match (bool): If True, only includes mirrors where the spec
            full hash matches the locally computed full hash of the ``spec``
            argument. If False, any mirror which has a matching DAG hash
            is included in the results.
        mirrors_to_check (dict): Optionally override the configured mirrors
            with the mirrors in this dictionary.
        index_only (bool): Do not attempt direct fetching of ``spec.json``
        index_only (bool): Do not attempt direct fetching of ``spec.yaml``
            files from remote mirrors, only consider the indices.

    Return:
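A minimal sketch of how a caller might drive this lookup, with an illustrative spec string:

```python
# Sketch: finding binary mirrors that provide a concrete spec.
# The spec string below is illustrative only.
import spack.binary_distribution as bindist
import spack.spec

spec = spack.spec.Spec('zlib@1.2.11').concretized()
results = bindist.get_mirrors_for_spec(spec, full_hash_match=True)
for entry in results:
    print(entry)  # objects pairing a found spec with the mirror url
```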
@@ -1667,9 +1540,7 @@ def push_keys(*mirrors, **kwargs):
                filename = fingerprint + '.pub'

                export_target = os.path.join(prefix, filename)

                # Export public keys (private is set to False)
                spack.util.gpg.export_keys(export_target, [fingerprint])
                spack.util.gpg.export_keys(export_target, fingerprint)

                # If mirror is local, the above export writes directly to the
                # mirror (export_target points directly to the mirror).
@@ -1706,91 +1577,57 @@ def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
        pkg_name, pkg_version, pkg_hash, pkg_full_hash))
    tty.debug(spec.tree())

    # Try to retrieve the specfile directly, based on the known
    # Try to retrieve the .spec.yaml directly, based on the known
    # format of the name, in order to determine if the package
    # needs to be rebuilt.
    cache_prefix = build_cache_prefix(mirror_url)
    specfile_is_json = True
    specfile_name = tarball_name(spec, '.spec.json')
    deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(cache_prefix, specfile_name)
    deprecated_specfile_path = os.path.join(cache_prefix,
                                            deprecated_specfile_name)
    spec_yaml_file_name = tarball_name(spec, '.spec.yaml')
    file_path = os.path.join(cache_prefix, spec_yaml_file_name)

    result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
        spec.short_spec, '' if rebuild_on_errors else 'not ')

    try:
        _, _, spec_file = web_util.read_from_url(specfile_path)
        _, _, yaml_file = web_util.read_from_url(file_path)
        yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
    except (URLError, web_util.SpackWebError) as url_err:
        try:
            _, _, spec_file = web_util.read_from_url(deprecated_specfile_path)
            specfile_is_json = False
        except (URLError, web_util.SpackWebError) as url_err_y:
            err_msg = [
                'Unable to determine whether {0} needs rebuilding,',
                ' caught exception attempting to read from {1} or {2}.',
            ]
            tty.error(''.join(err_msg).format(
                spec.short_spec,
                specfile_path,
                deprecated_specfile_path))
            tty.debug(url_err)
            tty.debug(url_err_y)
            tty.warn(result_of_error)
            return rebuild_on_errors

    spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
    if not spec_file_contents:
        tty.error('Reading {0} returned nothing'.format(
            specfile_path if specfile_is_json else deprecated_specfile_path))
        err_msg = [
            'Unable to determine whether {0} needs rebuilding,',
            ' caught exception attempting to read from {1}.',
        ]
        tty.error(''.join(err_msg).format(spec.short_spec, file_path))
        tty.debug(url_err)
        tty.warn(result_of_error)
        return rebuild_on_errors

    spec_dict = (sjson.load(spec_file_contents)
                 if specfile_is_json else syaml.load(spec_file_contents))
    if not yaml_contents:
        tty.error('Reading {0} returned nothing'.format(file_path))
        tty.warn(result_of_error)
        return rebuild_on_errors

    try:
        nodes = spec_dict['spec']['nodes']
    except KeyError:
        # Prior node dict format omitted 'nodes' key
        nodes = spec_dict['spec']
    spec_yaml = syaml.load(yaml_contents)

    yaml_spec = spec_yaml['spec']
    name = spec.name

    # In the old format:
    # The "spec" key represents a list of objects, each with a single
    # The "spec" key in the yaml is a list of objects, each with a single
    # key that is the package name. While the list usually just contains
    # a single object, we iterate over the list looking for the object
    # with the name of this concrete spec as a key, out of an abundance
    # of caution.
    # In format version 2:
    # ['spec']['nodes'] is still a list of objects, but with a
    # multitude of keys. The list will commonly contain many objects, and in the
    # case of build specs, it is highly likely that the same name will occur
    # once as the actual package, and then again as the build provenance of that
    # same package. Hence format version 2 matches on the dag hash, not the name.
    if nodes and 'name' not in nodes[0]:
        # old style
        cached_pkg_specs = [item[name] for item in nodes if name in item]
    elif nodes and spec_dict['spec']['_meta']['version'] == 2:
        cached_pkg_specs = [item for item in nodes
                            if item[ht.dag_hash.name] == spec.dag_hash()]
    cached_pkg_specs = [item[name] for item in yaml_spec if name in item]
    cached_target = cached_pkg_specs[0] if cached_pkg_specs else None

    # If either the full_hash didn't exist in the specfile, or it
    # If either the full_hash didn't exist in the .spec.yaml file, or it
    # did, but didn't match the one we computed locally, then we should
    # just rebuild. This can be simplified once the dag_hash and the
    # full_hash become the same thing.
    rebuild = False

    if not cached_target:
        reason = 'did not find spec in specfile contents'
        rebuild = True
    elif ht.full_hash.name not in cached_target:
        reason = 'full_hash was missing from remote specfile'
    if not cached_target or 'full_hash' not in cached_target:
        reason = 'full_hash was missing from remote spec.yaml'
        rebuild = True
    else:
        full_hash = cached_target[ht.full_hash.name]
        full_hash = cached_target['full_hash']
        if full_hash != pkg_full_hash:
            reason = 'hash mismatch, remote = {0}, local = {1}'.format(
                full_hash, pkg_full_hash)
@@ -1813,11 +1650,11 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None,

    Arguments:
        mirrors (dict): Mirrors to check against
        specs (typing.Iterable): Specs to check against mirrors
        output_file (str): Path to output file to be written. If provided,
        specs (iterable): Specs to check against mirrors
        output_file (string): Path to output file to be written. If provided,
            mirrors with missing or out-of-date specs will be formatted as a
            JSON object and written to this file.
        rebuild_on_errors (bool): Treat any errors encountered while
        rebuild_on_errors (boolean): Treat any errors encountered while
            checking specs as a signal to rebuild package.

    Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
@@ -1852,23 +1689,24 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None,

def _download_buildcache_entry(mirror_root, descriptions):
    for description in descriptions:
        description_url = os.path.join(mirror_root, description['url'])
        path = description['path']
        mkdirp(path)
        fail_if_missing = description['required']
        for url in description['url']:
            description_url = os.path.join(mirror_root, url)
            stage = Stage(
                description_url, name="build_cache", path=path, keep=True)
            try:
                stage.fetch()
                break
            except fs.FetchError as e:
                tty.debug(e)
        else:

        mkdirp(path)

        stage = Stage(
            description_url, name="build_cache", path=path, keep=True)

        try:
            stage.fetch()
        except fs.FetchError as e:
            tty.debug(e)
            if fail_if_missing:
                tty.error('Failed to download required url {0}'.format(
                    description_url))
                return False

    return True
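Both sides of this hunk consume `descriptions` entries with `url`, `path`, and `required` keys; the develop side iterates a list of candidate urls while the branch side treats `url` as a single string. A hedged sketch of the expected shape, with illustrative values:

```python
# Sketch: the `descriptions` structure consumed above. The keys follow
# the code shown; the concrete values are illustrative only.
descriptions = [
    {
        'url': ['build_cache/zlib.spack',      # candidate urls, tried in order
                'build_cache/zlib.tar.gz'],
        'path': '/tmp/downloads/zlib',         # local destination directory
        'required': True,                      # fail if no candidate fetches
    },
]
```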
@@ -1,334 +1,24 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
from __future__ import print_function
|
||||
|
||||
import contextlib
|
||||
import fnmatch
|
||||
import json
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
|
||||
try:
|
||||
import sysconfig # novm
|
||||
except ImportError:
|
||||
# Not supported on Python 2.6
|
||||
pass
|
||||
|
||||
import archspec.cpu
|
||||
|
||||
import llnl.util.filesystem as fs
|
||||
import llnl.util.tty as tty
|
||||
|
||||
import spack.architecture
|
||||
import spack.binary_distribution
|
||||
import spack.config
|
||||
import spack.environment
|
||||
import spack.main
|
||||
import spack.modules
|
||||
import spack.paths
|
||||
import spack.platforms
|
||||
import spack.repo
|
||||
import spack.spec
|
||||
import spack.store
|
||||
import spack.user_environment as uenv
|
||||
import spack.util.executable
|
||||
import spack.util.path
|
||||
from spack.util.environment import EnvironmentModifications
|
||||
|
||||
#: Map a bootstrapper type to the corresponding class
|
||||
_bootstrap_methods = {}
|
||||
|
||||
|
||||
def _bootstrapper(type):
|
||||
"""Decorator to register classes implementing bootstrapping
|
||||
methods.
|
||||
|
||||
Args:
|
||||
type (str): string identifying the class
|
||||
"""
|
||||
def _register(cls):
|
||||
_bootstrap_methods[type] = cls
|
||||
return cls
|
||||
return _register
|
||||
|
||||
|
||||
def _try_import_from_store(module, abstract_spec_str):
|
||||
"""Return True if the module can be imported from an already
|
||||
installed spec, False otherwise.
|
||||
|
||||
Args:
|
||||
module: Python module to be imported
|
||||
abstract_spec_str: abstract spec that may provide the module
|
||||
"""
|
||||
bincache_platform = spack.architecture.real_platform()
|
||||
if str(bincache_platform) == 'cray':
|
||||
bincache_platform = spack.platforms.linux.Linux()
|
||||
with spack.architecture.use_platform(bincache_platform):
|
||||
abstract_spec_str = str(spack.spec.Spec(abstract_spec_str))
|
||||
|
||||
# We have to run as part of this python interpreter
|
||||
abstract_spec_str += ' ^' + spec_for_current_python()
|
||||
|
||||
installed_specs = spack.store.db.query(abstract_spec_str, installed=True)
|
||||
|
||||
for candidate_spec in installed_specs:
|
||||
lib_spd = candidate_spec['python'].package.default_site_packages_dir
|
||||
lib64_spd = lib_spd.replace('lib/', 'lib64/')
|
||||
module_paths = [
|
||||
os.path.join(candidate_spec.prefix, lib_spd),
|
||||
os.path.join(candidate_spec.prefix, lib64_spd)
|
||||
]
|
||||
sys.path.extend(module_paths)
|
||||
|
||||
try:
|
||||
_fix_ext_suffix(candidate_spec)
|
||||
if _python_import(module):
|
||||
msg = ('[BOOTSTRAP MODULE {0}] The installed spec "{1}/{2}" '
|
||||
'provides the "{0}" Python module').format(
|
||||
module, abstract_spec_str, candidate_spec.dag_hash()
|
||||
)
|
||||
tty.debug(msg)
|
||||
return True
|
||||
except Exception as e:
|
||||
msg = ('unexpected error while trying to import module '
|
||||
'"{0}" from spec "{1}" [error="{2}"]')
|
||||
tty.warn(msg.format(module, candidate_spec, str(e)))
|
||||
else:
|
||||
msg = "Spec {0} did not provide module {1}"
|
||||
tty.warn(msg.format(candidate_spec, module))
|
||||
|
||||
sys.path = sys.path[:-2]
|
||||
|
||||
return False


def _fix_ext_suffix(candidate_spec):
    """Fix the external suffixes of Python extensions on the fly for
    platforms that may need it

    Args:
        candidate_spec (Spec): installed spec with a Python module
            to be checked.
    """
    # Here we map target families to the patterns expected
    # by pristine CPython. Only architectures with known issues
    # are included. Known issues:
    #
    # [RHEL + ppc64le]: https://github.com/spack/spack/issues/25734
    #
    _suffix_to_be_checked = {
        'ppc64le': {
            'glob': '*.cpython-*-powerpc64le-linux-gnu.so',
            're': r'.cpython-[\w]*-powerpc64le-linux-gnu.so',
            'fmt': r'{module}.cpython-{major}{minor}m-powerpc64le-linux-gnu.so'
        }
    }

    # If the current architecture is not problematic return
    generic_target = archspec.cpu.host().family
    if str(generic_target) not in _suffix_to_be_checked:
        return

    # If there's no EXT_SUFFIX (Python < 3.5) or the suffix matches
    # the expectations, return since the package is surely good
    ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
    if ext_suffix is None:
        return

    expected = _suffix_to_be_checked[str(generic_target)]
    if fnmatch.fnmatch(ext_suffix, expected['glob']):
        return

    # If we are here it means the current interpreter expects different names
    # than pristine CPython. So:
    # 1. Find what we have installed
    # 2. Create symbolic links for the other names, if they're not there already

    # Check if standard names are installed and if we have to create
    # links for this interpreter
    standard_extensions = fs.find(candidate_spec.prefix, expected['glob'])
    link_names = [re.sub(expected['re'], ext_suffix, s) for s in standard_extensions]
    for file_name, link_name in zip(standard_extensions, link_names):
        if os.path.exists(link_name):
            continue
        os.symlink(file_name, link_name)

    # Check if this interpreter installed something and we have to create
    # links for a standard CPython interpreter
    non_standard_extensions = fs.find(candidate_spec.prefix, '*' + ext_suffix)
    for abs_path in non_standard_extensions:
        directory, filename = os.path.split(abs_path)
        module = filename.split('.')[0]
        link_name = os.path.join(directory, expected['fmt'].format(
            module=module, major=sys.version_info[0], minor=sys.version_info[1])
        )
        if os.path.exists(link_name):
            continue
        os.symlink(abs_path, link_name)
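To illustrate the mismatch being repaired: on the affected interpreters `EXT_SUFFIX` differs from what pristine CPython would use, so extensions installed under one spelling are invisible to the other. The file names below are purely illustrative (the exact patched suffix varies by distribution), assuming a Python 3.6 module named `foo`:

```python
import os

# Illustrative names only; see spack/spack#25734 for the real report.
# Name a pristine CPython 3.6 build would install on ppc64le:
standard = 'foo.cpython-36m-powerpc64le-linux-gnu.so'
# Name a distribution-patched interpreter might expect instead
# (hypothetical spelling):
patched = 'foo.cpython-36m-ppc64le-linux-gnu.so'

# _fix_ext_suffix bridges the two by symlinking one name to the other:
if not os.path.exists(patched):
    os.symlink(standard, patched)
```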


@_bootstrapper(type='buildcache')
class _BuildcacheBootstrapper(object):
    """Install the software needed during bootstrapping from a buildcache."""
    def __init__(self, conf):
        self.name = conf['name']
        self.url = conf['info']['url']

    def try_import(self, module, abstract_spec_str):
        if _try_import_from_store(module, abstract_spec_str):
            return True

        # Try to install from an unsigned binary cache
        abstract_spec = spack.spec.Spec(
            abstract_spec_str + ' ^' + spec_for_current_python()
        )

        # On Cray we want to use Linux binaries if available from mirrors
        bincache_platform = spack.architecture.real_platform()
        if str(bincache_platform) == 'cray':
            bincache_platform = spack.platforms.Linux()
            with spack.architecture.use_platform(bincache_platform):
                abstract_spec = spack.spec.Spec(
                    abstract_spec_str + ' ^' + spec_for_current_python()
                )

        # Read information on verified clingo binaries
        json_filename = '{0}.json'.format(module)
        json_path = os.path.join(
            spack.paths.share_path, 'bootstrap', self.name, json_filename
        )
        with open(json_path) as f:
            data = json.load(f)

        buildcache = spack.main.SpackCommand('buildcache')
        # Ensure we see only the buildcache being used to bootstrap
        mirror_scope = spack.config.InternalConfigScope(
            'bootstrap', {'mirrors:': {self.name: self.url}}
        )
        with spack.config.override(mirror_scope):
            # This index is currently needed to get the compiler used to build some
            # specs that we know by dag hash.
            spack.binary_distribution.binary_index.regenerate_spec_cache()
            index = spack.binary_distribution.update_cache_and_get_specs()
            for item in data['verified']:
                candidate_spec = item['spec']
                python_spec = item['python']
                # Skip specs which are not compatible
                if not abstract_spec.satisfies(candidate_spec):
                    continue

                if python_spec not in abstract_spec:
                    continue

                for pkg_name, pkg_hash, pkg_sha256 in item['binaries']:
                    msg = ('[BOOTSTRAP MODULE {0}] Try installing "{1}" from binary '
                           'cache at "{2}"')
                    tty.debug(msg.format(module, pkg_name, self.url))
                    index_spec = next(x for x in index if x.dag_hash() == pkg_hash)
                    # Reconstruct the compiler that we need to use for bootstrapping
                    compiler_entry = {
                        "modules": [],
                        "operating_system": str(index_spec.os),
                        "paths": {
                            "cc": "/dev/null",
                            "cxx": "/dev/null",
                            "f77": "/dev/null",
                            "fc": "/dev/null"
                        },
                        "spec": str(index_spec.compiler),
                        "target": str(index_spec.target.family)
                    }
                    with spack.architecture.use_platform(bincache_platform):
                        with spack.config.override(
                                'compilers', [{'compiler': compiler_entry}]
                        ):
                            spec_str = '/' + pkg_hash
                            install_args = [
                                'install',
                                '--sha256', pkg_sha256,
                                '-a', '-u', '-o', '-f', spec_str
                            ]
                            buildcache(*install_args, fail_on_error=False)
                    # TODO: undo installations that didn't complete?

        if _try_import_from_store(module, abstract_spec_str):
            return True
        return False
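The `<module>.json` metadata loaded above maps an abstract spec to verified binaries. A sketch of its shape, written as the Python object `json.load` would return — all values below are placeholders, not real buildcache entries:

```python
data = {
    'verified': [
        {
            # abstract spec the binaries were built for
            'spec': 'clingo-bootstrap@spack %gcc target=x86_64',
            # Python the binaries are compatible with
            'python': 'python@3.8',
            # (package name, DAG hash, sha256) triples, as unpacked above
            'binaries': [
                ('clingo-bootstrap', '<dag-hash placeholder>',
                 '<sha256 placeholder>'),
            ],
        },
    ],
}
```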


@_bootstrapper(type='install')
class _SourceBootstrapper(object):
    """Install the software needed during bootstrapping from sources."""
    def __init__(self, conf):
        self.conf = conf

    @staticmethod
    def try_import(module, abstract_spec_str):
        if _try_import_from_store(module, abstract_spec_str):
            return True

        # Try to build and install from sources
        with spack_python_interpreter():
            # Add hint to use frontend operating system on Cray
            if str(spack.architecture.platform()) == 'cray':
                abstract_spec_str += ' os=fe'

            concrete_spec = spack.spec.Spec(
                abstract_spec_str + ' ^' + spec_for_current_python()
            )

            if module == 'clingo':
                # TODO: remove when the old concretizer is deprecated
                concrete_spec._old_concretize()
            else:
                concrete_spec.concretize()

        msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
        tty.debug(msg.format(module, abstract_spec_str))

        # Install the spec that should make the module importable
        concrete_spec.package.do_install()

        return _try_import_from_store(module, abstract_spec_str=abstract_spec_str)


def _make_bootstrapper(conf):
    """Return a bootstrap object built according to the
    configuration argument
    """
    btype = conf['type']
    return _bootstrap_methods[btype](conf)


def _source_is_trusted(conf):
    trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
    if name not in trusted:
        return False
    return trusted[name]


def spec_for_current_python():
    """For bootstrapping purposes we are just interested in the Python
    minor version (all patches are ABI compatible with the same minor)
    and in whether ucs4 support has been enabled for Python 2.7

    See:
        https://www.python.org/dev/peps/pep-0513/
        https://stackoverflow.com/a/35801395/771663
    """
    version_str = '.'.join(str(x) for x in sys.version_info[:2])
    variant_str = ''
    if sys.version_info[0] == 2 and sys.version_info[1] == 7:
        unicode_size = sysconfig.get_config_var('Py_UNICODE_SIZE')
        variant_str = '+ucs4' if unicode_size == 4 else '~ucs4'

    spec_fmt = 'python@{0} {1}'
    return spec_fmt.format(version_str, variant_str)
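A quick illustration of the value this returns; the core formatting is reproduced below so the sketch runs standalone:

```python
import sys

# Core of spec_for_current_python(), reproduced for illustration only.
version_str = '.'.join(str(x) for x in sys.version_info[:2])
spec = 'python@{0} {1}'.format(version_str, '')
# On CPython 3.8 this yields 'python@3.8 ' -- note the trailing space
# left by the empty variant string; a UCS4-enabled Python 2.7 would
# instead produce 'python@2.7 +ucs4'.
print(spec)
```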


@contextlib.contextmanager
def spack_python_interpreter():
@@ -336,8 +26,11 @@ def spack_python_interpreter():
    which Spack is currently running as the only Python external spec
    available.
    """
    python_prefix = sys.exec_prefix
    external_python = spec_for_current_python()
    python_cls = type(spack.spec.Spec('python').package)
    python_prefix = os.path.dirname(os.path.dirname(sys.executable))
    externals = python_cls.determine_spec_details(
        python_prefix, [os.path.basename(sys.executable)])
    external_python = externals[0]

    entry = {
        'buildable': False,
@@ -350,58 +43,68 @@ def spack_python_interpreter():
    yield


def ensure_module_importable_or_raise(module, abstract_spec=None):
    """Make the requested module available for import, or raise.

    This function tries to import a Python module in the current interpreter
    using, in order, the methods configured in bootstrap.yaml.

    If none of the methods succeed, an exception is raised. The function exits
    on first success.

    Args:
        module (str): module to be imported in the current interpreter
        abstract_spec (str): abstract spec that might provide the module. If not
            given it defaults to "module"

    Raises:
        ImportError: if the module couldn't be imported
    """
    # If we can import it already, that's great
    tty.debug("[BOOTSTRAP MODULE {0}] Try importing from Python".format(module))
    if _python_import(module):
        return

    abstract_spec = abstract_spec or module
    source_configs = spack.config.get('bootstrap:sources', [])
    for current_config in source_configs:
        if not _source_is_trusted(current_config):
            msg = ('[BOOTSTRAP MODULE {0}] Skipping source "{1}" since it is '
                   'not trusted').format(module, current_config['name'])
            tty.debug(msg)
            continue

        b = _make_bootstrapper(current_config)
        try:
            if b.try_import(module, abstract_spec):
                return
        except Exception as e:
            msg = '[BOOTSTRAP MODULE {0}] Unexpected error "{1}"'
            tty.debug(msg.format(module, str(e)))

    # We couldn't import in any way, so raise an import error
    msg = 'cannot bootstrap the "{0}" Python module'.format(module)
    if abstract_spec:
        msg += ' from spec "{0}"'.format(abstract_spec)
    raise ImportError(msg)
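A usage sketch for the entry point above, assuming the module is importable as `spack.bootstrap` and that callers wrap it in the bootstrap configuration context defined later in this file:

```python
import spack.bootstrap

# Bootstrap clingo unless it is already importable; this raises
# ImportError when every configured (and trusted) source fails.
with spack.bootstrap.ensure_bootstrap_configuration():
    spack.bootstrap.ensure_module_importable_or_raise(
        module='clingo', abstract_spec='clingo-bootstrap@spack+python')

import clingo  # now safe to import
```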


def _python_import(module):
def make_module_available(module, spec=None, install=False):
    """Ensure module is importable"""
    # If we already can import it, that's great
    try:
        __import__(module)
        return
    except ImportError:
        return False
    return True
        pass

    # If it's already installed, use it
    # Search by spec
    spec = spack.spec.Spec(spec or module)

    # We have to run as part of this python
    # We can constrain by a shortened version in place of a version range
    # because this spec is only used for querying or as a placeholder to be
    # replaced by an external that already has a concrete version. This syntax
    # is not sufficient when concretizing without an external, as it will
    # concretize to python@X.Y instead of python@X.Y.Z
    spec.constrain('^python@%d.%d' % sys.version_info[:2])
    installed_specs = spack.store.db.query(spec, installed=True)

    for ispec in installed_specs:
        # TODO: make sure run-environment is appropriate
        module_path = os.path.join(ispec.prefix,
                                   ispec['python'].package.site_packages_dir)
        module_path_64 = module_path.replace('/lib/', '/lib64/')
        try:
            sys.path.append(module_path)
            sys.path.append(module_path_64)
            __import__(module)
            return
        except ImportError:
            tty.warn("Spec %s did not provide module %s" % (ispec, module))
            sys.path = sys.path[:-2]

    def _raise_error(module_name, module_spec):
        error_msg = 'cannot import module "{0}"'.format(module_name)
        if module_spec:
            error_msg += ' from spec "{0}"'.format(module_spec)
        raise ImportError(error_msg)

    if not install:
        _raise_error(module, spec)

    with spack_python_interpreter():
        # We will install for ourselves, using this python if needed
        # Concretize the spec
        spec.concretize()
    spec.package.do_install()

    module_path = os.path.join(spec.prefix,
                               spec['python'].package.site_packages_dir)
    module_path_64 = module_path.replace('/lib/', '/lib64/')
    try:
        sys.path.append(module_path)
        sys.path.append(module_path_64)
        __import__(module)
        return
    except ImportError:
        sys.path = sys.path[:-2]
        _raise_error(module, spec)


def get_executable(exe, spec=None, install=False):
@@ -409,14 +112,13 @@ def get_executable(exe, spec=None, install=False):

    Args:
        exe (str): needed executable name
        spec (spack.spec.Spec or str): spec to search for exe in (default exe)
        spec (Spec or str): spec to search for exe in (default exe)
        install (bool): install spec if not available

    When ``install`` is True, Spack will use the python used to run Spack as an
    external. The ``install`` option should only be used with packages that
    install quickly (when using external python) or are guaranteed by Spack
    organization to be in a binary mirror (clingo).
    """
    organization to be in a binary mirror (clingo)."""
    # Search the system first
    runner = spack.util.executable.which(exe)
    if runner:
@@ -470,10 +172,7 @@ def _raise_error(executable, exe_spec):


def _bootstrap_config_scopes():
    tty.debug('[BOOTSTRAP CONFIG SCOPE] name=_builtin')
    config_scopes = [
        spack.config.InternalConfigScope('_builtin', spack.config.config_defaults)
    ]
    config_scopes = []
    for name, path in spack.config.configuration_paths:
        platform = spack.architecture.platform().name
        platform_scope = spack.config.ConfigScope(
@@ -481,7 +180,7 @@ def _bootstrap_config_scopes():
        )
        generic_scope = spack.config.ConfigScope(name, path)
        config_scopes.extend([generic_scope, platform_scope])
        msg = '[BOOTSTRAP CONFIG SCOPE] name={0}, path={1}'
        msg = '[BOOSTRAP CONFIG SCOPE] name={0}, path={1}'
        tty.debug(msg.format(generic_scope.name, generic_scope.path))
        tty.debug(msg.format(platform_scope.name, platform_scope.path))
    return config_scopes
@@ -489,59 +188,12 @@ def _bootstrap_config_scopes():

@contextlib.contextmanager
def ensure_bootstrap_configuration():
    bootstrap_store_path = store_path()
    with spack.environment.deactivate_environment():
        with spack.architecture.use_platform(spack.architecture.real_platform()):
    with spack.architecture.use_platform(spack.architecture.real_platform()):
        # Default configuration scopes excluding command line and builtin
        # but accounting for platform specific scopes
        config_scopes = _bootstrap_config_scopes()
        with spack.config.use_configuration(*config_scopes):
            with spack.repo.use_repositories(spack.paths.packages_path):
                with spack.store.use_store(bootstrap_store_path):
                    # Default configuration scopes excluding command line
                    # and builtin but accounting for platform specific scopes
                    config_scopes = _bootstrap_config_scopes()
                    with spack.config.use_configuration(*config_scopes):
                        with spack.modules.disable_modules():
                            with spack_python_interpreter():
                                yield


def store_path():
    """Path to the store used for bootstrapped software"""
    enabled = spack.config.get('bootstrap:enable', True)
    if not enabled:
        msg = ('bootstrapping is currently disabled. '
               'Use "spack bootstrap enable" to enable it')
        raise RuntimeError(msg)

    bootstrap_root_path = spack.config.get(
        'bootstrap:root', spack.paths.user_bootstrap_path
    )
    bootstrap_store_path = spack.util.path.canonicalize_path(
        os.path.join(bootstrap_root_path, 'store')
    )
    return bootstrap_store_path


def clingo_root_spec():
    # Construct the root spec that will be used to bootstrap clingo
    spec_str = 'clingo-bootstrap@spack+python'

    # Add a proper compiler hint to the root spec. We use GCC for
    # everything but MacOS.
    if str(spack.architecture.platform()) == 'darwin':
        spec_str += ' %apple-clang'
    else:
        spec_str += ' %gcc'

    # Add the generic target
    generic_target = archspec.cpu.host().family
    spec_str += ' target={0}'.format(str(generic_target))

    tty.debug('[BOOTSTRAP ROOT SPEC] clingo: {0}'.format(spec_str))

    return spec_str
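For a concrete sense of the result, the sketch below replays the assembly for a hypothetical x86_64 Linux host (Darwin would select `%apple-clang` instead of `%gcc`):

```python
# Illustration only: hypothetical host with an x86_64 target family.
spec_str = 'clingo-bootstrap@spack+python'
spec_str += ' %gcc'            # non-Darwin platform
spec_str += ' target=x86_64'   # generic family from archspec
assert spec_str == 'clingo-bootstrap@spack+python %gcc target=x86_64'
```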


def ensure_clingo_importable_or_raise():
    """Ensure that the clingo module is available for import."""
    ensure_module_importable_or_raise(
        module='clingo', abstract_spec=clingo_root_spec()
    )
    with spack.store.use_store(spack.paths.user_bootstrap_store):
        with spack_python_interpreter():
            yield

@@ -33,53 +33,44 @@
calls you can make from within the install() function.
"""
import inspect
import re
import multiprocessing
import os
import re
import shutil
import sys
import traceback
import types

from six import StringIO

import llnl.util.tty as tty
from llnl.util.filesystem import install, install_tree, mkdirp
from llnl.util.lang import dedupe
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
from llnl.util.tty.log import MultiProcessFd

import spack.architecture as arch
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.install_test
import spack.main
import spack.package
import spack.paths
import spack.package
import spack.repo
import spack.schema.environment
import spack.store
import spack.install_test
import spack.subprocess_context
import spack.user_environment
import spack.architecture as arch
import spack.util.path
from spack.error import NoHeadersError, NoLibrariesError
from spack.util.cpus import cpus_available
from spack.util.environment import (
    EnvironmentModifications,
    env_flag,
    filter_system_paths,
    get_path,
    inspect_path,
    is_system_path,
    preserve_environment,
    system_dirs,
    validate,
)
from spack.util.executable import Executable
from spack.util.log_parse import make_log_context, parse_log_events
from spack.util.module_cmd import load_module, module, path_from_modules
from spack.util.string import plural
from spack.util.environment import (
    env_flag, filter_system_paths, get_path, is_system_path,
    EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, path_from_modules, module
from spack.util.log_parse import parse_log_events, make_log_context


#
# This can be set by the user to globally disable parallel builds.
@@ -88,7 +79,7 @@

#
# These environment variables are set by
# set_wrapper_variables and used to pass parameters to
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
@@ -169,12 +160,6 @@ def clean_environment():
    env.unset('CPLUS_INCLUDE_PATH')
    env.unset('OBJC_INCLUDE_PATH')

    env.unset('CMAKE_PREFIX_PATH')

    # Avoid that libraries of build dependencies get hijacked.
    env.unset('LD_PRELOAD')
    env.unset('DYLD_INSERT_LIBRARIES')

    # On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
    # interference with Spack dependencies.
    # CNL requires these variables to be set (or at least some of them,
@@ -322,20 +307,88 @@ def set_compiler_environment_variables(pkg, env):
    return env


def set_wrapper_variables(pkg, env):
    """Set environment variables used by the Spack compiler wrapper
    (which have the prefix `SPACK_`) and also add the compiler wrappers
    to PATH.
def set_build_environment_variables(pkg, env, dirty):
    """Ensure a clean install environment when we build packages.

    This determines the injected -L/-I/-rpath options; each
    of these specifies a search order and this function computes these
    options in a manner that is intended to match the DAG traversal order
    in `modifications_from_dependencies`: that method uses a post-order
    traversal so that `PrependPath` actions from dependencies take lower
    precedence; we use a post-order traversal here to match the visitation
    order of `modifications_from_dependencies` (so we are visiting the
    lowest priority packages first).
    This involves unsetting pesky environment variables that may
    affect the build. It also involves setting environment variables
    used by Spack's compiler wrappers.

    Args:
        pkg: The package we are building
        env: The build environment
        dirty (bool): Skip unsetting the user's environment settings
    """
    # Gather information about various types of dependencies
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps = get_rpath_deps(pkg)

    link_dirs = []
    include_dirs = []
    rpath_dirs = []

    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)

    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            tty.debug("No libraries found for {0}".format(dep.name))

        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)

        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)

        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))

    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))

    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))

    build_prefixes = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]

    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)

    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)

    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)

    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
@@ -345,6 +398,13 @@ def set_wrapper_variables(pkg, env):
    extra_rpaths = ':'.join(compiler.extra_rpaths)
    env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)

    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)

    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
@@ -364,7 +424,6 @@ def set_wrapper_variables(pkg, env):
        if os.path.isdir(ci):
            env_paths.append(ci)

    tty.debug("Adding compiler bin/ paths: " + " ".join(env_paths))
    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)
@@ -383,101 +442,14 @@ def set_wrapper_variables(pkg, env):
        raise RuntimeError("No ccache binary found in PATH")
    env.set(SPACK_CCACHE_BINARY, ccache)

    # Gather information about various types of dependencies
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    rpath_deps = get_rpath_deps(pkg)
    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)

    link_dirs = []
    include_dirs = []
    rpath_dirs = []

    def _prepend_all(list_to_modify, items_to_add):
        # Update the original list (creating a new list would be faster but
        # may not be convenient)
        for item in reversed(list(items_to_add)):
            list_to_modify.insert(0, item)

    def update_compiler_args_for_dep(dep):
        if dep in link_deps and (not is_system_path(dep.prefix)):
            query = pkg.spec[dep.name]
            dep_link_dirs = list()
            try:
                dep_link_dirs.extend(query.libs.directories)
            except NoLibrariesError:
                tty.debug("No libraries found for {0}".format(dep.name))

            for default_lib_dir in ['lib', 'lib64']:
                default_lib_prefix = os.path.join(
                    dep.prefix, default_lib_dir)
                if os.path.isdir(default_lib_prefix):
                    dep_link_dirs.append(default_lib_prefix)

            _prepend_all(link_dirs, dep_link_dirs)
            if dep in rpath_deps:
                _prepend_all(rpath_dirs, dep_link_dirs)

            try:
                _prepend_all(include_dirs, query.headers.directories)
            except NoHeadersError:
                tty.debug("No headers found for {0}".format(dep.name))

    for dspec in pkg.spec.traverse(root=False, order='post'):
        if dspec.external:
            update_compiler_args_for_dep(dspec)

    # Just above, we prepended entries for -L/-rpath for externals. We
    # now do this for non-external packages so that Spack-built packages
    # are searched first for libraries etc.
    for dspec in pkg.spec.traverse(root=False, order='post'):
        if not dspec.external:
            update_compiler_args_for_dep(dspec)

    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib64', 'lib']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.insert(0, lib_path)

    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))

    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))


def determine_number_of_jobs(
        parallel=False, command_line=None, config_default=None, max_cpus=None):
    """
    Packages that require sequential builds need 1 job. Otherwise we use the
    number of jobs set on the command line. If not set, then we use the config
    defaults (which is usually set through the builtin config scope), but we
    cap to the number of CPUs available to avoid oversubscription.

    Parameters:
        parallel (bool or None): true when package supports parallel builds
        command_line (int or None): command line override
        config_default (int or None): config default number of jobs
        max_cpus (int or None): maximum number of CPUs available. When None, this
            value is automatically determined.
    """
    if not parallel:
        return 1

    if command_line is None and 'command_line' in spack.config.scopes():
        command_line = spack.config.get('config:build_jobs', scope='command_line')

    if command_line is not None:
        return command_line

    max_cpus = max_cpus or cpus_available()

    # in some rare cases _builtin config may not be set, so default to max 16
    config_default = config_default or spack.config.get('config:build_jobs', 16)

    return min(max_cpus, config_default)
    return env
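A quick sketch of how the precedence rules of `determine_number_of_jobs` play out, with hypothetical inputs (the last call assumes a working Spack session, since a `None` command line makes the function consult `spack.config`):

```python
# Sequential packages always get one job, regardless of other settings.
assert determine_number_of_jobs(parallel=False) == 1

# An explicit command-line value wins outright.
assert determine_number_of_jobs(parallel=True, command_line=4) == 4

# Otherwise the config default is capped by the available CPUs.
assert determine_number_of_jobs(
    parallel=True, config_default=16, max_cpus=8) == 8
```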


def _set_variables_for_single_module(pkg, module):
@@ -488,7 +460,8 @@ def _set_variables_for_single_module(pkg, module):
    if getattr(module, marker, False):
        return

    jobs = determine_number_of_jobs(parallel=pkg.parallel)
    jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
    jobs = min(jobs, multiprocessing.cpu_count())

    m = module
    m.make_jobs = jobs
@@ -687,14 +660,14 @@ def get_std_cmake_args(pkg):
    """List of standard arguments used if a package is a CMakePackage.

    Returns:
        list: standard arguments that would be used if this
        list of str: standard arguments that would be used if this
            package were a CMakePackage instance.

    Args:
        pkg (spack.package.PackageBase): package under consideration
        pkg (PackageBase): package under consideration

    Returns:
        list: arguments for cmake
        list of str: arguments for cmake
    """
    return spack.build_systems.cmake.CMakePackage._std_args(pkg)

@@ -703,14 +676,14 @@ def get_std_meson_args(pkg):
    """List of standard arguments used if a package is a MesonPackage.

    Returns:
        list: standard arguments that would be used if this
        list of str: standard arguments that would be used if this
            package were a MesonPackage instance.

    Args:
        pkg (spack.package.PackageBase): package under consideration
        pkg (PackageBase): package under consideration

    Returns:
        list: arguments for meson
        list of str: arguments for meson
    """
    return spack.build_systems.meson.MesonPackage._std_args(pkg)

@@ -740,7 +713,7 @@ def load_external_modules(pkg):
    associated with them.

    Args:
        pkg (spack.package.PackageBase): package to load deps for
        pkg (PackageBase): package to load deps for
    """
    for dep in list(pkg.spec.traverse()):
        external_modules = dep.external_modules or []
@@ -750,47 +723,42 @@ def load_external_modules(pkg):

def setup_package(pkg, dirty, context='build'):
    """Execute all environment setup routines."""
    if context not in ['build', 'test']:
        raise ValueError(
            "'context' must be one of ['build', 'test'] - got: {0}"
            .format(context))

    set_module_variables_for_package(pkg)

    env = EnvironmentModifications()

    if not dirty:
        clean_environment()

    # setup compilers for build contexts
    # setup compilers and build tools for build contexts
    need_compiler = context == 'build' or (context == 'test' and
                                           pkg.test_requires_compiler)
    if need_compiler:
        set_compiler_environment_variables(pkg, env)
        set_wrapper_variables(pkg, env)

    env.extend(modifications_from_dependencies(
        pkg.spec, context, custom_mods_only=False))
    set_build_environment_variables(pkg, env, dirty)

    # architecture specific setup
    pkg.architecture.platform.setup_platform_environment(pkg, env)

    if context == 'build':
        pkg.setup_build_environment(env)
        # recursive post-order dependency information
        env.extend(
            modifications_from_dependencies(pkg.spec, context=context)
        )

        if (not dirty) and (not env.is_unset('CPATH')):
            tty.debug("A dependency has updated CPATH, this may lead pkg-"
                      "config to assume that the package is part of the system"
                      " includes and omit it when invoked with '--cflags'.")

        # setup package itself
        set_module_variables_for_package(pkg)
        pkg.setup_build_environment(env)
    elif context == 'test':
        import spack.user_environment as uenv  # avoid circular import
        env.extend(uenv.environment_modifications_for_spec(pkg.spec))
        env.extend(
            inspect_path(
                pkg.spec.prefix,
                spack.user_environment.prefix_inspections(pkg.spec.platform),
                exclude=is_system_path
            )
            modifications_from_dependencies(pkg.spec, context=context)
        )
        pkg.setup_run_environment(env)
        set_module_variables_for_package(pkg)
        env.prepend_path('PATH', '.')

    # Loading modules, in particular if they are meant to be used outside
@@ -832,173 +800,39 @@ def setup_package(pkg, dirty, context='build'):
    env.apply_modifications()


def _make_runnable(pkg, env):
    # Helper method which prepends a Package's bin/ prefix to the PATH
    # environment variable
    prefix = pkg.prefix

    for dirname in ['bin', 'bin64']:
        bin_dir = os.path.join(prefix, dirname)
        if os.path.isdir(bin_dir):
            env.prepend_path('PATH', bin_dir)


def modifications_from_dependencies(spec, context, custom_mods_only=True):
def modifications_from_dependencies(spec, context):
    """Returns the environment modifications that are required by
    the dependencies of a spec and also applies modifications
    to this spec's package at module scope, if need be.

    Environment modifications include:

    - Updating PATH so that executables can be found
    - Updating CMAKE_PREFIX_PATH and PKG_CONFIG_PATH so that their respective
      tools can find Spack-built dependencies
    - Running custom package environment modifications

    Custom package modifications can conflict with the default PATH changes
    we make (specifically for the PATH, CMAKE_PREFIX_PATH, and PKG_CONFIG_PATH
    environment variables), so this applies changes in a fixed order:

    - All modifications (custom and default) from external deps first
    - All modifications from non-external deps afterwards

    With that order, `PrependPath` actions from non-external default
    environment modifications will take precedence over custom modifications
    from external packages.

    A secondary constraint is that custom and default modifications are
    grouped on a per-package basis: combined with the post-order traversal this
    means that default modifications of dependents can override custom
    modifications of dependencies (again, this would only occur for PATH,
    CMAKE_PREFIX_PATH, or PKG_CONFIG_PATH).

    Args:
        spec (spack.spec.Spec): spec for which we want the modifications
        spec (Spec): spec for which we want the modifications
        context (str): either 'build' for build-time modifications or 'run'
            for run-time modifications
    """
    if context not in ['build', 'run', 'test']:
        raise ValueError(
            "Expecting context to be one of ['build', 'run', 'test'], "
            "got: {0}".format(context))

    env = EnvironmentModifications()
    pkg = spec.package

    # Note: see computation of 'custom_mod_deps' and 'exe_deps' later in this
    # function; these sets form the building blocks of those collections.
    build_deps = set(spec.dependencies(deptype=('build', 'test')))
    link_deps = set(spec.traverse(root=False, deptype='link'))
    build_link_deps = build_deps | link_deps
    build_and_supporting_deps = set()
    for build_dep in build_deps:
        build_and_supporting_deps.update(build_dep.traverse(deptype='run'))
    run_and_supporting_deps = set(
        spec.traverse(root=False, deptype=('run', 'link')))
    test_and_supporting_deps = set()
    for test_dep in set(spec.dependencies(deptype='test')):
        test_and_supporting_deps.update(test_dep.traverse(deptype='run'))
    # Maps the context to deptype and method to be called
    deptype_and_method = {
        'build': (('build', 'link', 'test'),
                  'setup_dependent_build_environment'),
        'run': (('link', 'run'), 'setup_dependent_run_environment'),
        'test': (('link', 'run', 'test'), 'setup_dependent_run_environment')
    }
    deptype, method = deptype_and_method[context]

    # All dependencies that might have environment modifications to apply
    custom_mod_deps = set()
    if context == 'build':
        custom_mod_deps.update(build_and_supporting_deps)
        # Tests may be performed after build
        custom_mod_deps.update(test_and_supporting_deps)
    else:
        # test/run context
        custom_mod_deps.update(run_and_supporting_deps)
        if context == 'test':
            custom_mod_deps.update(test_and_supporting_deps)
    custom_mod_deps.update(link_deps)

    # Determine 'exe_deps': the set of packages with binaries we want to use
    if context == 'build':
        exe_deps = build_and_supporting_deps | test_and_supporting_deps
    elif context == 'run':
        exe_deps = set(spec.traverse(deptype='run'))
    elif context == 'test':
        exe_deps = test_and_supporting_deps

    def default_modifications_for_dep(dep):
        if (dep in build_link_deps and
                not is_system_path(dep.prefix) and
                context == 'build'):
            prefix = dep.prefix

            env.prepend_path('CMAKE_PREFIX_PATH', prefix)

            for directory in ('lib', 'lib64', 'share'):
                pcdir = os.path.join(prefix, directory, 'pkgconfig')
                if os.path.isdir(pcdir):
                    env.prepend_path('PKG_CONFIG_PATH', pcdir)

        if dep in exe_deps and not is_system_path(dep.prefix):
            _make_runnable(dep, env)

    def add_modifications_for_dep(dep):
        # Some callers of this function only want the custom modifications.
        # For callers that want both custom and default modifications, we want
        # to perform the default modifications here (this groups custom
        # and default modifications together on a per-package basis).
        if not custom_mods_only:
            default_modifications_for_dep(dep)

        # Perform custom modifications here (PrependPath actions performed in
        # the custom method override the default environment modifications
        # we do to help the build, namely for PATH, CMAKE_PREFIX_PATH, and
        # PKG_CONFIG_PATH)
        if dep in custom_mod_deps:
            dpkg = dep.package
            set_module_variables_for_package(dpkg)
            # Allow dependencies to modify the module
            dpkg.setup_dependent_package(spec.package.module, spec)
            if context == 'build':
                dpkg.setup_dependent_build_environment(env, spec)
            else:
                dpkg.setup_dependent_run_environment(env, spec)

    # Note that we want to perform environment modifications in a fixed order.
    # The Spec.traverse method provides this: i.e. in addition to
    # the post-order semantics, it also guarantees a fixed traversal order
    # among dependencies which are not constrained by post-order semantics.
    for dspec in spec.traverse(root=False, order='post'):
        if dspec.external:
            add_modifications_for_dep(dspec)

    for dspec in spec.traverse(root=False, order='post'):
        # Default env modifications for non-external packages can override
        # custom modifications of external packages (this can only occur
        # for modifications to PATH, CMAKE_PREFIX_PATH, and PKG_CONFIG_PATH)
        if not dspec.external:
            add_modifications_for_dep(dspec)
    root = context == 'test'
    for dspec in spec.traverse(order='post', root=root, deptype=deptype):
        dpkg = dspec.package
        set_module_variables_for_package(dpkg)
        # Allow dependencies to modify the module
        dpkg.setup_dependent_package(pkg.module, spec)
        getattr(dpkg, method)(env, spec)

    return env


def get_cmake_prefix_path(pkg):
    # Note that unlike modifications_from_dependencies, this does not include
    # any edits to CMAKE_PREFIX_PATH defined in custom
    # setup_dependent_build_environment implementations of dependency packages
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    spack_built = []
    externals = []
    # modifications_from_dependencies updates CMAKE_PREFIX_PATH by first
    # prepending all externals and then all non-externals
    for dspec in pkg.spec.traverse(root=False, order='post'):
        if dspec in build_link_deps:
            if dspec.external:
                externals.insert(0, dspec)
            else:
                spack_built.insert(0, dspec)

    ordered_build_link_deps = spack_built + externals
    build_link_prefixes = filter_system_paths(
        x.prefix for x in ordered_build_link_deps)
    return build_link_prefixes


def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,
                       input_multiprocess_fd):

@@ -1071,9 +905,9 @@ def start_build_process(pkg, function, kwargs):

    Args:

        pkg (spack.package.PackageBase): package whose environment we should set up the
        pkg (PackageBase): package whose environment we should set up the
            child process for.
        function (typing.Callable): argless function to run in the child
        function (callable): argless function to run in the child
            process.

    Usage::
@@ -1158,7 +992,7 @@ def get_package_context(traceback, context=3):
    """Return some context for an error message when the build fails.

    Args:
        traceback: A traceback from some exception raised during
        traceback (traceback): A traceback from some exception raised during
            install

        context (int): Lines of context to show before and after the line

@@ -6,7 +6,6 @@
# Why doesn't this work for me?
# from spack import *
from llnl.util.filesystem import filter_file

from spack.build_systems.autotools import AutotoolsPackage
from spack.directives import extends
from spack.package import ExtensionError

@@ -6,14 +6,13 @@
import itertools
import os
import os.path
import stat
from subprocess import PIPE, check_call
from subprocess import PIPE
from subprocess import check_call
from typing import List  # novm

import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.filesystem import force_remove, working_dir

import llnl.util.filesystem as fs
from llnl.util.filesystem import working_dir, force_remove
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable

@@ -30,7 +29,7 @@ class AutotoolsPackage(PackageBase):

    They all have sensible defaults and for many packages the only thing
    necessary will be to override the helper method
    :meth:`~spack.build_systems.autotools.AutotoolsPackage.configure_args`.
    :py:meth:`~.AutotoolsPackage.configure_args`.
    For a finer tuning you may also override:

    +-----------------------------------------------+--------------------+
@@ -175,10 +174,7 @@ def runs_ok(script_abs_path):
        # Copy the good files over the bad ones
        for abs_path in to_be_patched:
            name = os.path.basename(abs_path)
            mode = os.stat(abs_path).st_mode
            os.chmod(abs_path, stat.S_IWUSR)
            fs.copy(substitutes[name], abs_path)
            os.chmod(abs_path, mode)

    @run_before('configure')
    def _set_autotools_environment_variables(self):
@@ -331,7 +327,7 @@ def flags_to_build_system_args(self, flags):

    def configure(self, spec, prefix):
        """Runs configure with the arguments specified in
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.configure_args`
        :py:meth:`~.AutotoolsPackage.configure_args`
        and an appropriately set prefix.
        """
        options = getattr(self, 'configure_flag_args', [])
@@ -345,11 +341,8 @@ def build(self, spec, prefix):
        """Makes the build targets specified by
        :py:attr:``~.AutotoolsPackage.build_targets``
        """
        # See https://autotools.io/automake/silent.html
        params = ['V=1']
        params += self.build_targets
        with working_dir(self.build_directory):
            inspect.getmodule(self).make(*params)
            inspect.getmodule(self).make(*self.build_targets)

    def install(self, spec, prefix):
        """Makes the install targets specified by
@@ -376,8 +369,8 @@ def _activate_or_not(
            activation_value=None
    ):
        """This function contains the current implementation details of
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.with_or_without` and
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.enable_or_disable`.
        :py:meth:`~.AutotoolsPackage.with_or_without` and
        :py:meth:`~.AutotoolsPackage.enable_or_disable`.

        Args:
            name (str): name of the variant that is being processed
@@ -385,7 +378,7 @@ def _activate_or_not(
                case of ``with_or_without``)
            deactivation_word (str): the default deactivation word ('without'
                in the case of ``with_or_without``)
            activation_value (typing.Callable): callable that accepts a single
            activation_value (callable): callable that accepts a single
                value. This value is either one of the allowed values for a
                multi-valued variant or the name of a bool-valued variant.
                Returns the parameter to be used when the value is activated.
@@ -420,7 +413,7 @@ def _activate_or_not(
            for ``<spec-name> foo=x +bar``

        Returns:
            list: list of strings that corresponds to the activation/deactivation
            list of strings that corresponds to the activation/deactivation
            of the variant that has been processed

        Raises:
@@ -501,7 +494,7 @@ def with_or_without(self, name, activation_value=None):

        Args:
            name (str): name of a valid multi-valued variant
            activation_value (typing.Callable): callable that accepts a single
            activation_value (callable): callable that accepts a single
                value and returns the parameter to be used leading to an entry
                of the type ``--with-{name}={parameter}``.

@@ -514,13 +507,12 @@ def with_or_without(self, name, activation_value=None):
        return self._activate_or_not(name, 'with', 'without', activation_value)

    def enable_or_disable(self, name, activation_value=None):
        """Same as
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.with_or_without`
        but substitute ``with`` with ``enable`` and ``without`` with ``disable``.
        """Same as :py:meth:`~.AutotoolsPackage.with_or_without` but substitute
        ``with`` with ``enable`` and ``without`` with ``disable``.

        Args:
            name (str): name of a valid multi-valued variant
            activation_value (typing.Callable): if present accepts a single value
            activation_value (callable): if present accepts a single value
                and returns the parameter to be used leading to an entry of the
                type ``--enable-{name}={parameter}``


@@ -1,234 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os

import llnl.util.tty as tty
from llnl.util.filesystem import install, mkdirp

from spack.build_systems.cmake import CMakePackage
from spack.package import run_after


def cmake_cache_path(name, value, comment=""):
    """Generate a string for a cmake cache variable"""
    return 'set({0} "{1}" CACHE PATH "{2}")\n'.format(name, value, comment)


def cmake_cache_string(name, value, comment=""):
    """Generate a string for a cmake cache variable"""
    return 'set({0} "{1}" CACHE STRING "{2}")\n'.format(name, value, comment)


def cmake_cache_option(name, boolean_value, comment=""):
    """Generate a string for a cmake configuration option"""

    value = "ON" if boolean_value else "OFF"
    return 'set({0} {1} CACHE BOOL "{2}")\n'.format(name, value, comment)
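The three helpers above render `set(... CACHE ...)` lines for the initial cache file; for example (values are illustrative):

```python
cmake_cache_path('HDF5_ROOT', '/opt/hdf5', 'hdf5 prefix')
#  -> 'set(HDF5_ROOT "/opt/hdf5" CACHE PATH "hdf5 prefix")\n'
cmake_cache_string('CMAKE_BUILD_TYPE', 'Release')
#  -> 'set(CMAKE_BUILD_TYPE "Release" CACHE STRING "")\n'
cmake_cache_option('ENABLE_TESTS', False)
#  -> 'set(ENABLE_TESTS OFF CACHE BOOL "")\n'
```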
|
||||
|
||||
|
||||
class CachedCMakePackage(CMakePackage):
|
||||
"""Specialized class for packages built using CMake initial cache.
|
||||
|
||||
This feature of CMake allows packages to increase reproducibility,
|
||||
especially between Spack- and manual builds. It also allows packages to
|
||||
sidestep certain parsing bugs in extremely long ``cmake`` commands, and to
|
||||
avoid system limits on the length of the command line."""
|
||||
|
||||
phases = ['initconfig', 'cmake', 'build', 'install']
|
||||
|
||||
@property
|
||||
def cache_name(self):
|
||||
return "{0}-{1}-{2}@{3}.cmake".format(
|
||||
self.name,
|
||||
self.spec.architecture,
|
||||
self.spec.compiler.name,
|
||||
self.spec.compiler.version,
|
||||
)
|
||||
|
||||
@property
|
||||
def cache_path(self):
|
||||
return os.path.join(self.stage.source_path, self.cache_name)
|
||||
|
||||
def flag_handler(self, name, flags):
|
||||
if name in ('cflags', 'cxxflags', 'cppflags', 'fflags'):
|
||||
return (None, None, None) # handled in the cmake cache
|
||||
return (flags, None, None)
|
||||
|
||||
def initconfig_compiler_entries(self):
|
||||
# This will tell cmake to use the Spack compiler wrappers when run
|
||||
# through Spack, but use the underlying compiler when run outside of
|
||||
# Spack
|
||||
spec = self.spec
|
||||
|
||||
# Fortran compiler is optional
|
||||
if "FC" in os.environ:
|
||||
spack_fc_entry = cmake_cache_path(
|
||||
"CMAKE_Fortran_COMPILER", os.environ['FC'])
|
||||
system_fc_entry = cmake_cache_path(
|
||||
"CMAKE_Fortran_COMPILER", self.compiler.fc)
|
||||
else:
|
||||
spack_fc_entry = "# No Fortran compiler defined in spec"
|
||||
system_fc_entry = "# No Fortran compiler defined in spec"
|
||||
|
||||
entries = [
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# Compilers",
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# Compiler Spec: {0}".format(spec.compiler),
|
||||
"#------------------{0}".format("-" * 60),
|
||||
'if(DEFINED ENV{SPACK_CC})\n',
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_C_COMPILER", os.environ['CC']),
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_CXX_COMPILER", os.environ['CXX']),
|
||||
' ' + spack_fc_entry,
|
||||
'else()\n',
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_C_COMPILER", self.compiler.cc),
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_CXX_COMPILER", self.compiler.cxx),
|
||||
' ' + system_fc_entry,
|
||||
'endif()\n'
|
||||
]
|
||||
|
||||
# use global spack compiler flags
|
||||
cppflags = ' '.join(spec.compiler_flags['cppflags'])
|
||||
if cppflags:
|
||||
# avoid always ending up with ' ' with no flags defined
|
||||
cppflags += ' '
|
||||
cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
|
||||
if cflags:
|
||||
entries.append(cmake_cache_string("CMAKE_C_FLAGS", cflags))
|
||||
cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
|
||||
if cxxflags:
|
||||
entries.append(cmake_cache_string("CMAKE_CXX_FLAGS", cxxflags))
|
||||
fflags = ' '.join(spec.compiler_flags['fflags'])
|
||||
if fflags:
|
||||
entries.append(cmake_cache_string("CMAKE_Fortran_FLAGS", fflags))
|
||||
|
||||
return entries
|
||||
|
||||
    def initconfig_mpi_entries(self):
        spec = self.spec

        if not spec.satisfies('^mpi'):
            return []

        entries = [
            "#------------------{0}".format("-" * 60),
            "# MPI",
            "#------------------{0}\n".format("-" * 60),
        ]

        entries.append(cmake_cache_path("MPI_C_COMPILER",
                                        spec['mpi'].mpicc))
        entries.append(cmake_cache_path("MPI_CXX_COMPILER",
                                        spec['mpi'].mpicxx))
        entries.append(cmake_cache_path("MPI_Fortran_COMPILER",
                                        spec['mpi'].mpifc))

        # Check for slurm
        using_slurm = False
        slurm_checks = ['+slurm',
                        'schedulers=slurm',
                        'process_managers=slurm']
        if any(spec['mpi'].satisfies(variant) for variant in slurm_checks):
            using_slurm = True

        # Determine MPIEXEC
        if using_slurm:
            if spec['mpi'].external:
                # Heuristic until we have dependents on externals
                mpiexec = '/usr/bin/srun'
            else:
                mpiexec = os.path.join(spec['slurm'].prefix.bin, 'srun')
        else:
            mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpirun')
            if not os.path.exists(mpiexec):
                mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpiexec')

        if not os.path.exists(mpiexec):
            msg = "Unable to determine MPIEXEC, %s tests may fail" % self.name
            entries.append("# {0}\n".format(msg))
            tty.warn(msg)
        else:
            # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
            # vs the older versions which expect MPIEXEC
            if self.spec["cmake"].satisfies('@3.10:'):
                entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE",
                                                mpiexec))
            else:
                entries.append(cmake_cache_path("MPIEXEC", mpiexec))

        # Determine MPIEXEC_NUMPROC_FLAG
        if using_slurm:
            entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-n"))
        else:
            entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-np"))

        return entries
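The method above warns rather than fails when no launcher is found, so a missing `mpirun`/`mpiexec` degrades tests instead of breaking the install. The same fallback logic, extracted as a standalone helper for illustration (name and signature hypothetical):

import os

def find_launcher(bindir, candidates=('mpirun', 'mpiexec')):
    # Return the first launcher present in bindir, or None if none exists.
    for name in candidates:
        path = os.path.join(bindir, name)
        if os.path.exists(path):
            return path
    return None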
    def initconfig_hardware_entries(self):
        spec = self.spec

        entries = [
            "#------------------{0}".format("-" * 60),
            "# Hardware",
            "#------------------{0}\n".format("-" * 60),
        ]

        if spec.satisfies('^cuda'):
            entries.append("#------------------{0}".format("-" * 30))
            entries.append("# Cuda")
            entries.append("#------------------{0}\n".format("-" * 30))

            cudatoolkitdir = spec['cuda'].prefix
            entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR",
                                            cudatoolkitdir))
            cudacompiler = "${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc"
            entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER",
                                            cudacompiler))

            if spec.satisfies('^mpi'):
                entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
                                                "${MPI_CXX_COMPILER}"))
            else:
                entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
                                                "${CMAKE_CXX_COMPILER}"))

        return entries
    def std_initconfig_entries(self):
        return [
            "#------------------{0}".format("-" * 60),
            "# !!!! This is a generated file, edit at own risk !!!!",
            "#------------------{0}".format("-" * 60),
            "# CMake executable path: {0}".format(
                self.spec['cmake'].command.path),
            "#------------------{0}\n".format("-" * 60),
        ]

    def initconfig(self, spec, prefix):
        cache_entries = (self.std_initconfig_entries() +
                         self.initconfig_compiler_entries() +
                         self.initconfig_mpi_entries() +
                         self.initconfig_hardware_entries() +
                         self.initconfig_package_entries())

        with open(self.cache_name, 'w') as f:
            for entry in cache_entries:
                f.write('%s\n' % entry)
            f.write('\n')

    @property
    def std_cmake_args(self):
        args = super(CachedCMakePackage, self).std_cmake_args
        args.extend(['-C', self.cache_path])
        return args

    @run_after('install')
    def install_cmake_cache(self):
        mkdirp(self.spec.prefix.share.cmake)
        install(self.cache_path, self.spec.prefix.share.cmake)
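Tying the pieces together: `initconfig` writes the host-config file, and `std_cmake_args` makes every configure step load it through CMake's `-C` option, so the cached `set(...)` calls are applied before any `-D` defines. A sketch of the effective invocation, with a hypothetical cache-file name and prefix (Spack actually drives cmake through its own `Executable` wrapper, not `subprocess`):

import subprocess

subprocess.check_call([
    'cmake',
    '-C', 'mypkg-1.0-gcc-9.3.0.cmake',         # the generated initconfig file
    '-DCMAKE_INSTALL_PREFIX:PATH=/opt/mypkg',  # plus the remaining std args
    '/path/to/source',
])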
@@ -10,11 +10,11 @@
import re
from typing import List  # novm

from llnl.util.filesystem import working_dir

import spack.build_environment
from spack.directives import conflicts, depends_on, variant
from spack.package import InstallError, PackageBase, run_after
from llnl.util.filesystem import working_dir
from spack.util.environment import filter_system_paths
from spack.directives import depends_on, variant, conflicts
from spack.package import PackageBase, InstallError, run_after

# Regex to extract the primary generator from the CMake generator
# string.
@@ -185,9 +185,13 @@ def _std_args(pkg):
            define('CMAKE_INSTALL_RPATH_USE_LINK_PATH', False),
            define('CMAKE_INSTALL_RPATH',
                   spack.build_environment.get_rpaths(pkg)),
            define('CMAKE_PREFIX_PATH',
                   spack.build_environment.get_cmake_prefix_path(pkg))
        ])
        # CMake's find_package() looks in CMAKE_PREFIX_PATH first; help CMake
        # find immediate link dependencies in the right places:
        deps = [d.prefix for d in
                pkg.spec.dependencies(deptype=('build', 'link'))]
        deps = filter_system_paths(deps)
        args.append(define('CMAKE_PREFIX_PATH', deps))
        return args

    @staticmethod
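`filter_system_paths` exists to keep default system prefixes out of `CMAKE_PREFIX_PATH`, since CMake searches those anyway and they could shadow Spack-built dependencies. A rough stand-in, with the system-directory list abbreviated for illustration:

SYSTEM_DIRS = ['/', '/usr', '/usr/local']  # abbreviated, illustrative

def filter_system_paths(paths):
    # Drop prefixes that CMake and the linker already search by default.
    return [p for p in paths if p not in SYSTEM_DIRS]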
@@ -236,7 +240,7 @@ def define_from_variant(self, cmake_var, variant=None):
        of ``cmake_var``.

        This utility function is similar to
        :meth:`~spack.build_systems.autotools.AutotoolsPackage.with_or_without`.
        :py:meth:`~.AutotoolsPackage.with_or_without`.

        Examples:

@@ -254,9 +258,9 @@ def define_from_variant(self, cmake_var, variant=None):

        .. code-block:: python

           [self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
            self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
            self.define_from_variant('SWR')]
           [define_from_variant('BUILD_SHARED_LIBS', 'shared'),
            define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
            define_from_variant('SWR')]

        will generate the following configuration options:
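The `define()` helper that `_std_args` and `define_from_variant` build on is not shown in these hunks. A minimal sketch of the semantics assumed here (booleans map to ON/OFF with type BOOL, sequences join with semicolons as STRING), followed by the options the docstring example would be expected to produce for a hypothetical spec `+shared cxxstd=14 swr=avx,avx2`:

def define(cmake_var, value):
    # Illustrative reimplementation, not the actual CMakePackage helper.
    if isinstance(value, bool):
        kind, value = 'BOOL', 'ON' if value else 'OFF'
    else:
        kind = 'STRING'
        if isinstance(value, (list, tuple)):
            value = ';'.join(str(v) for v in value)
    return '-D{0}:{1}={2}'.format(cmake_var, kind, value)

# Expected options for the example above (variant values hypothetical):
#   -DBUILD_SHARED_LIBS:BOOL=ON
#   -DCMAKE_CXX_STANDARD:STRING=14
#   -DSWR:STRING=avx;avx2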
@@ -3,10 +3,10 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.multimethod import when
from spack.package import PackageBase
from spack.directives import depends_on, variant, conflicts

import spack.variant


class CudaPackage(PackageBase):
@@ -79,111 +79,108 @@ def cuda_flags(arch_list):
    depends_on('cuda@11.0:', when='cuda_arch=80')
    depends_on('cuda@11.1:', when='cuda_arch=86')

    # From the NVIDIA install guide we know of conflicts for particular
    # platforms (linux, darwin), architectures (x86, powerpc) and compilers
    # (gcc, clang). We don't restrict %gcc and %clang conflicts to
    # platform=linux, since they should also apply to platform=cray, and may
    # apply to platform=darwin. We currently do not provide conflicts for
    # platform=darwin with %apple-clang.

    # GCC
    # According to
    # https://github.com/spack/spack/pull/25054#issuecomment-886531664
    # these conflicts are valid independently from the architecture

    # minimum supported versions
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0:')
    conflicts('%gcc@:5', when='+cuda ^cuda@11.4:')

    # maximum supported version
    # NOTE:
    # in order not to constrain future cuda versions to old gcc versions,
    # it has been decided to use an upper bound for the latest version.
    # This implies that the last one in the list has to be updated at
    # each release of a new cuda minor version.
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.4')

    # https://gist.github.com/ax3l/9489132#gistcomment-3860114
    conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')
    # There are at least three cases to be aware of for compiler conflicts
    # 1. Linux x86_64
    # 2. Linux ppc64le
    # 3. Mac OS X
    # CUDA-compiler conflicts are version-to-version specific and are
    # difficult to express with the current Spack conflict syntax

    # Linux x86_64 compiler conflicts from here:
    # https://gist.github.com/ax3l/9489132
    with when('~allow-unsupported-compilers'):
        conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:')
        conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:')
        conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:')
        conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:')
        conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:')
        conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:')
        conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:')
        conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:')
        conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:')
        conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:')
        conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
        conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
        conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:')
        conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:')
        conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 target=x86_64:')
        conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:')
        conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:')
        conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:')
        conflicts('%clang@:3.7,8.1:',
                  when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:')
        conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:')
        conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:')
        conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
        conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')
    arch_platform = ' target=x86_64: platform=linux'
    conflicts('%gcc@5:', when='+cuda ^cuda@:7.5' + arch_platform)
    conflicts('%gcc@6:', when='+cuda ^cuda@:8' + arch_platform)
    conflicts('%gcc@7:', when='+cuda ^cuda@:9.1' + arch_platform)
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89' + arch_platform)
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0.2:' + arch_platform)
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.1.0' + arch_platform)
    conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27' + arch_platform)
    conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5' + arch_platform)
    conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8' + arch_platform)
    conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1' + arch_platform)
    conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10' + arch_platform)
    conflicts('%pgi@:17,20:',
              when='+cuda ^cuda@10.1.105:10.2.89' + arch_platform)
    conflicts('%pgi@:17,21:',
              when='+cuda ^cuda@11.0.2:11.1.0' + arch_platform)
    conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5' + arch_platform)
    conflicts('%clang@:3.7,4:',
              when='+cuda ^cuda@8.0:9.0' + arch_platform)
    conflicts('%clang@:3.7,4.1:',
              when='+cuda ^cuda@9.1' + arch_platform)
    conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2' + arch_platform)
    conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130' + arch_platform)
    conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105' + arch_platform)
    conflicts('%clang@:3.7,8.1:',
              when='+cuda ^cuda@10.1.105:10.1.243' + arch_platform)
    conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89' + arch_platform)
    conflicts('%clang@:5', when='+cuda ^cuda@11.0.2:' + arch_platform)
    conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
    conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0' + arch_platform)

        # x86_64 vs. ppc64le differ according to NVidia docs
        # Linux ppc64le compiler conflicts from the tables in the docs below:
        # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html
    # x86_64 vs. ppc64le differ according to NVidia docs
    # Linux ppc64le compiler conflicts from the tables in the docs below:
    # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html

        # information prior to CUDA 9 difficult to find
        conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:')
        conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:')
        conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:')
        # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
        conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:')
        conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:')
        conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:')
        conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:')
        conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:')
        conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:')
        conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:')
        conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:')
        conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:')
        conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:')
        conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:')
        conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')
    arch_platform = ' target=ppc64le: platform=linux'
    # information prior to CUDA 9 difficult to find
    conflicts('%gcc@6:', when='+cuda ^cuda@:9' + arch_platform)
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243' + arch_platform)
    # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0.2:' + arch_platform)
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.1.0' + arch_platform)
    conflicts('%pgi', when='+cuda ^cuda@:8' + arch_platform)
    conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185' + arch_platform)
    conflicts('%pgi@:17', when='+cuda ^cuda@:10' + arch_platform)
    conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176' + arch_platform)
    conflicts('%clang@5:', when='+cuda ^cuda@:9.1' + arch_platform)
    conflicts('%clang@6:', when='+cuda ^cuda@:9.2' + arch_platform)
    conflicts('%clang@7:', when='+cuda ^cuda@10.0.130' + arch_platform)
    conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105' + arch_platform)
    conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89' + arch_platform)
    conflicts('%clang@:5', when='+cuda ^cuda@11.0.2:' + arch_platform)
    conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
    conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0' + arch_platform)

        # Intel is mostly relevant for x86_64 Linux, even though it also
        # exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
        conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1')
        conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:')
        conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:')
        conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:')
        conflicts('%intel@:14.9', when='+cuda ^cuda@7:')
        # Intel 15.x is compatible with CUDA 7 thru current CUDA
        conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
        conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
        conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
        conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
        conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
        conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0')
    # Intel is mostly relevant for x86_64 Linux, even though it also
    # exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
    conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1')
    conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:')
    conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:')
    conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:')
    conflicts('%intel@:14.9', when='+cuda ^cuda@7:')
    # Intel 15.x is compatible with CUDA 7 thru current CUDA
    conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
    conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
    conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
    conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
    conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
    conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0')

        # XL is mostly relevant for ppc64le Linux
        conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
        conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
        conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0')
    # XL is mostly relevant for ppc64le Linux
    conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
    conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
    conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0')

        # Darwin.
        # TODO: add missing conflicts for %apple-clang cuda@:10
        conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ')
    # Mac OS X
    # platform = ' platform=darwin'
    # Apple Xcode clang vs. LLVM clang are difficult to specify
    # with spack syntax. The Xcode clang name is `clang@x.y.z-apple`,
    # which precludes ranges being specified. We have proposed
    # renaming Xcode clang to `clang@apple-x.y.z` or even
    # `clang-apple@x.y.z` as a possible fix.
    # Compiler conflicts will eventually be taken from here:
    # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#abstract
    conflicts('platform=darwin', when='+cuda ^cuda@11.0.2:')

    # Make sure cuda_arch cannot be used without +cuda
    for value in cuda_arch_values:
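The loop body is cut off at the hunk boundary. A self-contained sketch of the guard pattern the comment describes, using a stand-in `conflicts` and hypothetical architecture values:

cuda_arch_values = ('70', '80', '86')  # hypothetical subset

def conflicts(constraint, when, msg=None):
    # Stand-in for spack.directives.conflicts, for illustration only.
    print('conflict: {0} when {1}'.format(constraint, when))

for value in cuda_arch_values:
    conflicts('cuda_arch={0}'.format(value), when='~cuda',
              msg='cuda_arch requires +cuda')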
@@ -3,8 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import spack.package
import spack.util.url
import spack.package


class GNUMirrorPackage(spack.package.PackageBase):
@@ -4,32 +4,26 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


import glob
import inspect
import os
import re
import sys
import glob
import tempfile
import re
import inspect
import xml.etree.ElementTree as ElementTree

import llnl.util.tty as tty
from llnl.util.filesystem import (
    HeaderList,
    LibraryList,
    ancestor,
    filter_file,
    find_headers,
    find_libraries,
    find_system_libraries,
    install,
)

from spack.build_environment import dso_suffix
from spack.package import InstallError, PackageBase, run_after
from llnl.util.filesystem import \
    install, ancestor, filter_file, \
    HeaderList, find_headers, \
    LibraryList, find_libraries, find_system_libraries

from spack.version import Version, ver
from spack.package import PackageBase, run_after, InstallError
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable
from spack.util.prefix import Prefix
from spack.version import Version, ver
from spack.build_environment import dso_suffix

# A couple of utility functions that might be useful in general. If so, they
# should really be defined elsewhere, unless deemed heretical.
@@ -157,7 +151,7 @@ def license_files(self):
            '+advisor': 'advisor',
            '+inspector': 'inspector',
            '+itac': 'itac',
            '+vtune': 'vtune_profiler',
            '+vtune': 'vtune_amplifier',
        }.items():
            if variant in self.spec:
                dirs.append(self.normalize_path(
@@ -208,8 +202,7 @@ def pset_components(self):
            '+itac': ' intel-itac intel-ta intel-tc'
                     ' intel-trace-analyzer intel-trace-collector',
            # Trace Analyzer and Collector
            '+vtune': ' intel-vtune'
            # VTune, ..-profiler since 2020, ..-amplifier before
            '+vtune': ' intel-vtune-amplifier',  # VTune
        }.items():
            if variant in self.spec:
                c += components_to_add
@@ -368,7 +361,7 @@ def normalize_suite_dir(self, suite_dir_name, version_globs=['*.*.*']):
            toplevel psxevars.sh or equivalent file to source (and thus by
            the modulefiles that Spack produces).

            version_globs (list): Suffix glob patterns (most specific
            version_globs (list of str): Suffix glob patterns (most specific
                first) expected to qualify suite_dir_name to its fully
                version-specific install directory (as opposed to a
                compatibility directory or symlink).
@@ -542,9 +535,8 @@ def normalize_path(self, component_path, component_suite_dir=None,
            [None, '2016:', 'compilers_and_libraries'],
            ['advisor', ':2016', 'advisor_xe'],
            ['inspector', ':2016', 'inspector_xe'],
            ['vtune_profiler', ':2017', 'vtune_amplifier_xe'],
            ['vtune_amplifier', ':2017', 'vtune_amplifier_xe'],
            ['vtune', ':2017', 'vtune_amplifier_xe'],  # alt.
            ['vtune_profiler', ':2019', 'vtune_amplifier'],
            ['itac', ':', 'itac', [os.sep + standalone_glob]],
        ]:
            if cs == rename_rule[0] and v.satisfies(ver(rename_rule[1])):
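The rename table leans on Spack's inclusive version-range syntax: `':2016'` matches any version up to and including 2016, and `'2016:'` matches 2016 and later. A small illustration (the suite version is made up):

from spack.version import ver

v = ver('2017.1.024')             # hypothetical suite version
print(v.satisfies(ver(':2016')))  # False: past the upper bound
print(v.satisfies(ver('2016:')))  # True: inside the open-ended range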
@@ -1095,7 +1087,7 @@ def _setup_dependent_env_callback(
            # Intel MPI since 2019 depends on libfabric, which is not in the
            # lib directory but in a directory of its own, which should be
            # included in the rpath
            if self.version_yearlike >= ver('2019'):
            if self.version >= ver('2019'):
                d = ancestor(self.component_lib_dir('mpi'))
                libfabrics_path = os.path.join(d, 'libfabric', 'lib')
                env.append_path('SPACK_COMPILER_EXTRA_RPATHS',
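The `env.append_path(...)` call is truncated by the hunk boundary; its role is to append a directory to the path-like `SPACK_COMPILER_EXTRA_RPATHS` variable. A standalone illustration, with a made-up MPI prefix (the remaining argument of the truncated call is assumed to be the `libfabrics_path` computed just above):

import os

from spack.util.environment import EnvironmentModifications

env = EnvironmentModifications()
d = '/opt/intel/compilers_and_libraries_2019/linux/mpi'  # hypothetical
libfabrics_path = os.path.join(d, 'libfabric', 'lib')
env.append_path('SPACK_COMPILER_EXTRA_RPATHS', libfabrics_path)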
@@ -9,7 +9,6 @@

import llnl.util.tty as tty
from llnl.util.filesystem import working_dir

from spack.package import PackageBase, run_after


@@ -5,7 +5,6 @@


from llnl.util.filesystem import install_tree, working_dir

from spack.directives import depends_on
from spack.package import PackageBase, run_after
from spack.util.executable import which