spack/share/spack/spack-completion.bash


# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

# NOTE: spack-completion.bash is auto-generated by:
#
#   $ spack commands --aliases --format=bash
#       --header=bash/spack-completion.bash --update=spack-completion.bash
#
# Please do not manually modify this file.

# The following global variables are set by Bash programmable completion:
#
#     COMP_CWORD:      An index into ${COMP_WORDS} of the word containing the
#                      current cursor position
#     COMP_KEY:        The key (or final key of a key sequence) used to invoke
#                      the current completion function
#     COMP_LINE:       The current command line
#     COMP_POINT:      The index of the current cursor position relative to the
#                      beginning of the current command
#     COMP_TYPE:       Set to an integer value corresponding to the type of
#                      completion attempted that caused a completion function
#                      to be called
#     COMP_WORDBREAKS: The set of characters that the readline library treats
#                      as word separators when performing word completion
#     COMP_WORDS:      An array variable consisting of the individual words in
#                      the current command line
#
# The following global variable is used by Bash programmable completion:
#
#     COMPREPLY:       An array variable from which bash reads the possible
#                      completions generated by a shell function invoked by the
#                      programmable completion facility
#
# See `man bash` for more details.
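# A worked example with hypothetical values: if the user types
# `spack install py-` and presses TAB, bash invokes the registered
# completion function with roughly:
#
#     COMP_LINE='spack install py-'   COMP_POINT=17
#     COMP_WORDS=(spack install py-)  COMP_CWORD=2
#
# COMP_POINT equals ${#COMP_LINE} here because the cursor sits at the
# end of the line.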
if test -n "${ZSH_VERSION:-}" ; then
    if [[ "$(emulate)" = zsh ]] ; then
        if ! typeset -f compdef >& /dev/null ; then
            # ensure base completion support is enabled, ignore insecure directories
            autoload -U +X compinit && compinit -i
        fi
        if ! typeset -f complete >& /dev/null ; then
            # ensure bash compatible completion support is enabled
            autoload -U +X bashcompinit && bashcompinit
        fi
        emulate sh -c "source '$0:A'"
        return # stop interpreting file
    fi
fi
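# To enable these completions, source this file from your shell startup
# file; the path below is the conventional checkout location, adjust it
# to your installation:
#
#     source $SPACK_ROOT/share/spack/spack-completion.bash
#
# The zsh guard above lets the same file work under zsh by loading
# bash-compatible completion support before re-sourcing itself.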
# Bash programmable completion for Spack
_bash_completion_spack() {
    # In all following examples, let the cursor be denoted by brackets, i.e. []

    # For our purposes, flags should not affect tab completion. For instance,
    # `spack install []` and `spack -d install --jobs 8 []` should both give the same
    # possible completions. Therefore, we need to ignore any flags in COMP_WORDS.
    local -a COMP_WORDS_NO_FLAGS
    local index=0
    while [[ "$index" -lt "$COMP_CWORD" ]]
    do
        if [[ "${COMP_WORDS[$index]}" == [a-z]* ]]
        then
            COMP_WORDS_NO_FLAGS+=("${COMP_WORDS[$index]}")
        fi
        let index++
    done

    # Options will be listed by a subfunction named after non-flag arguments.
    # For example, `spack -d install []` will call _spack_install
    # and `spack compiler add []` will call _spack_compiler_add
    local subfunction=$(IFS='_'; echo "_${COMP_WORDS_NO_FLAGS[*]}")

    # Translate dashes to underscores, as dashes are not permitted in
    # function names in compatibility mode. See https://github.com/spack/spack/pull/4079
    subfunction=${subfunction//-/_}

    # However, the word containing the current cursor position needs to be
    # added regardless of whether or not it is a flag. This allows us to
    # complete something like `spack install --keep-st[]`
    COMP_WORDS_NO_FLAGS+=("${COMP_WORDS[$COMP_CWORD]}")

    # Since we have removed all words after COMP_CWORD, we can safely assume
    # that COMP_CWORD_NO_FLAGS is simply the index of the last element
    local COMP_CWORD_NO_FLAGS=$((${#COMP_WORDS_NO_FLAGS[@]} - 1))

    # There is no guarantee that the cursor is at the end of the command line
    # when tab completion is invoked. For example, in the following situation:
    #     `spack -d [] install`
    # if the user presses the TAB key, a list of valid flags should be listed.
    # Note that we cannot simply ignore everything after the cursor. In the
    # previous scenario, the user should expect to see a list of flags, but
    # not of other subcommands. Obviously, `spack -d list install` would be
    # invalid syntax. To accomplish this, we use the variable list_options,
    # which is true if the current word starts with '-' or if the cursor is
    # not at the end of the line.
    local list_options=false
    if [[ "${COMP_WORDS[$COMP_CWORD]}" == -* || "$COMP_POINT" -ne "${#COMP_LINE}" ]]
    then
        list_options=true
    fi

    # In general, when invoking tab completion, the user is not expecting to
    # see optional flags mixed in with subcommands or package names. Tab
    # completion is used by those who are either lazy or just bad at spelling.
    # If someone doesn't remember what flag to use, seeing single letter flags
    # in their results won't help them, and they should instead consult the
    # documentation. However, if the user explicitly declares that they are
    # looking for a flag, we can certainly help them out.
    #     `spack install -[]`
    # and
    #     `spack install --[]`
    # should list all flags and long flags, respectively. Furthermore, if a
    # subcommand has no non-flag completions, such as `spack arch []`, it
    # should list flag completions.
    local cur=${COMP_WORDS_NO_FLAGS[$COMP_CWORD_NO_FLAGS]}

    # If the cursor is in the middle of the line, like:
    #     `spack -d [] install`
    # COMP_WORDS will not contain the empty character, so we have to add it.
    if [[ "${COMP_LINE:$COMP_POINT-1:1}" == " " ]]
    then
        cur=""
    fi

    # Uncomment this line to enable logging
    #_test_vars >> temp

    # Make sure function exists before calling it
    local rgx # this dance is necessary to cover bash and zsh regex
    rgx="$subfunction.*function.* "
    if [[ "$(LC_ALL=C type $subfunction 2>&1)" =~ $rgx ]]
    then
        $subfunction
        COMPREPLY=($(compgen -W "$SPACK_COMPREPLY" -- "$cur"))
    fi
}
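# Dispatch example (hypothetical): for `spack -d install --jobs 8 py-[]`,
# the loop above drops "-d", "--jobs", and "8", leaving
#     COMP_WORDS_NO_FLAGS=(spack install)
# so subfunction resolves to `_spack_install`; the current word "py-" is
# then appended, and compgen narrows the package list to matches for it.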
# Helper functions for subcommands
# Results of each query are cached via environment variables
_subcommands() {
    if [[ -z "${SPACK_SUBCOMMANDS:-}" ]]
    then
        SPACK_SUBCOMMANDS="$(spack commands)"
    fi
    SPACK_COMPREPLY="$SPACK_SUBCOMMANDS"
}

_all_packages() {
    if [[ -z "${SPACK_ALL_PACKAGES:-}" ]]
    then
        SPACK_ALL_PACKAGES="$(spack list)"
    fi
    SPACK_COMPREPLY="$SPACK_ALL_PACKAGES"
}

_all_resource_hashes() {
    if [[ -z "${SPACK_ALL_RESOURCE_HASHES:-}" ]]
    then
        SPACK_ALL_RESOURCE_HASHES="$(spack resource list --only-hashes)"
    fi
    SPACK_COMPREPLY="$SPACK_ALL_RESOURCE_HASHES"
}

_installed_packages() {
    if [[ -z "${SPACK_INSTALLED_PACKAGES:-}" ]]
    then
        SPACK_INSTALLED_PACKAGES="$(spack --color=never find --no-groups)"
    fi
    SPACK_COMPREPLY="$SPACK_INSTALLED_PACKAGES"
}

_installed_compilers() {
    if [[ -z "${SPACK_INSTALLED_COMPILERS:-}" ]]
    then
        SPACK_INSTALLED_COMPILERS="$(spack compilers | egrep -v "^(-|=)")"
    fi
    SPACK_COMPREPLY="$SPACK_INSTALLED_COMPILERS"
}

_providers() {
    if [[ -z "${SPACK_PROVIDERS:-}" ]]
    then
        SPACK_PROVIDERS="$(spack providers)"
    fi
    SPACK_COMPREPLY="$SPACK_PROVIDERS"
}

_mirrors() {
    if [[ -z "${SPACK_MIRRORS:-}" ]]
    then
        SPACK_MIRRORS="$(spack mirror list | awk '{print $1}')"
    fi
    SPACK_COMPREPLY="$SPACK_MIRRORS"
}

_repos() {
    if [[ -z "${SPACK_REPOS:-}" ]]
    then
        SPACK_REPOS="$(spack repo list | awk '{print $1}')"
    fi
    SPACK_COMPREPLY="$SPACK_REPOS"
}

_unit_tests() {
    if [[ -z "${SPACK_TESTS:-}" ]]
    then
        SPACK_TESTS="$(spack unit-test -l)"
    fi
    SPACK_COMPREPLY="$SPACK_TESTS"
}

_environments() {
    if [[ -z "${SPACK_ENVIRONMENTS:-}" ]]
    then
        SPACK_ENVIRONMENTS="$(spack env list)"
    fi
    SPACK_COMPREPLY="$SPACK_ENVIRONMENTS"
}

_keys() {
    if [[ -z "${SPACK_KEYS:-}" ]]
    then
        SPACK_KEYS="$(spack gpg list)"
    fi
    SPACK_COMPREPLY="$SPACK_KEYS"
}

_config_sections() {
    if [[ -z "${SPACK_CONFIG_SECTIONS:-}" ]]
    then
        SPACK_CONFIG_SECTIONS="$(spack config list)"
    fi
    SPACK_COMPREPLY="$SPACK_CONFIG_SECTIONS"
}

_extensions() {
    if [[ -z "${SPACK_EXTENSIONS:-}" ]]
    then
        SPACK_EXTENSIONS="$(spack extensions)"
    fi
    SPACK_COMPREPLY="$SPACK_EXTENSIONS"
}
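# The caches above are plain shell variables, so they persist for the
# lifetime of the shell. If a cached list goes stale (say, after
# installing a new package), unset the variable to refresh it:
#
#     unset SPACK_INSTALLED_PACKAGES  # next completion reruns `spack find`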
# Testing functions
# Function for unit testing tab completion
# Syntax: _spack_completions spack install py-
_spack_completions() {
    local COMP_CWORD COMP_KEY COMP_LINE COMP_POINT COMP_TYPE COMP_WORDS COMPREPLY

    # Set each variable the way bash would
    COMP_LINE="$*"
    COMP_POINT=${#COMP_LINE}
    COMP_WORDS=("$@")
    if [[ ${COMP_LINE: -1} == ' ' ]]
    then
        COMP_WORDS+=('')
    fi
    COMP_CWORD=$((${#COMP_WORDS[@]} - 1))
    COMP_KEY=9   # ASCII 09: Horizontal Tab
    COMP_TYPE=64 # ASCII 64: '@', to list completions if the word is not unmodified

    # Run Spack's tab completion function
    _bash_completion_spack

    # Return the result
    echo "${COMPREPLY[@]:-}"
}
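# Example run (hypothetical output; results depend on your package repos):
#
#     $ _spack_completions spack install py-num
#     py-numpy py-numpydoc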
# Log the environment variables used
# Syntax: _test_vars >> temp
_test_vars() {
    echo "-----------------------------------------------------"
    echo "Variables set by bash:"
    echo
    echo "COMP_LINE: '$COMP_LINE'"
    echo "# COMP_LINE: '${#COMP_LINE}'"
    echo "COMP_WORDS: $(_pretty_print COMP_WORDS[@])"
    echo "# COMP_WORDS: '${#COMP_WORDS[@]}'"
    echo "COMP_CWORD: '$COMP_CWORD'"
    echo "COMP_KEY: '$COMP_KEY'"
    echo "COMP_POINT: '$COMP_POINT'"
    echo "COMP_TYPE: '$COMP_TYPE'"
    echo "COMP_WORDBREAKS: '$COMP_WORDBREAKS'"
    echo
    echo "Intermediate variables:"
    echo
    echo "COMP_WORDS_NO_FLAGS: $(_pretty_print COMP_WORDS_NO_FLAGS[@])"
    echo "# COMP_WORDS_NO_FLAGS: '${#COMP_WORDS_NO_FLAGS[@]}'"
    echo "COMP_CWORD_NO_FLAGS: '$COMP_CWORD_NO_FLAGS'"
    echo
    echo "Subfunction: '$subfunction'"
    if $list_options
    then
        echo "List options: 'True'"
    else
        echo "List options: 'False'"
    fi
    echo "Current word: '$cur'"
}
# Pretty-prints one or more arrays
# Syntax: _pretty_print array1[@] ...
_pretty_print() {
    for arg in "$@"
    do
        local array=("${!arg}")
        printf "%s: [" "$arg"
        printf "'%s'" "${array[0]}"
        printf ", '%s'" "${array[@]:1}"
        echo "]"
    done
}
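# Example (hypothetical array):
#
#     $ fruits=(apple 'banana split')
#     $ _pretty_print fruits[@]
#     fruits[@]: ['apple', 'banana split']

# Register the completion function. `-o bashdefault -o default` makes
# bash fall back to its default (filename) completion whenever the
# function generates no matches.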
complete -o bashdefault -o default -F _bash_completion_spack spack
# Completion for spacktivate
complete -o bashdefault -o default -F _bash_completion_spack spacktivate
_spacktivate() {
    _spack_env_activate
}
# Spack commands
#
# Everything below here is auto-generated.
_spack() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -H --all-help --color -c --config -C --config-scope -d --debug --timestamp --pdb -e --env -D --env-dir -E --no-env --use-env-repo -k --insecure -l --enable-locks -L --disable-locks -m --mock -b --bootstrap -p --profile --sorted-profile --lines -v --verbose --stacktrace --backtrace -V --version --print-shell-vars"
    else
        SPACK_COMPREPLY="add arch audit blame bootstrap build-env buildcache cd change checksum ci clean clone commands compiler compilers concretize config containerize create debug dependencies dependents deprecate dev-build develop diff docs edit env extensions external fetch find gc gpg graph help info install license list load location log-parse maintainers make-installer mark mirror module patch pkg providers pydoc python reindex remove rm repo resource restage solve spec stage style tags test test-env tutorial undevelop uninstall unit-test unload url verify versions view"
    fi
}
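# Illustrative behavior of the generated pattern: `spack -[TAB]` offers
# the flags listed above, while `spack [TAB]` offers the subcommand names.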
_spack_add() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -l --list-name"
    else
        _all_packages
    fi
}
_spack_arch() {
    SPACK_COMPREPLY="-h --help -g --generic-target --known-targets -p --platform -o --operating-system -t --target -f --frontend -b --backend"
}
_spack_audit() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help"
    else
        SPACK_COMPREPLY="configs packages-https packages list"
    fi
}
_spack_audit_configs() {
    SPACK_COMPREPLY="-h --help"
}
_spack_audit_packages_https() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --all"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_audit_packages() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_audit_list() {
    SPACK_COMPREPLY="-h --help"
}
_spack_blame() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -t --time -p --percent -g --git --json"
    else
        _all_packages
    fi
}
_spack_bootstrap() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help"
    else
        SPACK_COMPREPLY="now status enable disable reset root list add remove mirror"
    fi
}
_spack_bootstrap_now() {
    SPACK_COMPREPLY="-h --help --dev"
}
_spack_bootstrap_status() {
    SPACK_COMPREPLY="-h --help --optional --dev"
}
_spack_bootstrap_enable() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --scope"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_bootstrap_disable() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --scope"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_bootstrap_reset() {
    SPACK_COMPREPLY="-h --help -y --yes-to-all"
}
_spack_bootstrap_root() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --scope"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_bootstrap_list() {
    SPACK_COMPREPLY="-h --help --scope"
}
_spack_bootstrap_add() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --scope --trust"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_bootstrap_remove() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_bootstrap_mirror() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --binary-packages --dev"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_build_env() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --clean --dirty -U --fresh --reuse --reuse-deps --dump --pickle"
    else
        _all_packages
    fi
}
_spack_buildcache() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help"
    else
        SPACK_COMPREPLY="push create install list keys preview check download get-buildcache-name save-specfile sync update-index rebuild-index"
    fi
}
_spack_buildcache_push() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -f --force --allow-root -a --unsigned -u --key -k --update-index --rebuild-index --spec-file --only --fail-fast"
    else
        _mirrors
    fi
}
_spack_buildcache_create() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -f --force --allow-root -a --unsigned -u --key -k --update-index --rebuild-index --spec-file --only --fail-fast"
    else
        _mirrors
    fi
}
_spack_buildcache_install() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -f --force -m --multiple -u --unsigned -o --otherarch"
    else
        _all_packages
    fi
}
_spack_buildcache_list() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -l --long -L --very-long -N --namespaces -v --variants -a --allarch"
    else
        _all_packages
    fi
}
_spack_buildcache_keys() {
    SPACK_COMPREPLY="-h --help -i --install -t --trust -f --force"
}
_spack_buildcache_preview() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help"
    else
        _installed_packages
    fi
}
_spack_buildcache_check() {
    SPACK_COMPREPLY="-h --help -m --mirror-url -o --output-file --scope -s --spec --spec-file"
}
_spack_buildcache_download() {
    SPACK_COMPREPLY="-h --help -s --spec --spec-file -p --path"
}
_spack_buildcache_get_buildcache_name() {
    SPACK_COMPREPLY="-h --help -s --spec --spec-file"
}
_spack_buildcache_save_specfile() {
    SPACK_COMPREPLY="-h --help --root-spec --root-specfile -s --specs --specfile-dir"
}
_spack_buildcache_sync() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --manifest-glob"
    else
        SPACK_COMPREPLY=""
    fi
}
_spack_buildcache_update_index() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -k --keys"
    else
        _mirrors
    fi
}
_spack_buildcache_rebuild_index() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -k --keys"
    else
        _mirrors
    fi
}
_spack_cd() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -m --module-dir -r --spack-root -i --install-dir -p --package-dir -P --packages -s --stage-dir -S --stages --source-dir -b --build-dir -e --env --first"
    else
        _all_packages
    fi
}
_spack_change() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -l --list-name --match-spec -a --all"
    else
        _all_packages
    fi
}
_spack_checksum() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --keep-stage -b --batch -l --latest -p --preferred -a --add-to-package --verify"
    else
        _all_packages
    fi
}
_spack_ci() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help"
    else
        SPACK_COMPREPLY="generate rebuild-index rebuild reproduce-build"
    fi
}
_spack_ci_generate() {
    SPACK_COMPREPLY="-h --help --output-file --copy-to --optimize --dependencies --buildcache-destination --prune-dag --no-prune-dag --check-index-only --artifacts-root"
}
_spack_ci_rebuild_index() {
    SPACK_COMPREPLY="-h --help"
}
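# Each reply function below follows the same shape: when $list_options is
# true (generally, when the word being completed looks like a flag), the
# reply is that command's option list; otherwise it defers to a helper such
# as _all_packages, _installed_compilers, _config_sections, or _environments
# to suggest positional arguments drawn from the current Spack state.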
_spack_ci_rebuild() {
SPACK_COMPREPLY="-h --help -t --tests --fail-fast"
}
_spack_ci_reproduce_build() {
if $list_options
then
SPACK_COMPREPLY="-h --help --runtime --working-dir -s --autostart --gpg-file --gpg-url"
else
SPACK_COMPREPLY=""
fi
}
_spack_clean() {
if $list_options
then
SPACK_COMPREPLY="-h --help -s --stage -d --downloads -f --failures -m --misc-cache -p --python-cache -b --bootstrap -a --all"
else
_all_packages
fi
}
_spack_clone() {
if $list_options
then
SPACK_COMPREPLY="-h --help -r --remote"
else
SPACK_COMPREPLY=""
fi
}
_spack_commands() {
if $list_options
then
SPACK_COMPREPLY="-h --help --update-completion -a --aliases --format --header --update"
else
SPACK_COMPREPLY=""
fi
}
_spack_compiler() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="find add remove rm list info"
fi
}
_spack_compiler_find() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
SPACK_COMPREPLY=""
fi
}
_spack_compiler_add() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
SPACK_COMPREPLY=""
fi
}
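# Aliased subcommands (e.g. remove/rm below) are generated as separate but
# identical reply functions, one per alias.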
_spack_compiler_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all --scope"
else
_installed_compilers
fi
}
_spack_compiler_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all --scope"
else
_installed_compilers
fi
}
_spack_compiler_list() {
SPACK_COMPREPLY="-h --help --scope"
}
_spack_compiler_info() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
_installed_compilers
fi
}
_spack_compilers() {
SPACK_COMPREPLY="-h --help --scope"
}
_spack_concretize() {
SPACK_COMPREPLY="-h --help -f --force --test -q --quiet -U --fresh --reuse --reuse-deps -j --jobs"
}
_spack_config() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
SPACK_COMPREPLY="get blame edit list add prefer-upstream remove rm update revert"
fi
}
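# A minimal sketch of exercising a reply function by hand, assuming this
# file has been sourced in an interactive bash session (illustrative only,
# not part of the generated output):
#
#   list_options=false
#   _spack_config
#   echo "$SPACK_COMPREPLY"   # -> get blame edit list add prefer-upstream remove rm update revert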
_spack_config_get() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_config_sections
fi
}
_spack_config_blame() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_config_sections
fi
}
_spack_config_edit() {
if $list_options
then
SPACK_COMPREPLY="-h --help --print-file"
else
_config_sections
fi
}
_spack_config_list() {
SPACK_COMPREPLY="-h --help"
}
_spack_config_add() {
if $list_options
then
SPACK_COMPREPLY="-h --help -f --file"
else
SPACK_COMPREPLY=""
fi
}
_spack_config_prefer_upstream() {
SPACK_COMPREPLY="-h --help --local"
}
_spack_config_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_config_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_config_update() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_config_sections
fi
}
_spack_config_revert() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_config_sections
fi
}
_spack_containerize() {
SPACK_COMPREPLY="-h --help --list-os --last-stage"
}
_spack_create() {
if $list_options
then
SPACK_COMPREPLY="-h --help --keep-stage -n --name -t --template -r --repo -N --namespace -f --force --skip-editor -b --batch"
else
SPACK_COMPREPLY=""
fi
}
_spack_debug() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="create-db-tarball report"
fi
}
_spack_debug_create_db_tarball() {
SPACK_COMPREPLY="-h --help"
}
_spack_debug_report() {
SPACK_COMPREPLY="-h --help"
}
_spack_dependencies() {
if $list_options
then
SPACK_COMPREPLY="-h --help -i --installed -t --transitive --deptype -V --no-expand-virtuals"
else
_all_packages
fi
}
_spack_dependents() {
if $list_options
then
SPACK_COMPREPLY="-h --help -i --installed -t --transitive"
else
_all_packages
fi
}
_spack_deprecate() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all -d --dependencies -D --no-dependencies -i --install-deprecator -I --no-install-deprecator -l --link-type"
else
_all_packages
fi
}
_spack_dev_build() {
if $list_options
then
SPACK_COMPREPLY="-h --help -j --jobs -d --source-path -i --ignore-dependencies -n --no-checksum --deprecated --keep-prefix --skip-patch -q --quiet --drop-in --test -b --before -u --until --clean --dirty -U --fresh --reuse --reuse-deps"
else
_all_packages
fi
}
_spack_develop() {
if $list_options
then
SPACK_COMPREPLY="-h --help -p --path --no-clone --clone -f --force"
else
_all_packages
fi
}
_spack_diff() {
if $list_options
then
SPACK_COMPREPLY="-h --help --json --first -a --attribute"
else
_all_packages
fi
}
_spack_docs() {
SPACK_COMPREPLY="-h --help"
}
_spack_edit() {
if $list_options
then
SPACK_COMPREPLY="-h --help -b --build-system -c --command -d --docs -t --test -m --module -r --repo -N --namespace"
else
_all_packages
fi
}
_spack_env() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="activate deactivate create remove rm list ls status st loads view update revert depfile"
fi
}
_spack_env_activate() {
if $list_options
then
SPACK_COMPREPLY="-h --help --sh --csh --fish --bat --pwsh -v --with-view -V --without-view -p --prompt --temp -d --dir"
else
_environments
fi
}
_spack_env_deactivate() {
SPACK_COMPREPLY="-h --help --sh --csh --fish --bat"
}
_spack_env_create() {
if $list_options
then
SPACK_COMPREPLY="-h --help -d --dir --keep-relative --without-view --with-view"
else
_environments
fi
}
_spack_env_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_environments
fi
}
_spack_env_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_environments
fi
}
_spack_env_list() {
SPACK_COMPREPLY="-h --help"
}
_spack_env_ls() {
SPACK_COMPREPLY="-h --help"
}
_spack_env_status() {
SPACK_COMPREPLY="-h --help"
}
_spack_env_st() {
SPACK_COMPREPLY="-h --help"
}
_spack_env_loads() {
SPACK_COMPREPLY="-h --help -n --module-set-name -m --module-type --input-only -p --prefix -x --exclude -r --dependencies"
}
_spack_env_view() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_env_update() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_environments
fi
}
_spack_env_revert() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_environments
fi
}
_spack_env_depfile() {
if $list_options
then
SPACK_COMPREPLY="-h --help --make-prefix --make-target-prefix --make-disable-jobserver --use-buildcache -o --output -G --generator"
else
_all_packages
fi
}
_spack_extensions() {
if $list_options
then
SPACK_COMPREPLY="-h --help -l --long -L --very-long -d --deps -p --paths -s --show"
else
_extensions
fi
}
_spack_external() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="find list read-cray-manifest"
fi
}
_spack_external_find() {
if $list_options
then
SPACK_COMPREPLY="-h --help --not-buildable --exclude -p --path --scope --all -t --tag"
else
_all_packages
fi
}
_spack_external_list() {
SPACK_COMPREPLY="-h --help"
}
_spack_external_read_cray_manifest() {
SPACK_COMPREPLY="-h --help --file --directory --ignore-default-dir --dry-run --fail-on-error"
}
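# Illustrative only (assuming this file is sourced): candidates are narrowed
# to the current word's prefix, so for example
#   $ spack external read-cray-manifest --<TAB>
# offers --help --file --directory --ignore-default-dir --dry-run
# --fail-on-error (everything above except the bare -h).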
_spack_fetch() {
if $list_options
then
SPACK_COMPREPLY="-h --help -n --no-checksum --deprecated -m --missing -D --dependencies"
else
_all_packages
fi
}
_spack_find() {
if $list_options
then
SPACK_COMPREPLY="-h --help --format -H --hashes --json -d --deps -p --paths --groups --no-groups -l --long -L --very-long -t --tag -N --namespaces -c --show-concretized -f --show-flags --show-full-compiler -x --explicit -X --implicit -u --unknown -m --missing -v --variants --loaded -M --only-missing --deprecated --only-deprecated --start-date --end-date"
else
_installed_packages
fi
}
_spack_gc() {
SPACK_COMPREPLY="-h --help -y --yes-to-all"
}
_spack_gpg() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="verify trust untrust sign create list init export publish"
fi
}
_spack_gpg_verify() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_installed_packages
fi
}
_spack_gpg_trust() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_gpg_untrust() {
if $list_options
then
SPACK_COMPREPLY="-h --help --signing"
else
_keys
fi
}
_spack_gpg_sign() {
if $list_options
then
SPACK_COMPREPLY="-h --help --output --key --clearsign"
else
_installed_packages
fi
}
_spack_gpg_create() {
if $list_options
then
SPACK_COMPREPLY="-h --help --comment --expires --export --export-secret"
else
SPACK_COMPREPLY=""
fi
}
_spack_gpg_list() {
SPACK_COMPREPLY="-h --help --trusted --signing"
}
_spack_gpg_init() {
SPACK_COMPREPLY="-h --help --from"
}
_spack_gpg_export() {
if $list_options
then
SPACK_COMPREPLY="-h --help --secret"
else
_keys
fi
}
_spack_gpg_publish() {
if $list_options
then
SPACK_COMPREPLY="-h --help -d --directory -m --mirror-name --mirror-url --rebuild-index"
else
_keys
fi
}
_spack_graph() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --ascii -d --dot -s --static -c --color -i --installed --deptype"
else
_all_packages
fi
}
_spack_help() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all --spec"
else
_subcommands
fi
}
_spack_info() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all --detectable --maintainers --no-dependencies --no-variants --no-versions --phases --tags --tests --virtuals"
else
_all_packages
fi
}
_spack_install() {
if $list_options
then
SPACK_COMPREPLY="-h --help --only -u --until -j --jobs --overwrite --fail-fast --keep-prefix --keep-stage --dont-restage --use-cache --no-cache --cache-only --use-buildcache --include-build-deps --no-check-signature --show-log-on-error --source -n --no-checksum --deprecated -v --verbose --fake --only-concrete --add --no-add -f --file --clean --dirty --test --log-format --log-file --help-cdash --cdash-upload-url --cdash-build --cdash-site --cdash-track --cdash-buildstamp -y --yes-to-all -U --fresh --reuse --reuse-deps"
else
_all_packages
fi
}
_spack_license() {
if $list_options
then
SPACK_COMPREPLY="-h --help --root"
else
SPACK_COMPREPLY="list-files verify update-copyright-year"
fi
}
_spack_license_list_files() {
SPACK_COMPREPLY="-h --help"
}
_spack_license_verify() {
SPACK_COMPREPLY="-h --help"
}
_spack_license_update_copyright_year() {
SPACK_COMPREPLY="-h --help"
}
_spack_list() {
if $list_options
then
SPACK_COMPREPLY="-h --help -d --search-description --format -v --virtuals -t --tag --count --update"
else
_all_packages
fi
}
_spack_load() {
if $list_options
then
SPACK_COMPREPLY="-h --help --sh --csh --fish --bat --first --only --list"
else
_installed_packages
fi
}
_spack_location() {
if $list_options
then
SPACK_COMPREPLY="-h --help -m --module-dir -r --spack-root -i --install-dir -p --package-dir -P --packages -s --stage-dir -S --stages --source-dir -b --build-dir -e --env --first"
else
_all_packages
fi
}
_spack_log_parse() {
if $list_options
then
SPACK_COMPREPLY="-h --help --show -c --context -p --profile -w --width -j --jobs"
else
SPACK_COMPREPLY=""
fi
}
_spack_maintainers() {
if $list_options
then
SPACK_COMPREPLY="-h --help --maintained --unmaintained -a --all --by-user"
else
_all_packages
fi
}
_spack_make_installer() {
if $list_options
then
SPACK_COMPREPLY="-h --help -v --spack-version -s --spack-source -g --git-installer-verbosity"
else
SPACK_COMPREPLY=""
fi
}
_spack_mark() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all -e --explicit -i --implicit"
else
_installed_packages
fi
}
_spack_mirror() {
if $list_options
then
SPACK_COMPREPLY="-h --help -n --no-checksum --deprecated"
else
SPACK_COMPREPLY="create destroy add remove rm set-url set list"
fi
}
_spack_mirror_create() {
if $list_options
then
SPACK_COMPREPLY="-h --help -d --directory -a --all -f --file --exclude-file --exclude-specs --skip-unstable-versions -D --dependencies -n --versions-per-spec"
else
_all_packages
fi
}
_spack_mirror_destroy() {
SPACK_COMPREPLY="-h --help -m --mirror-name --mirror-url"
}
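# Illustrative only: because matches are narrowed by prefix, typing
#   $ spack mirror destroy --mirror-<TAB>
# leaves just --mirror-name and --mirror-url from the list above.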
_spack_mirror_add() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope --type --s3-access-key-id --s3-access-key-secret --s3-access-token --s3-profile --s3-endpoint-url"
else
_mirrors
fi
}
_spack_mirror_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
_mirrors
fi
}
_spack_mirror_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
_mirrors
fi
}
_spack_mirror_set_url() {
if $list_options
then
SPACK_COMPREPLY="-h --help --push --fetch --scope --s3-access-key-id --s3-access-key-secret --s3-access-token --s3-profile --s3-endpoint-url"
else
_mirrors
fi
}
_spack_mirror_set() {
if $list_options
then
SPACK_COMPREPLY="-h --help --push --fetch --type --url --scope --s3-access-key-id --s3-access-key-secret --s3-access-token --s3-profile --s3-endpoint-url"
else
_mirrors
fi
}
_spack_mirror_list() {
SPACK_COMPREPLY="-h --help --scope"
}
_spack_module() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="lmod tcl"
fi
}
_spack_module_lmod() {
if $list_options
then
SPACK_COMPREPLY="-h --help -n --name"
else
SPACK_COMPREPLY="refresh find rm loads setdefault"
fi
}
_spack_module_lmod_refresh() {
if $list_options
then
SPACK_COMPREPLY="-h --help --delete-tree --upstream-modules -y --yes-to-all"
else
_installed_packages
fi
}
_spack_module_lmod_find() {
if $list_options
then
SPACK_COMPREPLY="-h --help --full-path -r --dependencies"
else
_installed_packages
fi
}
_spack_module_lmod_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_installed_packages
fi
}
_spack_module_lmod_loads() {
if $list_options
then
SPACK_COMPREPLY="-h --help --input-only -p --prefix -x --exclude -r --dependencies"
else
_installed_packages
fi
}
_spack_module_lmod_setdefault() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_installed_packages
fi
}
_spack_module_tcl() {
if $list_options
then
SPACK_COMPREPLY="-h --help -n --name"
else
SPACK_COMPREPLY="refresh find rm loads setdefault"
fi
}
_spack_module_tcl_refresh() {
if $list_options
then
SPACK_COMPREPLY="-h --help --delete-tree --upstream-modules -y --yes-to-all"
else
_installed_packages
fi
}
_spack_module_tcl_find() {
if $list_options
then
SPACK_COMPREPLY="-h --help --full-path -r --dependencies"
else
_installed_packages
fi
}
_spack_module_tcl_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
_installed_packages
fi
}
_spack_module_tcl_loads() {
if $list_options
then
SPACK_COMPREPLY="-h --help --input-only -p --prefix -x --exclude -r --dependencies"
else
_installed_packages
fi
}
_spack_module_tcl_setdefault() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_installed_packages
fi
}
_spack_patch() {
if $list_options
then
SPACK_COMPREPLY="-h --help -n --no-checksum --deprecated -U --fresh --reuse --reuse-deps"
else
_all_packages
fi
}
_spack_pkg() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="add list diff added changed removed grep source hash"
fi
}
_spack_pkg_add() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_packages
fi
}
_spack_pkg_list() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_pkg_diff() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_pkg_added() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_pkg_changed() {
if $list_options
then
SPACK_COMPREPLY="-h --help -t --type"
else
SPACK_COMPREPLY=""
fi
}
_spack_pkg_removed() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_pkg_grep() {
if $list_options
then
SPACK_COMPREPLY="--help"
else
SPACK_COMPREPLY=""
fi
}
_spack_pkg_source() {
if $list_options
then
SPACK_COMPREPLY="-h --help -c --canonical"
else
_all_packages
fi
}
_spack_pkg_hash() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_packages
fi
}
_spack_providers() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_providers
fi
}
_spack_pydoc() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_python() {
if $list_options
then
SPACK_COMPREPLY="-h --help -V --version -c -u -i -m --path"
else
SPACK_COMPREPLY=""
fi
}
_spack_reindex() {
SPACK_COMPREPLY="-h --help"
}
_spack_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all -l --list-name -f --force"
else
_all_packages
fi
}
_spack_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all -l --list-name -f --force"
else
_all_packages
fi
}
_spack_repo() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="create list add remove rm"
fi
}
_spack_repo_create() {
if $list_options
then
SPACK_COMPREPLY="-h --help -d --subdirectory"
else
_repos
fi
}
_spack_repo_list() {
SPACK_COMPREPLY="-h --help --scope"
}
_spack_repo_add() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
SPACK_COMPREPLY=""
fi
}
_spack_repo_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
_repos
fi
}
_spack_repo_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help --scope"
else
_repos
fi
}
_spack_resource() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="list show"
fi
}
_spack_resource_list() {
SPACK_COMPREPLY="-h --help --only-hashes"
}
_spack_resource_show() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_resource_hashes
fi
}
_spack_restage() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_packages
fi
}
_spack_solve() {
if $list_options
then
SPACK_COMPREPLY="-h --help --show -l --long -L --very-long -N --namespaces -I --install-status --no-install-status -y --yaml -j --json -c --cover -t --types --timers --stats -U --fresh --reuse --reuse-deps"
else
_all_packages
fi
}
_spack_spec() {
if $list_options
then
SPACK_COMPREPLY="-h --help -l --long -L --very-long -N --namespaces -I --install-status --no-install-status -y --yaml -j --json --format -c --cover -t --types -U --fresh --reuse --reuse-deps"
else
_all_packages
fi
}
_spack_stage() {
if $list_options
then
SPACK_COMPREPLY="-h --help -n --no-checksum --deprecated -p --path -U --fresh --reuse --reuse-deps"
else
_all_packages
fi
}
_spack_style() {
if $list_options
then
SPACK_COMPREPLY="-h --help -b --base -a --all -r --root-relative -U --no-untracked -f --fix --root -t --tool -s --skip"
else
SPACK_COMPREPLY=""
fi
}
_spack_tags() {
if $list_options
then
SPACK_COMPREPLY="-h --help -i --installed -a --all"
else
SPACK_COMPREPLY=""
fi
}
_spack_test() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="run list find status results remove"
fi
}
_spack_test_run() {
if $list_options
then
SPACK_COMPREPLY="-h --help --alias --fail-fast --fail-first --externals -x --explicit --keep-stage --log-format --log-file --cdash-upload-url --cdash-build --cdash-site --cdash-track --cdash-buildstamp --help-cdash --clean --dirty"
else
_installed_packages
fi
}
_spack_test_list() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all"
else
SPACK_COMPREPLY=""
fi
}
_spack_test_find() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_packages
fi
}
_spack_test_status() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY=""
fi
}
_spack_test_results() {
if $list_options
then
SPACK_COMPREPLY="-h --help -l --logs -f --failed"
else
SPACK_COMPREPLY=""
fi
}
_spack_test_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help -y --yes-to-all"
else
SPACK_COMPREPLY=""
fi
}
_spack_test_env() {
if $list_options
then
SPACK_COMPREPLY="-h --help --clean --dirty -U --fresh --reuse --reuse-deps --dump --pickle"
else
_all_packages
fi
}
_spack_tutorial() {
SPACK_COMPREPLY="-h --help -y --yes-to-all"
}
_spack_undevelop() {
if $list_options
then
SPACK_COMPREPLY="-h --help -a --all"
else
_all_packages
fi
}
_spack_uninstall() {
if $list_options
then
SPACK_COMPREPLY="-h --help -f --force --remove -R --dependents -y --yes-to-all -a --all --origin"
else
_installed_packages
fi
}
_spack_unit_test() {
if $list_options
then
SPACK_COMPREPLY="-h --help -H --pytest-help -l --list -L --list-long -N --list-names --extension -s -k --showlocals"
else
_unit_tests
fi
}
_spack_unload() {
if $list_options
then
SPACK_COMPREPLY="-h --help --sh --csh --fish --bat -a --all"
else
_installed_packages
fi
}
_spack_url() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
SPACK_COMPREPLY="parse list summary stats"
fi
}
_spack_url_parse() {
if $list_options
then
SPACK_COMPREPLY="-h --help -s --spider"
else
SPACK_COMPREPLY=""
fi
}
_spack_url_list() {
SPACK_COMPREPLY="-h --help -c --color -e --extrapolation -n --incorrect-name -N --correct-name -v --incorrect-version -V --correct-version"
}
_spack_url_summary() {
SPACK_COMPREPLY="-h --help"
}
_spack_url_stats() {
SPACK_COMPREPLY="-h --help --show-issues"
}
_spack_verify() {
if $list_options
then
SPACK_COMPREPLY="-h --help -l --local -j --json -a --all -s --specs -f --files"
else
_all_packages
fi
}
_spack_versions() {
if $list_options
then
SPACK_COMPREPLY="-h --help -s --safe --safe-only -r --remote -n --new -c --concurrency"
else
_all_packages
fi
}
_spack_view() {
if $list_options
then
SPACK_COMPREPLY="-h --help -v --verbose -e --exclude -d --dependencies"
else
SPACK_COMPREPLY="symlink add soft hardlink hard copy relocate remove rm statlink status check"
fi
}
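# Illustrative only: with no leading dash on the current word, `spack view
# <TAB>` offers the subcommand names from the else branch above instead of
# option flags.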
_spack_view_symlink() {
if $list_options
then
SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"
else
_all_packages
fi
}
_spack_view_add() {
if $list_options
then
SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"
else
_all_packages
fi
}
_spack_view_soft() {
if $list_options
then
SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"
else
_all_packages
fi
}
_spack_view_hardlink() {
if $list_options
then
SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"
else
_all_packages
fi
}
_spack_view_hard() {
if $list_options
then
SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"
else
_all_packages
fi
}
_spack_view_copy() {
if $list_options
then
SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"
else
_all_packages
fi
}
_spack_view_relocate() {
if $list_options
then
SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"
else
_all_packages
fi
}
_spack_view_remove() {
if $list_options
then
SPACK_COMPREPLY="-h --help --no-remove-dependents -a --all"
else
_all_packages
fi
}
_spack_view_rm() {
if $list_options
then
SPACK_COMPREPLY="-h --help --no-remove-dependents -a --all"
else
_all_packages
fi
}
_spack_view_statlink() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_packages
fi
}
_spack_view_status() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_packages
fi
}
_spack_view_check() {
if $list_options
then
SPACK_COMPREPLY="-h --help"
else
_all_packages
fi
}