Compare commits: v0.13.4-sh...v0.14.2 (253 commits)

Commit SHAs:
7a68a4d851 a3bcd88f8d 740f8fe1a9 430ca7c7cf 55f5afaf3c 6b559912c1 9b5805a5cd c6c1d01ab6
b9688a8c35 ed2781973c 99bb88aead a85cce05a1 e2b1737a42 ff0abb9838 3826cdf139 30b4704522
09e13cf7cf 296d58ef6b ec720bf28d 1e42f0a545 62683eb4bf 901bed48ec cc8d9eee8e 1c8f792bb5
9a1ce36e44 59a7963785 733f9f8cfa fa0a5e44aa 5406e1f43d f3a1a8c6fe b02981f10c 654914d53e
3753424a87 32a3d59bfa 97d46dc36f 388960f044 629c69d383 0ea6bab984 50b5e6b94b 1a479b5e9e
3104bfe93e f961f64215 9f2cee43cf 82be8965f2 16a9464b5c 6764a9464d 6d8e0699f4 0bc4a778bd
4a5c64ab6e abfdabd34e b399761106 065cbf1bbe 08b5264607 fabd8693b9 38fc441a36 48375adf17
a03b252522 2e387ef585 01bda12692 c5134ff4f2 19af86592b ea9aa2c7cb 0d84ee6c68 e4968a495d
c784799895 b5b19084de 8b72cb64dd dd0e18d7b8 d1929b2ea7 a1e3a1653a f5b5036ad5 40410102d2
b538885b08 d9b8a9a8b3 545c436d33 0ea7b83e71 186ca13cf0 674c4379ee 308a6bc601 43f8e9a260
98895297b5 64887e4821 4e2a5388ac 9a5fa90c25 d98b98aae6 4d62dda17a a02aadb4db 22a56a89c7
a47ab9c6b2 ef9ab2ebc4 0be09abd9c 66c3f89b78 47e8084fab d9d863330b e258326133 7a60a04402
9b221d26b8 4cd1a5660e 65133daad7 8825335056 d1cbc14022 f4d91cc3ec c969fffbf0 1d986c3790
f9b857f1b0 3331b70f20 0c8738a860 31f9b66e18 dc46e219d1 129256de52 44c01e6b70 f46f238a33
d3b1248c2d eac6c53afb e9862671c9 2dac7b60a9 14025e89f6 f2aca86502 2f4881d582 26ad754f42
4c215d1fed a92543803c 0ea220db83 11340a332d 3ee0295fb2 6f95967e95 53238af2dc 813bfcfcbd
893f76da53 975acd4dfb c0bae73d2f c77b6923b8 4bca09cc89 e488b36d59 c8c53cc7e7 5b2d1a6a54
74018df678 db9ccc21b0 1f54627cee 0ec908cd13 d08f53c5fb 6f6e1a8c4a 02f3fc6d7a 82f76c44a0
f396106d1c fa28602092 f95348074b 86a3d58159 76bf5c53fa 1270b4d0df 342200774b 9f89dce52f
1d7fc47672 80123e0d1a 21156e6243 5ea7031019 17f19ad407 6c93ef5088 8783f16e36 921cb6c860
ccbdd3c902 30d487509b e781d8eb72 33edadb041 03d32c868a d6e4b4773a 38c3b55f19 b8c2141760
bbd256e2fe 1cc7a3533b 60847abf0e 8071369ffe b9156895ec b261b80ebd d3db3c6a2b 0dbd3b21c8
fc190f397a 8624bf65f9 e833ef9cce 1b95c33c82 978d960158 df1e9317b8 7f9cd886f3 7d96e496b2
e75b8c1230 41a2a5db2c 8f877fdc65 bab407a28a 8308d2d59d a6dc3fe40b 12ec8e340f f547f2ce10
00de09268e ee573540a0 d7de6cf055 ad7c11c482 ff71844bf6 98c4d15655 4f3287a9dc 45ee708596
959d9d2365 7c88bb937d c92201e6ab bcccf0d4e7 8313de5be0 4db0eb4917 8b5cc1e76f bff3a087ec
bd24d53ceb 2c0bfd071f e5e6791d97 4a7e4655ac 3f5d889d2b b688bb549b baafa7ec84 3c28e72d2f
e65aa7569e 978a3bb7ef 8c05221bc6 c607288a7c d524abad20 e0dfc3ddbf 8beb42e749 4fa29ceb50
690ed2fe98 7996bc809a 580c8f5b7e 46ae3f0570 725336ee48 0d0ab60150 9046deae80 413de215b2
1c5f72dbaa 4b7f057a4b 03ce5effa1 eb88dfee9a 1d25564230 4dc67b79aa 681ad2ac44 268f42bf7e
856734b804 7252066a32 06c85ea5bf 3dd844e597 fb482ae0c9 a337d27874 de84bd3f24 5243d270d4
38303a6a79 a7b43f1015 90f3635afd 7e50cec5a4 ca66ab6901
CHANGELOG.md (124 changed lines)
@@ -1,3 +1,127 @@
# v0.14.2 (2020-04-15)

This is a minor release on the `0.14` series. It includes performance
improvements and bug fixes:

* Improvements to how `spack install` handles foreground/background (#15723)
* Major performance improvements for reading the package DB (#14693, #15777)
* No longer check for the old `index.yaml` database file (#15298)
* Properly activate environments with '-h' in the name (#15429)
* External packages have correct `.prefix` in environments/views (#15475)
* Improvements to computing env modifications from sourcing files (#15791)
* Bugfix on Cray machines when getting `TERM` env variable (#15630)
* Avoid adding spurious `LMOD` env vars to Intel modules (#15778)
* Don't output [+] for mock installs run during tests (#15609)

# v0.14.1 (2020-03-20)

This is a bugfix release on top of `v0.14.0`. Specific fixes include:

* several bugfixes for parallel installation (#15339, #15341, #15220, #15197)
* `spack load` now works with packages that have been renamed (#14348)
* bugfix for `suite-sparse` installation (#15326)
* deduplicate identical suffixes added to module names (#14920)
* fix issues with `configure_args` during module refresh (#11084)
* increased test coverage and test fixes (#15237, #15354, #15346)
* remove some unused code (#15431)

# v0.14.0 (2020-02-23)

`v0.14.0` is a major feature release, with 3 highlighted features:

1. **Distributed builds.** Multiple Spack instances will now coordinate
   properly with each other through locks. This works on a single node
   (where you've called `spack` several times) or across multiple nodes
   with a shared filesystem. For example, with SLURM, you could build
   `trilinos` and its dependencies on 2 24-core nodes, with 3 Spack
   instances per node and 8 build jobs per instance, with `srun -N 2 -n 6
   spack install -j 8 trilinos`. This requires a filesystem with locking
   enabled, but not MPI or any other library for parallelism.

2. **Build pipelines.** You can also build in parallel through GitLab
   CI. Simply create a Spack environment and push it to GitLab to build
   on GitLab runners. Pipeline support is now integrated into a single
   `spack ci` command, so setting it up is easier than ever. See the
   [Pipelines section](https://spack.readthedocs.io/en/v0.14.0/pipelines.html)
   in the docs.

3. **Container builds.** The new `spack containerize` command allows you
   to create a Docker or Singularity recipe from any Spack environment.
   There are options to customize the build if you need them. See the
   [Container Images section](https://spack.readthedocs.io/en/latest/containers.html)
   in the docs.

In addition, there are several other new commands, many bugfixes and
improvements, and `spack load` no longer requires modules, so you can use
it the same way on your laptop or on your supercomputer.

Spack grew by over 300 packages since our last release in November 2019,
and the project grew to over 500 contributors. Thanks to all of you for
making yet another great release possible. Detailed notes below.

## Major new core features

* Distributed builds: Spack instances coordinate and build in parallel (#13100)
* New `spack ci` command to manage CI pipelines (#12854)
* Generate container recipes from environments: `spack containerize` (#14202)
* `spack load` now works without using modules (#14062, #14628)
* Garbage collect old/unused installations with `spack gc` (#13534)
* Configuration files all set environment modifications the same way (#14372,
  [docs](https://spack.readthedocs.io/en/v0.14.0/configuration.html#environment-modifications))
* `spack commands --format=bash` auto-generates completion (#14393, #14607)
* Packages can specify alternate fetch URLs in case one fails (#13881)

## Improvements

* Improved locking for concurrency with environments (#14676, #14621, #14692)
* `spack test` sends args to `pytest`, supports better listing (#14319)
* Better support for aarch64 and cascadelake microarch (#13825, #13780, #13820)
* Archspec is now a separate library (see https://github.com/archspec/archspec)
* Many improvements to the `spack buildcache` command (#14237, #14346,
  #14466, #14467, #14639, #14642, #14659, #14696, #14698, #14714, #14732,
  #14929, #15003, #15086, #15134)

## Selected Bugfixes

* Compilers now require an exact match on version (#8735, #14730, #14752)
* Bugfix for patches that specified specific versions (#13989)
* `spack find -p` now works in environments (#10019, #13972)
* Dependency queries work correctly in `spack find` (#14757)
* Bugfixes for locking chains of upstream Spack instances (#13364)
* Fixes for PowerPC clang optimization flags (#14196)
* Fix for issue with compilers and specific microarchitectures (#13733, #14798)

## New commands and options

* `spack ci` (#12854)
* `spack containerize` (#14202)
* `spack gc` (#13534)
* `spack load` accepts `--only package`, `--only dependencies` (#14062, #14628)
* `spack commands --format=bash` (#14393)
* `spack commands --update-completion` (#14607)
* `spack install --with-cache` has new option: `--no-check-signature` (#11107)
* `spack test` now has `--list`, `--list-long`, and `--list-names` (#14319)
* `spack install --help-cdash` moves CDash help out of the main help (#13704)

## Deprecations

* `spack release-jobs` has been rolled into `spack ci`
* `spack bootstrap` will be removed in a future version, as it is no longer
  needed to set up modules (see `spack load` improvements above)

## Documentation

* New section on building container images with Spack (see
  [docs](https://spack.readthedocs.io/en/latest/containers.html))
* New section on using the `spack ci` command to build pipelines (see
  [docs](https://spack.readthedocs.io/en/latest/pipelines.html))
* Document how to add conditional dependencies (#14694)
* Document how to use Spack to replace Homebrew/Conda (#13083, see
  [docs](https://spack.readthedocs.io/en/latest/workflows.html#using-spack-to-replace-homebrew-conda))

## Important package changes

* 3,908 total packages (345 added since 0.13.0)
* Added first cut at a TensorFlow package (#13112)
* We now build R without "recommended" packages, manage them w/Spack (#12015)
* Elpa and OpenBLAS now leverage microarchitecture support (#13655, #14380)
* Fix `octave` compiler wrapper usage (#14726)
* Enforce that packages in `builtin` aren't missing dependencies (#13949)


# v0.13.4 (2020-02-07)

This release contains several bugfixes:
@@ -16,7 +16,7 @@
 config:
   # This is the path to the root of the Spack install tree.
   # You can use $spack here to refer to the root of the spack instance.
-  install_tree: ~/.spack/opt/spack
+  install_tree: $spack/opt/spack


   # Locations where templates should be found
@@ -30,8 +30,8 @@ config:

   # Locations where different types of modules should be installed.
   module_roots:
-    tcl: ~/.spack/share/spack/modules
-    lmod: ~/.spack/share/spack/lmod
+    tcl: $spack/share/spack/modules
+    lmod: $spack/share/spack/lmod


   # Temporary locations Spack can try to use for builds.
@@ -67,7 +67,7 @@ config:

   # Cache directory for already downloaded source tarballs and archived
   # repositories. This can be purged with `spack clean --downloads`.
-  source_cache: ~/.spack/var/spack/cache
+  source_cache: $spack/var/spack/cache


   # Cache directory for miscellaneous files, like the package index.
@@ -137,7 +137,7 @@ config:
   # when Spack needs to manage its own package metadata and all operations are
   # expected to complete within the default time limit. The timeout should
   # therefore generally be left untouched.
-  db_lock_timeout: 120
+  db_lock_timeout: 3


   # How long to wait when attempting to modify a package (e.g. to install it).
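The `$spack` placeholder in these paths is expanded by Spack's configuration layer. As a rough illustration of the idea only (not Spack's actual implementation, which I believe lives in `spack.util.path` and also handles variables such as `$user` and `$tempdir`), here is a minimal sketch:

```python
import os


def expand_config_path(path, spack_root):
    """Minimal sketch of $spack-style substitution in config paths."""
    # Replace the $spack placeholder with the Spack prefix, then expand '~'.
    path = path.replace('$spack', spack_root)
    return os.path.expanduser(path)


print(expand_config_path('$spack/opt/spack', '/home/me/spack'))
# -> /home/me/spack/opt/spack
print(expand_config_path('~/.spack/opt/spack', '/home/me/spack'))
# -> /home/me/.spack/opt/spack
```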
@@ -40,9 +40,11 @@ packages:
     pil: [py-pillow]
     pkgconfig: [pkgconf, pkg-config]
     scalapack: [netlib-scalapack]
     sycl: [hipsycl]
     szip: [libszip, libaec]
     tbb: [intel-tbb]
     unwind: [libunwind]
     sycl: [hipsycl]
   permissions:
     read: world
     write: user
@@ -1,7 +0,0 @@
-upstreams:
-  global:
-    install_tree: $spack/opt/spack
-    modules:
-      tcl: $spack/share/spack/modules
-      lmod: $spack/share/spack/lmod
-      dotkit: $spack/share/spack/dotkit
@@ -58,9 +58,9 @@ directory. Here's an example of an external configuration:
    packages:
      openmpi:
        paths:
-         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7: /opt/openmpi-1.4.3
-         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7+debug: /opt/openmpi-1.4.3-debug
-         openmpi@1.6.5%intel@10.1 arch=linux-x86_64-debian7: /opt/openmpi-1.6.5-intel
+         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
+         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
+         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel

 This example lists three installations of OpenMPI, one built with GCC,
 one built with GCC and debug information, and another built with Intel.
@@ -107,9 +107,9 @@ be:
    packages:
      openmpi:
        paths:
-         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7: /opt/openmpi-1.4.3
-         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7+debug: /opt/openmpi-1.4.3-debug
-         openmpi@1.6.5%intel@10.1 arch=linux-x86_64-debian7: /opt/openmpi-1.6.5-intel
+         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
+         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
+         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
       buildable: False

 The addition of the ``buildable`` flag tells Spack that it should never build
@@ -4454,7 +4454,7 @@ translate variant flags into CMake definitions. For example:

 .. code-block:: python

-   def configure_args(self):
+   def cmake_args(self):
        spec = self.spec
        return [
            '-DUSE_EVERYTRACE=%s' % ('YES' if '+everytrace' in spec else 'NO'),
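This hunk renames the documented method from `configure_args` to `cmake_args`, the hook that CMake-based Spack packages override. As a minimal, hypothetical sketch of the pattern (the `Example` package and its `+shared` variant are illustrative, not taken from the diff):

```python
from spack import *  # standard package.py preamble in this Spack era


class Example(CMakePackage):
    """Hypothetical CMake-based package."""

    variant('shared', default=True, description='Build shared libraries')

    def cmake_args(self):
        # Translate a variant into a CMake definition, mirroring the
        # USE_EVERYTRACE example in the documentation hunk above.
        spec = self.spec
        return [
            '-DBUILD_SHARED_LIBS={0}'.format(
                'ON' if '+shared' in spec else 'OFF'),
        ]
```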
@@ -456,7 +456,7 @@ def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
         if os.path.isdir(s):
             mkdirp(d)
         else:
-            shutil.copyfile(s, d)
+            shutil.copy2(s, d)

         if _permissions:
             set_install_permissions(d)
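The only behavioral change in this hunk is swapping `shutil.copyfile` for `shutil.copy2`, which also copies file metadata. A small, self-contained sketch of the difference (file names are illustrative):

```python
import os
import shutil
import stat

# Create an executable source file.
with open('src.sh', 'w') as f:
    f.write('#!/bin/sh\necho hello\n')
os.chmod('src.sh', 0o755)

# copyfile() copies contents only; the destination's mode comes from the umask.
shutil.copyfile('src.sh', 'plain_copy.sh')
# copy2() copies contents plus metadata (permission bits, timestamps).
shutil.copy2('src.sh', 'full_copy.sh')

print(oct(stat.S_IMODE(os.stat('plain_copy.sh').st_mode)))  # e.g. 0o644 (umask-dependent)
print(oct(stat.S_IMODE(os.stat('full_copy.sh').st_mode)))   # 0o755
```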
@@ -624,9 +624,9 @@ def replace_directory_transaction(directory_name, tmp_root=None):
     # Check the input is indeed a directory with absolute path.
     # Raise before anything is done to avoid moving the wrong directory
     assert os.path.isdir(directory_name), \
-        '"directory_name" must be a valid directory'
+        'Invalid directory: ' + directory_name
     assert os.path.isabs(directory_name), \
-        '"directory_name" must contain an absolute path'
+        '"directory_name" must contain an absolute path: ' + directory_name

     directory_basename = os.path.basename(directory_name)
@@ -619,3 +619,28 @@ def load_module_from_file(module_name, module_path):
    import imp
    module = imp.load_source(module_name, module_path)
    return module


def uniq(sequence):
    """Remove strings of duplicate elements from a list.

    This works like the command-line ``uniq`` tool. It filters strings
    of duplicate elements in a list. Adjacent matching elements are
    merged into the first occurrence.

    For example::

        uniq([1, 1, 1, 1, 2, 2, 2, 3, 3]) == [1, 2, 3]
        uniq([1, 1, 1, 1, 2, 2, 2, 1, 1]) == [1, 2, 1]

    """
    if not sequence:
        return []

    uniq_list = [sequence[0]]
    last = sequence[0]
    for element in sequence[1:]:
        if element != last:
            uniq_list.append(element)
            last = element
    return uniq_list
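The new `uniq` helper collapses runs of adjacent duplicates. For what it's worth, the same behavior can be expressed with the standard library; a short sketch (not part of the diff):

```python
import itertools


def uniq_via_groupby(sequence):
    # groupby() groups adjacent equal elements; keeping only the keys
    # collapses each run to its first occurrence, like uniq() above.
    return [key for key, _group in itertools.groupby(sequence)]


assert uniq_via_groupby([1, 1, 1, 1, 2, 2, 2, 3, 3]) == [1, 2, 3]
assert uniq_via_groupby([1, 1, 1, 1, 2, 2, 2, 1, 1]) == [1, 2, 1]
```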
@@ -8,14 +8,32 @@
|
||||
import errno
|
||||
import time
|
||||
import socket
|
||||
from datetime import datetime
|
||||
|
||||
import llnl.util.tty as tty
|
||||
import spack.util.string
|
||||
|
||||
|
||||
__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
|
||||
'LockError', 'LockTimeoutError',
|
||||
'LockPermissionError', 'LockROFileError', 'CantCreateLockError']
|
||||
|
||||
#: Mapping of supported locks to description
|
||||
lock_type = {fcntl.LOCK_SH: 'read', fcntl.LOCK_EX: 'write'}
|
||||
|
||||
#: A useful replacement for functions that should return True when not provided
|
||||
#: for example.
|
||||
true_fn = lambda: True
|
||||
|
||||
|
||||
def _attempts_str(wait_time, nattempts):
|
||||
# Don't print anything if we succeeded on the first try
|
||||
if nattempts <= 1:
|
||||
return ''
|
||||
|
||||
attempts = spack.util.string.plural(nattempts, 'attempt')
|
||||
return ' after {0:0.2f}s and {1}'.format(wait_time, attempts)
|
||||
|
||||
|
||||
class Lock(object):
|
||||
"""This is an implementation of a filesystem lock using Python's lockf.
|
||||
@@ -31,8 +49,8 @@ class Lock(object):
|
||||
maintain multiple locks on the same file.
|
||||
"""
|
||||
|
||||
def __init__(self, path, start=0, length=0, debug=False,
|
||||
default_timeout=None):
|
||||
def __init__(self, path, start=0, length=0, default_timeout=None,
|
||||
debug=False, desc=''):
|
||||
"""Construct a new lock on the file at ``path``.
|
||||
|
||||
By default, the lock applies to the whole file. Optionally,
|
||||
@@ -43,6 +61,16 @@ def __init__(self, path, start=0, length=0, debug=False,
|
||||
not currently expose the ``whence`` parameter -- ``whence`` is
|
||||
always ``os.SEEK_SET`` and ``start`` is always evaluated from the
|
||||
beginning of the file.
|
||||
|
||||
Args:
|
||||
path (str): path to the lock
|
||||
start (int): optional byte offset at which the lock starts
|
||||
length (int): optional number of bytes to lock
|
||||
default_timeout (int): number of seconds to wait for lock attempts,
|
||||
where None means to wait indefinitely
|
||||
debug (bool): debug mode specific to locking
|
||||
desc (str): optional debug message lock description, which is
|
||||
helpful for distinguishing between different Spack locks.
|
||||
"""
|
||||
self.path = path
|
||||
self._file = None
|
||||
@@ -56,6 +84,9 @@ def __init__(self, path, start=0, length=0, debug=False,
|
||||
# enable debug mode
|
||||
self.debug = debug
|
||||
|
||||
# optional debug description
|
||||
self.desc = ' ({0})'.format(desc) if desc else ''
|
||||
|
||||
# If the user doesn't set a default timeout, or if they choose
|
||||
# None, 0, etc. then lock attempts will not time out (unless the
|
||||
# user sets a timeout for each attempt)
|
||||
@@ -89,6 +120,20 @@ def _poll_interval_generator(_wait_times=None):
|
||||
num_requests += 1
|
||||
yield wait_time
|
||||
|
||||
def __repr__(self):
|
||||
"""Formal representation of the lock."""
|
||||
rep = '{0}('.format(self.__class__.__name__)
|
||||
for attr, value in self.__dict__.items():
|
||||
rep += '{0}={1}, '.format(attr, value.__repr__())
|
||||
return '{0})'.format(rep.strip(', '))
|
||||
|
||||
def __str__(self):
|
||||
"""Readable string (with key fields) of the lock."""
|
||||
location = '{0}[{1}:{2}]'.format(self.path, self._start, self._length)
|
||||
timeout = 'timeout={0}'.format(self.default_timeout)
|
||||
activity = '#reads={0}, #writes={1}'.format(self._reads, self._writes)
|
||||
return '({0}, {1}, {2})'.format(location, timeout, activity)
|
||||
|
||||
def _lock(self, op, timeout=None):
|
||||
"""This takes a lock using POSIX locks (``fcntl.lockf``).
|
||||
|
||||
@@ -99,8 +144,9 @@ def _lock(self, op, timeout=None):
|
||||
successfully acquired, the total wait time and the number of attempts
|
||||
is returned.
|
||||
"""
|
||||
assert op in (fcntl.LOCK_SH, fcntl.LOCK_EX)
|
||||
assert op in lock_type
|
||||
|
||||
self._log_acquiring('{0} LOCK'.format(lock_type[op].upper()))
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
# Create file and parent directories if they don't exist.
|
||||
@@ -128,6 +174,9 @@ def _lock(self, op, timeout=None):
|
||||
# If the file were writable, we'd have opened it 'r+'
|
||||
raise LockROFileError(self.path)
|
||||
|
||||
tty.debug("{0} locking [{1}:{2}]: timeout {3} sec"
|
||||
.format(lock_type[op], self._start, self._length, timeout))
|
||||
|
||||
poll_intervals = iter(Lock._poll_interval_generator())
|
||||
start_time = time.time()
|
||||
num_attempts = 0
|
||||
@@ -139,17 +188,21 @@ def _lock(self, op, timeout=None):
|
||||
|
||||
time.sleep(next(poll_intervals))
|
||||
|
||||
# TBD: Is an extra attempt after timeout needed/appropriate?
|
||||
num_attempts += 1
|
||||
if self._poll_lock(op):
|
||||
total_wait_time = time.time() - start_time
|
||||
return total_wait_time, num_attempts
|
||||
|
||||
raise LockTimeoutError("Timed out waiting for lock.")
|
||||
raise LockTimeoutError("Timed out waiting for a {0} lock."
|
||||
.format(lock_type[op]))
|
||||
|
||||
def _poll_lock(self, op):
|
||||
"""Attempt to acquire the lock in a non-blocking manner. Return whether
|
||||
the locking attempt succeeds
|
||||
"""
|
||||
assert op in lock_type
|
||||
|
||||
try:
|
||||
# Try to get the lock (will raise if not available.)
|
||||
fcntl.lockf(self._file, op | fcntl.LOCK_NB,
|
||||
@@ -159,6 +212,9 @@ def _poll_lock(self, op):
|
||||
if self.debug:
|
||||
# All locks read the owner PID and host
|
||||
self._read_debug_data()
|
||||
tty.debug('{0} locked {1} [{2}:{3}] (owner={4})'
|
||||
.format(lock_type[op], self.path,
|
||||
self._start, self._length, self.pid))
|
||||
|
||||
# Exclusive locks write their PID/host
|
||||
if op == fcntl.LOCK_EX:
|
||||
@@ -167,12 +223,12 @@ def _poll_lock(self, op):
|
||||
return True
|
||||
|
||||
except IOError as e:
|
||||
if e.errno in (errno.EAGAIN, errno.EACCES):
|
||||
# EAGAIN and EACCES == locked by another process
|
||||
pass
|
||||
else:
|
||||
# EAGAIN and EACCES == locked by another process (so try again)
|
||||
if e.errno not in (errno.EAGAIN, errno.EACCES):
|
||||
raise
|
||||
|
||||
return False
|
||||
|
||||
def _ensure_parent_directory(self):
|
||||
parent = os.path.dirname(self.path)
|
||||
|
||||
@@ -227,6 +283,8 @@ def _unlock(self):
|
||||
self._length, self._start, os.SEEK_SET)
|
||||
self._file.close()
|
||||
self._file = None
|
||||
self._reads = 0
|
||||
self._writes = 0
|
||||
|
||||
def acquire_read(self, timeout=None):
|
||||
"""Acquires a recursive, shared lock for reading.
|
||||
@@ -242,15 +300,14 @@ def acquire_read(self, timeout=None):
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._reads == 0 and self._writes == 0:
|
||||
self._debug(
|
||||
'READ LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
|
||||
.format(self))
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
|
||||
self._acquired_debug('READ LOCK', wait_time, nattempts)
|
||||
self._reads += 1
|
||||
# Log if acquired, which includes counts when verbose
|
||||
self._log_acquired('READ LOCK', wait_time, nattempts)
|
||||
return True
|
||||
else:
|
||||
# Increment the read count for nested lock tracking
|
||||
self._reads += 1
|
||||
return False
|
||||
|
||||
@@ -268,13 +325,11 @@ def acquire_write(self, timeout=None):
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._writes == 0:
|
||||
self._debug(
|
||||
'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
|
||||
.format(self))
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
|
||||
self._acquired_debug('WRITE LOCK', wait_time, nattempts)
|
||||
self._writes += 1
|
||||
# Log if acquired, which includes counts when verbose
|
||||
self._log_acquired('WRITE LOCK', wait_time, nattempts)
|
||||
|
||||
# return True only if we weren't nested in a read lock.
|
||||
# TODO: we may need to return two values: whether we got
|
||||
@@ -282,9 +337,65 @@ def acquire_write(self, timeout=None):
|
||||
# write lock for the first time. Now it returns the latter.
|
||||
return self._reads == 0
|
||||
else:
|
||||
# Increment the write count for nested lock tracking
|
||||
self._writes += 1
|
||||
return False
|
||||
|
||||
def is_write_locked(self):
|
||||
"""Check if the file is write locked
|
||||
|
||||
Return:
|
||||
(bool): ``True`` if the path is write locked, otherwise, ``False``
|
||||
"""
|
||||
try:
|
||||
self.acquire_read()
|
||||
|
||||
# If we have a read lock then no other process has a write lock.
|
||||
self.release_read()
|
||||
except LockTimeoutError:
|
||||
# Another process is holding a write lock on the file
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def downgrade_write_to_read(self, timeout=None):
|
||||
"""
|
||||
Downgrade from an exclusive write lock to a shared read.
|
||||
|
||||
Raises:
|
||||
LockDowngradeError: if this is an attempt at a nested transaction
|
||||
"""
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._writes == 1 and self._reads == 0:
|
||||
self._log_downgrading()
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
|
||||
self._reads = 1
|
||||
self._writes = 0
|
||||
self._log_downgraded(wait_time, nattempts)
|
||||
else:
|
||||
raise LockDowngradeError(self.path)
|
||||
|
||||
def upgrade_read_to_write(self, timeout=None):
|
||||
"""
|
||||
Attempts to upgrade from a shared read lock to an exclusive write.
|
||||
|
||||
Raises:
|
||||
LockUpgradeError: if this is an attempt at a nested transaction
|
||||
"""
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._reads == 1 and self._writes == 0:
|
||||
self._log_upgrading()
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
|
||||
self._reads = 0
|
||||
self._writes = 1
|
||||
self._log_upgraded(wait_time, nattempts)
|
||||
else:
|
||||
raise LockUpgradeError(self.path)
|
||||
|
||||
def release_read(self, release_fn=None):
|
||||
"""Releases a read lock.
|
||||
|
||||
@@ -305,17 +416,17 @@ def release_read(self, release_fn=None):
|
||||
"""
|
||||
assert self._reads > 0
|
||||
|
||||
locktype = 'READ LOCK'
|
||||
if self._reads == 1 and self._writes == 0:
|
||||
self._debug(
|
||||
'READ LOCK: {0.path}[{0._start}:{0._length}] [Released]'
|
||||
.format(self))
|
||||
self._log_releasing(locktype)
|
||||
|
||||
result = True
|
||||
if release_fn is not None:
|
||||
result = release_fn()
|
||||
# we need to call release_fn before releasing the lock
|
||||
release_fn = release_fn or true_fn
|
||||
result = release_fn()
|
||||
|
||||
self._unlock() # can raise LockError.
|
||||
self._reads -= 1
|
||||
self._reads = 0
|
||||
self._log_released(locktype)
|
||||
return result
|
||||
else:
|
||||
self._reads -= 1
|
||||
@@ -339,45 +450,91 @@ def release_write(self, release_fn=None):
|
||||
|
||||
"""
|
||||
assert self._writes > 0
|
||||
release_fn = release_fn or true_fn
|
||||
|
||||
locktype = 'WRITE LOCK'
|
||||
if self._writes == 1 and self._reads == 0:
|
||||
self._debug(
|
||||
'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Released]'
|
||||
.format(self))
|
||||
self._log_releasing(locktype)
|
||||
|
||||
# we need to call release_fn before releasing the lock
|
||||
result = True
|
||||
if release_fn is not None:
|
||||
result = release_fn()
|
||||
result = release_fn()
|
||||
|
||||
self._unlock() # can raise LockError.
|
||||
self._writes -= 1
|
||||
self._writes = 0
|
||||
self._log_released(locktype)
|
||||
return result
|
||||
|
||||
else:
|
||||
self._writes -= 1
|
||||
|
||||
# when the last *write* is released, we call release_fn here
|
||||
# instead of immediately before releasing the lock.
|
||||
if self._writes == 0:
|
||||
return release_fn() if release_fn is not None else True
|
||||
return release_fn()
|
||||
else:
|
||||
return False
|
||||
|
||||
def _debug(self, *args):
|
||||
tty.debug(*args)
|
||||
|
||||
def _acquired_debug(self, lock_type, wait_time, nattempts):
|
||||
attempts_format = 'attempt' if nattempts == 1 else 'attempt'
|
||||
if nattempts > 1:
|
||||
acquired_attempts_format = ' after {0:0.2f}s and {1:d} {2}'.format(
|
||||
wait_time, nattempts, attempts_format)
|
||||
else:
|
||||
# Dont print anything if we succeeded immediately
|
||||
acquired_attempts_format = ''
|
||||
self._debug(
|
||||
'{0}: {1.path}[{1._start}:{1._length}] [Acquired{2}]'
|
||||
.format(lock_type, self, acquired_attempts_format))
|
||||
def _get_counts_desc(self):
|
||||
return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
|
||||
if tty.is_verbose() else ''
|
||||
|
||||
def _log_acquired(self, locktype, wait_time, nattempts):
|
||||
attempts_part = _attempts_str(wait_time, nattempts)
|
||||
now = datetime.now()
|
||||
desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg(locktype, '{0}{1}'.
|
||||
format(desc, attempts_part)))
|
||||
|
||||
def _log_acquiring(self, locktype):
|
||||
self._debug2(self._status_msg(locktype, 'Acquiring'))
|
||||
|
||||
def _log_downgraded(self, wait_time, nattempts):
|
||||
attempts_part = _attempts_str(wait_time, nattempts)
|
||||
now = datetime.now()
|
||||
desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg('READ LOCK', '{0}{1}'
|
||||
.format(desc, attempts_part)))
|
||||
|
||||
def _log_downgrading(self):
|
||||
self._debug2(self._status_msg('WRITE LOCK', 'Downgrading'))
|
||||
|
||||
def _log_released(self, locktype):
|
||||
now = datetime.now()
|
||||
desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg(locktype, desc))
|
||||
|
||||
def _log_releasing(self, locktype):
|
||||
self._debug2(self._status_msg(locktype, 'Releasing'))
|
||||
|
||||
def _log_upgraded(self, wait_time, nattempts):
|
||||
attempts_part = _attempts_str(wait_time, nattempts)
|
||||
now = datetime.now()
|
||||
desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg('WRITE LOCK', '{0}{1}'.
|
||||
format(desc, attempts_part)))
|
||||
|
||||
def _log_upgrading(self):
|
||||
self._debug2(self._status_msg('READ LOCK', 'Upgrading'))
|
||||
|
||||
def _status_msg(self, locktype, status):
|
||||
status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
|
||||
return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
|
||||
locktype, self, status_desc)
|
||||
|
||||
def _debug2(self, *args):
|
||||
# TODO: Easy place to make a single, temporary change to the
|
||||
# TODO: debug level associated with the more detailed messages.
|
||||
# TODO:
|
||||
# TODO: Someday it would be great if we could switch this to
|
||||
# TODO: another level, perhaps _between_ debug and verbose, or
|
||||
# TODO: some other form of filtering so the first level of
|
||||
# TODO: debugging doesn't have to generate these messages. Using
|
||||
# TODO: verbose here did not work as expected because tests like
|
||||
# TODO: test_spec_json will write the verbose messages to the
|
||||
# TODO: output that is used to check test correctness.
|
||||
tty.debug(*args)
|
||||
|
||||
|
||||
class LockTransaction(object):
|
||||
@@ -462,10 +619,28 @@ class LockError(Exception):
|
||||
"""Raised for any errors related to locks."""
|
||||
|
||||
|
||||
class LockDowngradeError(LockError):
|
||||
"""Raised when unable to downgrade from a write to a read lock."""
|
||||
def __init__(self, path):
|
||||
msg = "Cannot downgrade lock from write to read on file: %s" % path
|
||||
super(LockDowngradeError, self).__init__(msg)
|
||||
|
||||
|
||||
class LockLimitError(LockError):
|
||||
"""Raised when exceed maximum attempts to acquire a lock."""
|
||||
|
||||
|
||||
class LockTimeoutError(LockError):
|
||||
"""Raised when an attempt to acquire a lock times out."""
|
||||
|
||||
|
||||
class LockUpgradeError(LockError):
|
||||
"""Raised when unable to upgrade from a read to a write lock."""
|
||||
def __init__(self, path):
|
||||
msg = "Cannot upgrade lock from read to write on file: %s" % path
|
||||
super(LockUpgradeError, self).__init__(msg)
|
||||
|
||||
|
||||
class LockPermissionError(LockError):
|
||||
"""Raised when there are permission issues with a lock."""
|
||||
|
||||
|
@@ -135,7 +135,9 @@ def process_stacktrace(countback):
 def get_timestamp(force=False):
     """Get a string timestamp"""
     if _debug or _timestamp or force:
-        return datetime.now().strftime("[%Y-%m-%d-%H:%M:%S.%f] ")
+        # Note inclusion of the PID is useful for parallel builds.
+        return '[{0}, {1}] '.format(
+            datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), os.getpid())
     else:
         return ''
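As a quick, self-contained illustration of the new timestamp format (the printed values are examples only):

```python
import os
from datetime import datetime

# Reproduces the new "[timestamp, PID] " prefix used for log lines.
stamp = '[{0}, {1}] '.format(
    datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), os.getpid())
print(stamp)  # e.g. "[2020-04-15-12:00:00.123456, 12345] "
```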
@@ -7,18 +7,27 @@
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import atexit
|
||||
import errno
|
||||
import multiprocessing
|
||||
import os
|
||||
import re
|
||||
import select
|
||||
import sys
|
||||
import traceback
|
||||
import signal
|
||||
from contextlib import contextmanager
|
||||
from six import string_types
|
||||
from six import StringIO
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
try:
|
||||
import termios
|
||||
except ImportError:
|
||||
termios = None
|
||||
|
||||
|
||||
# Use this to strip escape sequences
|
||||
_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h')
|
||||
|
||||
@@ -31,6 +40,25 @@
|
||||
control = re.compile('(\x11\n|\x13\n)')
|
||||
|
||||
|
||||
@contextmanager
|
||||
def ignore_signal(signum):
|
||||
"""Context manager to temporarily ignore a signal."""
|
||||
old_handler = signal.signal(signum, signal.SIG_IGN)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
signal.signal(signum, old_handler)
|
||||
|
||||
|
||||
def _is_background_tty(stream):
|
||||
"""True if the stream is a tty and calling process is in the background.
|
||||
"""
|
||||
return (
|
||||
stream.isatty() and
|
||||
os.getpgrp() != os.tcgetpgrp(stream.fileno())
|
||||
)
|
||||
|
||||
|
||||
def _strip(line):
|
||||
"""Strip color and control characters from a line."""
|
||||
return _escape.sub('', line)
|
||||
@@ -41,22 +69,75 @@ class keyboard_input(object):
|
||||
|
||||
Use this with ``sys.stdin`` for keyboard input, e.g.::
|
||||
|
||||
with keyboard_input(sys.stdin):
|
||||
r, w, x = select.select([sys.stdin], [], [])
|
||||
# ... do something with keypresses ...
|
||||
with keyboard_input(sys.stdin) as kb:
|
||||
while True:
|
||||
kb.check_fg_bg()
|
||||
r, w, x = select.select([sys.stdin], [], [])
|
||||
# ... do something with keypresses ...
|
||||
|
||||
This disables canonical input so that keypresses are available on the
|
||||
stream immediately. Typically standard input allows line editing,
|
||||
which means keypresses won't be sent until the user hits return.
|
||||
The ``keyboard_input`` context manager disables canonical
|
||||
(line-based) input and echoing, so that keypresses are available on
|
||||
the stream immediately, and they are not printed to the
|
||||
terminal. Typically, standard input is line-buffered, which means
|
||||
keypresses won't be sent until the user hits return. In this mode, a
|
||||
user can hit, e.g., 'v', and it will be read on the other end of the
|
||||
pipe immediately but not printed.
|
||||
|
||||
It also disables echoing, so that keys pressed aren't printed to the
|
||||
terminal. So, the user can hit, e.g., 'v', and it's read on the
|
||||
other end of the pipe immediately but not printed.
|
||||
The handler takes care to ensure that terminal changes only take
|
||||
effect when the calling process is in the foreground. If the process
|
||||
is backgrounded, canonical mode and echo are re-enabled. They are
|
||||
disabled again when the calling process comes back to the foreground.
|
||||
|
||||
When the with block completes, prior TTY settings are restored.
|
||||
This context manager works through a single signal handler for
|
||||
``SIGTSTP``, along with a polling routine called ``check_fg_bg()``.
|
||||
Here are the relevant states, transitions, and POSIX signals::
|
||||
|
||||
[Running] -------- Ctrl-Z sends SIGTSTP ------------.
|
||||
[ in FG ] <------- fg sends SIGCONT --------------. |
|
||||
^ | |
|
||||
| fg (no signal) | |
|
||||
| | v
|
||||
[Running] <------- bg sends SIGCONT ---------- [Stopped]
|
||||
[ in BG ] [ in BG ]
|
||||
|
||||
We handle all transitions except for ``SIGTSTP`` generated by Ctrl-Z
|
||||
by periodically calling ``check_fg_bg()``. This routine notices if
|
||||
we are in the background with canonical mode or echo disabled, or if
|
||||
we are in the foreground without canonical disabled and echo enabled,
|
||||
and it fixes the terminal settings in response.
|
||||
|
||||
``check_fg_bg()`` works *except* for when the process is stopped with
|
||||
``SIGTSTP``. We cannot rely on a periodic timer in this case, as it
|
||||
may not run before the process stops. We therefore restore terminal
|
||||
settings in the ``SIGTSTP`` handler.
|
||||
|
||||
Additional notes:
|
||||
|
||||
* We mostly use polling here instead of a SIGALRM timer or a
|
||||
thread. This is to avoid the complexities of many interrupts, which
|
||||
seem to make system calls (like I/O) unreliable in older Python
|
||||
versions (2.6 and 2.7). See these issues for details:
|
||||
|
||||
1. https://www.python.org/dev/peps/pep-0475/
|
||||
2. https://bugs.python.org/issue8354
|
||||
|
||||
There are essentially too many ways for asynchronous signals to go
|
||||
wrong if we also have to support older Python versions, so we opt
|
||||
not to use them.
|
||||
|
||||
* ``SIGSTOP`` can stop a process (in the foreground or background),
|
||||
but it can't be caught. Because of this, we can't fix any terminal
|
||||
settings on ``SIGSTOP``, and the terminal will be left with
|
||||
``ICANON`` and ``ECHO`` disabled until it resumes execution.
|
||||
|
||||
* Technically, a process *could* be sent ``SIGTSTP`` while running in
|
||||
the foreground, without the shell backgrounding that process. This
|
||||
doesn't happen in practice, and we assume that ``SIGTSTP`` always
|
||||
means that defaults should be restored.
|
||||
|
||||
* We rely on ``termios`` support. Without it, or if the stream isn't
|
||||
a TTY, ``keyboard_input`` has no effect.
|
||||
|
||||
Note: this depends on termios support. If termios isn't available,
|
||||
or if the stream isn't a TTY, this context manager has no effect.
|
||||
"""
|
||||
def __init__(self, stream):
|
||||
"""Create a context manager that will enable keyboard input on stream.
|
||||
@@ -69,44 +150,97 @@ def __init__(self, stream):
|
||||
"""
|
||||
self.stream = stream
|
||||
|
||||
def _is_background(self):
|
||||
"""True iff calling process is in the background."""
|
||||
return _is_background_tty(self.stream)
|
||||
|
||||
def _get_canon_echo_flags(self):
|
||||
"""Get current termios canonical and echo settings."""
|
||||
cfg = termios.tcgetattr(self.stream)
|
||||
return (
|
||||
bool(cfg[3] & termios.ICANON),
|
||||
bool(cfg[3] & termios.ECHO),
|
||||
)
|
||||
|
||||
def _enable_keyboard_input(self):
|
||||
"""Disable canonical input and echoing on ``self.stream``."""
|
||||
# "enable" input by disabling canonical mode and echo
|
||||
new_cfg = termios.tcgetattr(self.stream)
|
||||
new_cfg[3] &= ~termios.ICANON
|
||||
new_cfg[3] &= ~termios.ECHO
|
||||
|
||||
# Apply new settings for terminal
|
||||
with ignore_signal(signal.SIGTTOU):
|
||||
termios.tcsetattr(self.stream, termios.TCSANOW, new_cfg)
|
||||
|
||||
def _restore_default_terminal_settings(self):
|
||||
"""Restore the original input configuration on ``self.stream``."""
|
||||
# _restore_default_terminal_settings Can be called in foreground
|
||||
# or background. When called in the background, tcsetattr triggers
|
||||
# SIGTTOU, which we must ignore, or the process will be stopped.
|
||||
with ignore_signal(signal.SIGTTOU):
|
||||
termios.tcsetattr(self.stream, termios.TCSANOW, self.old_cfg)
|
||||
|
||||
def _tstp_handler(self, signum, frame):
|
||||
self._restore_default_terminal_settings()
|
||||
os.kill(os.getpid(), signal.SIGSTOP)
|
||||
|
||||
def check_fg_bg(self):
|
||||
# old_cfg is set up in __enter__ and indicates that we have
|
||||
# termios and a valid stream.
|
||||
if not self.old_cfg:
|
||||
return
|
||||
|
||||
# query terminal flags and fg/bg status
|
||||
flags = self._get_canon_echo_flags()
|
||||
bg = self._is_background()
|
||||
|
||||
# restore sanity if flags are amiss -- see diagram in class docs
|
||||
if not bg and any(flags): # fg, but input not enabled
|
||||
self._enable_keyboard_input()
|
||||
elif bg and not all(flags): # bg, but input enabled
|
||||
self._restore_default_terminal_settings()
|
||||
|
||||
def __enter__(self):
|
||||
"""Enable immediate keypress input on stream.
|
||||
"""Enable immediate keypress input, while this process is foreground.
|
||||
|
||||
If the stream is not a TTY or the system doesn't support termios,
|
||||
do nothing.
|
||||
"""
|
||||
self.old_cfg = None
|
||||
self.old_handlers = {}
|
||||
|
||||
# Ignore all this if the input stream is not a tty.
|
||||
if not self.stream or not self.stream.isatty():
|
||||
return
|
||||
return self
|
||||
|
||||
try:
|
||||
# If this fails, self.old_cfg will remain None
|
||||
import termios
|
||||
if termios:
|
||||
# save old termios settings to restore later
|
||||
self.old_cfg = termios.tcgetattr(self.stream)
|
||||
|
||||
# save old termios settings
|
||||
fd = self.stream.fileno()
|
||||
self.old_cfg = termios.tcgetattr(fd)
|
||||
# Install a signal handler to disable/enable keyboard input
|
||||
# when the process moves between foreground and background.
|
||||
self.old_handlers[signal.SIGTSTP] = signal.signal(
|
||||
signal.SIGTSTP, self._tstp_handler)
|
||||
|
||||
# create new settings with canonical input and echo
|
||||
# disabled, so keypresses are immediate & don't echo.
|
||||
self.new_cfg = termios.tcgetattr(fd)
|
||||
self.new_cfg[3] &= ~termios.ICANON
|
||||
self.new_cfg[3] &= ~termios.ECHO
|
||||
# add an atexit handler to ensure the terminal is restored
|
||||
atexit.register(self._restore_default_terminal_settings)
|
||||
|
||||
# Apply new settings for terminal
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, self.new_cfg)
|
||||
# enable keyboard input initially (if foreground)
|
||||
if not self._is_background():
|
||||
self._enable_keyboard_input()
|
||||
|
||||
except Exception:
|
||||
pass # some OS's do not support termios, so ignore
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exception, traceback):
|
||||
"""If termios was avaialble, restore old settings."""
|
||||
"""If termios was available, restore old settings."""
|
||||
if self.old_cfg:
|
||||
import termios
|
||||
termios.tcsetattr(
|
||||
self.stream.fileno(), termios.TCSADRAIN, self.old_cfg)
|
||||
self._restore_default_terminal_settings()
|
||||
|
||||
# restore SIGTSTP and SIGCONT handlers
|
||||
if self.old_handlers:
|
||||
for signum, old_handler in self.old_handlers.items():
|
||||
signal.signal(signum, old_handler)
|
||||
|
||||
|
||||
class Unbuffered(object):
|
||||
@@ -282,11 +416,11 @@ def __enter__(self):
|
||||
self._saved_debug = tty._debug
|
||||
|
||||
# OS-level pipe for redirecting output to logger
|
||||
self.read_fd, self.write_fd = os.pipe()
|
||||
read_fd, write_fd = os.pipe()
|
||||
|
||||
# Multiprocessing pipe for communication back from the daemon
|
||||
# Currently only used to save echo value between uses
|
||||
self.parent, self.child = multiprocessing.Pipe()
|
||||
self.parent_pipe, child_pipe = multiprocessing.Pipe()
|
||||
|
||||
# Sets a daemon that writes to file what it reads from a pipe
|
||||
try:
|
||||
@@ -297,10 +431,15 @@ def __enter__(self):
|
||||
input_stream = None # just don't forward input if this fails
|
||||
|
||||
self.process = multiprocessing.Process(
|
||||
target=self._writer_daemon, args=(input_stream,))
|
||||
target=_writer_daemon,
|
||||
args=(
|
||||
input_stream, read_fd, write_fd, self.echo, self.log_file,
|
||||
child_pipe
|
||||
)
|
||||
)
|
||||
self.process.daemon = True # must set before start()
|
||||
self.process.start()
|
||||
os.close(self.read_fd) # close in the parent process
|
||||
os.close(read_fd) # close in the parent process
|
||||
|
||||
finally:
|
||||
if input_stream:
|
||||
@@ -322,9 +461,9 @@ def __enter__(self):
|
||||
self._saved_stderr = os.dup(sys.stderr.fileno())
|
||||
|
||||
# redirect to the pipe we created above
|
||||
os.dup2(self.write_fd, sys.stdout.fileno())
|
||||
os.dup2(self.write_fd, sys.stderr.fileno())
|
||||
os.close(self.write_fd)
|
||||
os.dup2(write_fd, sys.stdout.fileno())
|
||||
os.dup2(write_fd, sys.stderr.fileno())
|
||||
os.close(write_fd)
|
||||
|
||||
else:
|
||||
# Handle I/O the Python way. This won't redirect lower-level
|
||||
@@ -337,7 +476,7 @@ def __enter__(self):
|
||||
self._saved_stderr = sys.stderr
|
||||
|
||||
# create a file object for the pipe; redirect to it.
|
||||
pipe_fd_out = os.fdopen(self.write_fd, 'w')
|
||||
pipe_fd_out = os.fdopen(write_fd, 'w')
|
||||
sys.stdout = pipe_fd_out
|
||||
sys.stderr = pipe_fd_out
|
||||
|
||||
@@ -376,14 +515,14 @@ def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
|
||||
# print log contents in parent if needed.
|
||||
if self.write_log_in_parent:
|
||||
string = self.parent.recv()
|
||||
string = self.parent_pipe.recv()
|
||||
self.file_like.write(string)
|
||||
|
||||
if self.close_log_in_parent:
|
||||
self.log_file.close()
|
||||
|
||||
# recover and store echo settings from the child before it dies
|
||||
self.echo = self.parent.recv()
|
||||
self.echo = self.parent_pipe.recv()
|
||||
|
||||
# join the daemon process. The daemon will quit automatically
|
||||
# when the write pipe is closed; we just wait for it here.
|
||||
@@ -408,72 +547,166 @@ def force_echo(self):
|
||||
# exactly before and after the text we want to echo.
|
||||
sys.stdout.write(xon)
|
||||
sys.stdout.flush()
|
||||
yield
|
||||
sys.stdout.write(xoff)
|
||||
sys.stdout.flush()
|
||||
|
||||
def _writer_daemon(self, stdin):
|
||||
"""Daemon that writes output to the log file and stdout."""
|
||||
# Use line buffering (3rd param = 1) since Python 3 has a bug
|
||||
# that prevents unbuffered text I/O.
|
||||
in_pipe = os.fdopen(self.read_fd, 'r', 1)
|
||||
os.close(self.write_fd)
|
||||
|
||||
echo = self.echo # initial echo setting, user-controllable
|
||||
force_echo = False # parent can force echo for certain output
|
||||
|
||||
# list of streams to select from
|
||||
istreams = [in_pipe, stdin] if stdin else [in_pipe]
|
||||
|
||||
log_file = self.log_file
|
||||
try:
|
||||
with keyboard_input(stdin):
|
||||
while True:
|
||||
# No need to set any timeout for select.select
|
||||
# Wait until a key press or an event on in_pipe.
|
||||
rlist, _, _ = select.select(istreams, [], [])
|
||||
|
||||
# Allow user to toggle echo with 'v' key.
|
||||
# Currently ignores other chars.
|
||||
if stdin in rlist:
|
||||
if stdin.read(1) == 'v':
|
||||
echo = not echo
|
||||
|
||||
# Handle output from the with block process.
|
||||
if in_pipe in rlist:
|
||||
# If we arrive here it means that in_pipe was
|
||||
# ready for reading : it should never happen that
|
||||
# line is false-ish
|
||||
line = in_pipe.readline()
|
||||
if not line:
|
||||
break # EOF
|
||||
|
||||
# find control characters and strip them.
|
||||
controls = control.findall(line)
|
||||
line = re.sub(control, '', line)
|
||||
|
||||
# Echo to stdout if requested or forced
|
||||
if echo or force_echo:
|
||||
sys.stdout.write(line)
|
||||
sys.stdout.flush()
|
||||
|
||||
# Stripped output to log file.
|
||||
log_file.write(_strip(line))
|
||||
log_file.flush()
|
||||
|
||||
if xon in controls:
|
||||
force_echo = True
|
||||
if xoff in controls:
|
||||
force_echo = False
|
||||
except BaseException:
|
||||
tty.error("Exception occurred in writer daemon!")
|
||||
traceback.print_exc()
|
||||
|
||||
yield
|
||||
finally:
|
||||
# send written data back to parent if we used a StringIO
|
||||
if self.write_log_in_parent:
|
||||
self.child.send(log_file.getvalue())
|
||||
log_file.close()
|
||||
sys.stdout.write(xoff)
|
||||
sys.stdout.flush()
|
||||
|
||||
# send echo value back to the parent so it can be preserved.
|
||||
self.child.send(echo)
|
||||
|
||||
def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
|
||||
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
|
||||
|
||||
The daemon receives output from the parent process and writes it both
|
||||
to a log and, optionally, to ``stdout``. The relationship looks like
|
||||
this::
|
||||
|
||||
Terminal
|
||||
|
|
||||
| +-------------------------+
|
||||
| | Parent Process |
|
||||
+--------> | with log_output(): |
|
||||
| stdin | ... |
|
||||
| +-------------------------+
|
||||
| ^ | write_fd (parent's redirected stdout)
|
||||
| | control |
|
||||
| | pipe |
|
||||
| | v read_fd
|
||||
| +-------------------------+ stdout
|
||||
| | Writer daemon |------------>
|
||||
+--------> | read from read_fd | log_file
|
||||
stdin | write to out and log |------------>
|
||||
+-------------------------+
|
||||
|
||||
Within the ``log_output`` handler, the parent's output is redirected
|
||||
to a pipe from which the daemon reads. The daemon writes each line
|
||||
from the pipe to a log file and (optionally) to ``stdout``. The user
|
||||
can hit ``v`` to toggle output on ``stdout``.
|
||||
|
||||
In addition to the input and output file descriptors, the daemon
|
||||
interacts with the parent via ``control_pipe``. It reports whether
|
||||
``stdout`` was enabled or disabled when it finished and, if the
|
||||
``log_file`` is a ``StringIO`` object, then the daemon also sends the
|
||||
logged output back to the parent as a string, to be written to the
|
||||
``StringIO`` in the parent. This is mainly for testing.
|
||||
|
||||
Arguments:
|
||||
stdin (stream): input from the terminal
|
||||
read_fd (int): pipe for reading from parent's redirected stdout
|
||||
write_fd (int): parent's end of the pipe will write to (will be
|
||||
immediately closed by the writer daemon)
|
||||
echo (bool): initial echo setting -- controlled by user and
|
||||
            preserved across multiple writer daemons
        log_file (file-like): file to log all output
        control_pipe (Pipe): multiprocessing pipe on which to send control
            information to the parent

    """
    # Use line buffering (3rd param = 1) since Python 3 has a bug
    # that prevents unbuffered text I/O.
    in_pipe = os.fdopen(read_fd, 'r', 1)
    os.close(write_fd)

    # list of streams to select from
    istreams = [in_pipe, stdin] if stdin else [in_pipe]
    force_echo = False  # parent can force echo for certain output

    try:
        with keyboard_input(stdin) as kb:
            while True:
                # fix the terminal settings if we recently came to
                # the foreground
                kb.check_fg_bg()

                # wait for input from any stream. use a coarse timeout to
                # allow other checks while we wait for input
                rlist, _, _ = _retry(select.select)(istreams, [], [], 1e-1)

                # Allow user to toggle echo with 'v' key.
                # Currently ignores other chars.
                # only read stdin if we're in the foreground
                if stdin in rlist and not _is_background_tty(stdin):
                    # it's possible to be backgrounded between the above
                    # check and the read, so we ignore SIGTTIN here.
                    with ignore_signal(signal.SIGTTIN):
                        try:
                            if stdin.read(1) == 'v':
                                echo = not echo
                        except IOError as e:
                            # If SIGTTIN is ignored, the system gives EIO
                            # to let the caller know the read failed b/c it
                            # was in the bg. Ignore that too.
                            if e.errno != errno.EIO:
                                raise

                if in_pipe in rlist:
                    # Handle output from the calling process.
                    line = _retry(in_pipe.readline)()
                    if not line:
                        break

                    # find control characters and strip them.
                    controls = control.findall(line)
                    line = control.sub('', line)

                    # Echo to stdout if requested or forced.
                    if echo or force_echo:
                        sys.stdout.write(line)
                        sys.stdout.flush()

                    # Stripped output to log file.
                    log_file.write(_strip(line))
                    log_file.flush()

                    if xon in controls:
                        force_echo = True
                    if xoff in controls:
                        force_echo = False

    except BaseException:
        tty.error("Exception occurred in writer daemon!")
        traceback.print_exc()

    finally:
        # send written data back to parent if we used a StringIO
        if isinstance(log_file, StringIO):
            control_pipe.send(log_file.getvalue())
            log_file.close()

        # send echo value back to the parent so it can be preserved.
        control_pipe.send(echo)


def _retry(function):
    """Retry a call if errors indicating an interrupted system call occur.

    Interrupted system calls return -1 and set ``errno`` to ``EINTR`` if
    certain flags are not set.  Newer Pythons automatically retry them,
    but older Pythons do not, so we need to retry the calls.

    This function converts a call like this:

        syscall(args)

    and makes it retry by wrapping the function like this:

        _retry(syscall)(args)

    This is a private function because EINTR is unfortunately raised in
    different ways from different functions, and we only handle the ones
    relevant for this file.

    """
    def wrapped(*args, **kwargs):
        while True:
            try:
                return function(*args, **kwargs)
            except IOError as e:
                if e.errno == errno.EINTR:
                    continue
                raise
            except select.error as e:
                if e.args[0] == errno.EINTR:
                    continue
                raise
    return wrapped
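To see what ``_retry`` buys the daemon loop, here is a minimal standalone sketch of the same EINTR-retry pattern around ``select.select``. The helper name ``retry_on_eintr`` and the throwaway pipe are illustrative only, not part of the diff.

import errno
import os
import select

def retry_on_eintr(function):
    # keep re-issuing the call until it completes without EINTR
    def wrapped(*args, **kwargs):
        while True:
            try:
                return function(*args, **kwargs)
            except (IOError, OSError) as e:
                if e.errno == errno.EINTR:
                    continue
                raise
    return wrapped

read_fd, write_fd = os.pipe()
os.write(write_fd, b'hello\n')
# a signal delivered during this select would be retried transparently
rlist, _, _ = retry_on_eintr(select.select)([read_fd], [], [], 0.1)
print(read_fd in rlist)  # True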
lib/spack/llnl/util/tty/pty.py (new file, 344 lines)
@@ -0,0 +1,344 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""The pty module handles pseudo-terminals.
|
||||
|
||||
Currently, the infrastructure here is only used to test llnl.util.tty.log.
|
||||
|
||||
If this is used outside a testing environment, we will want to reconsider
|
||||
things like timeouts in ``ProcessController.wait()``, which are set to
|
||||
get tests done quickly, not to avoid high CPU usage.
|
||||
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import signal
|
||||
import multiprocessing
|
||||
import re
|
||||
import sys
|
||||
import termios
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import llnl.util.tty.log as log
|
||||
|
||||
from spack.util.executable import which
|
||||
|
||||
|
||||
class ProcessController(object):
|
||||
"""Wrapper around some fundamental process control operations.
|
||||
|
||||
This allows one process to drive another similar to the way a shell
|
||||
would, by sending signals and I/O.
|
||||
|
||||
"""
|
||||
def __init__(self, pid, master_fd,
|
||||
timeout=1, sleep_time=1e-1, debug=False):
|
||||
"""Create a controller to manipulate the process with id ``pid``
|
||||
|
||||
Args:
|
||||
pid (int): id of process to control
|
||||
master_fd (int): master file descriptor attached to pid's stdin
|
||||
timeout (int): time in seconds for wait operations to time out
|
||||
(default 1 second)
|
||||
sleep_time (int): time to sleep after signals, to control the
|
||||
signal rate of the controller (default 1e-1)
|
||||
debug (bool): whether ``horizontal_line()`` and ``status()`` should
|
||||
produce output when called (default False)
|
||||
|
||||
``sleep_time`` allows the caller to insert delays after calls
|
||||
that signal or modify the controlled process. Python behaves very
|
||||
poorly if signals arrive too fast, and drowning a Python process
|
||||
with a Python handler with signals can kill the process and hang
|
||||
our tests, so we throttle this to a closer-to-interactive rate.
|
||||
|
||||
"""
|
||||
self.pid = pid
|
||||
self.pgid = os.getpgid(pid)
|
||||
self.master_fd = master_fd
|
||||
self.timeout = timeout
|
||||
self.sleep_time = sleep_time
|
||||
self.debug = debug
|
||||
|
||||
# we need the ps command to wait for process statuses
|
||||
self.ps = which("ps", required=True)
|
||||
|
||||
def get_canon_echo_attrs(self):
|
||||
"""Get echo and canon attributes of the terminal of master_fd."""
|
||||
cfg = termios.tcgetattr(self.master_fd)
|
||||
return (
|
||||
bool(cfg[3] & termios.ICANON),
|
||||
bool(cfg[3] & termios.ECHO),
|
||||
)
|
||||
|
||||
def horizontal_line(self, name):
|
||||
"""Labled horizontal line for debugging."""
|
||||
if self.debug:
|
||||
sys.stderr.write(
|
||||
"------------------------------------------- %s\n" % name
|
||||
)
|
||||
|
||||
def status(self):
|
||||
"""Print debug message with status info for the child."""
|
||||
if self.debug:
|
||||
canon, echo = self.get_canon_echo_attrs()
|
||||
sys.stderr.write("canon: %s, echo: %s\n" % (
|
||||
"on" if canon else "off",
|
||||
"on" if echo else "off",
|
||||
))
|
||||
sys.stderr.write("input: %s\n" % self.input_on())
|
||||
sys.stderr.write("bg: %s\n" % self.background())
|
||||
sys.stderr.write("\n")
|
||||
|
||||
def input_on(self):
|
||||
"""True if keyboard input is enabled on the master_fd pty."""
|
||||
return self.get_canon_echo_attrs() == (False, False)
|
||||
|
||||
def background(self):
|
||||
"""True if pgid is in a background pgroup of master_fd's terminal."""
|
||||
return self.pgid != os.tcgetpgrp(self.master_fd)
|
||||
|
||||
def tstp(self):
|
||||
"""Send SIGTSTP to the controlled process."""
|
||||
self.horizontal_line("tstp")
|
||||
os.killpg(self.pgid, signal.SIGTSTP)
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def cont(self):
|
||||
self.horizontal_line("cont")
|
||||
os.killpg(self.pgid, signal.SIGCONT)
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def fg(self):
|
||||
self.horizontal_line("fg")
|
||||
with log.ignore_signal(signal.SIGTTOU):
|
||||
os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def bg(self):
|
||||
self.horizontal_line("bg")
|
||||
with log.ignore_signal(signal.SIGTTOU):
|
||||
os.tcsetpgrp(self.master_fd, os.getpgrp())
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def write(self, byte_string):
|
||||
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
|
||||
os.write(self.master_fd, byte_string)
|
||||
|
||||
def wait(self, condition):
|
||||
start = time.time()
|
||||
while (((time.time() - start) < self.timeout) and not condition()):
|
||||
time.sleep(1e-2)
|
||||
assert condition()
|
||||
|
||||
def wait_enabled(self):
|
||||
self.wait(lambda: self.input_on() and not self.background())
|
||||
|
||||
def wait_disabled(self):
|
||||
self.wait(lambda: not self.input_on() and self.background())
|
||||
|
||||
def wait_disabled_fg(self):
|
||||
self.wait(lambda: not self.input_on() and not self.background())
|
||||
|
||||
def proc_status(self):
|
||||
status = self.ps("-p", str(self.pid), "-o", "stat", output=str)
|
||||
status = re.split(r"\s+", status.strip(), re.M)
|
||||
return status[1]
|
||||
|
||||
def wait_stopped(self):
|
||||
self.wait(lambda: "T" in self.proc_status())
|
||||
|
||||
def wait_running(self):
|
||||
self.wait(lambda: "T" not in self.proc_status())
|
||||
|
||||
|
||||
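For readers unfamiliar with the job-control calls that ``ProcessController`` wraps, the standalone sketch below stops and resumes a child process group and checks its state with ``ps``, much as ``tstp()``, ``cont()``, ``proc_status()`` and ``wait_stopped()`` do above. The ``sleep 30`` child and the timings are arbitrary stand-ins.

import os
import signal
import subprocess
import time

# start a child in its own process group, as the pty child is
child = subprocess.Popen(['sleep', '30'], preexec_fn=os.setpgrp)
pgid = os.getpgid(child.pid)

os.killpg(pgid, signal.SIGTSTP)          # like ProcessController.tstp()
time.sleep(0.1)
stat = subprocess.check_output(
    ['ps', '-p', str(child.pid), '-o', 'stat=']).decode().strip()
print('stopped:', 'T' in stat)           # like wait_stopped()

os.killpg(pgid, signal.SIGCONT)          # like ProcessController.cont()
child.terminate()
child.wait()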
class PseudoShell(object):
|
||||
"""Sets up master and child processes with a PTY.
|
||||
|
||||
You can create a ``PseudoShell`` if you want to test how some
|
||||
function responds to terminal input. This is a pseudo-shell from a
|
||||
job control perspective; ``master_function`` and ``child_function``
|
||||
are set up with a pseudoterminal (pty) so that the master can drive
|
||||
the child through process control signals and I/O.
|
||||
|
||||
The two functions should have signatures like this::
|
||||
|
||||
def master_function(proc, ctl, **kwargs)
|
||||
def child_function(**kwargs)
|
||||
|
||||
``master_function`` is spawned in its own process and passed three
|
||||
arguments:
|
||||
|
||||
proc
|
||||
the ``multiprocessing.Process`` object representing the child
|
||||
ctl
|
||||
a ``ProcessController`` object tied to the child
|
||||
kwargs
|
||||
keyword arguments passed from ``PseudoShell.start()``.
|
||||
|
||||
``child_function`` is only passed ``kwargs`` delegated from
|
||||
``PseudoShell.start()``.
|
||||
|
||||
The ``ctl.master_fd`` will have its ``master_fd`` connected to
|
||||
``sys.stdin`` in the child process. Both processes will share the
|
||||
same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
|
||||
``PseudoShell``.
|
||||
|
||||
Here are the relationships between processes created::
|
||||
|
||||
._________________________________________________________.
|
||||
| Child Process | pid 2
|
||||
| - runs child_function | pgroup 2
|
||||
|_________________________________________________________| session 1
|
||||
^
|
||||
| create process with master_fd connected to stdin
|
||||
| stdout, stderr are the same as caller
|
||||
._________________________________________________________.
|
||||
| Master Process | pid 1
|
||||
| - runs master_function | pgroup 1
|
||||
| - uses ProcessController and master_fd to control child | session 1
|
||||
|_________________________________________________________|
|
||||
^
|
||||
| create process
|
||||
| stdin, stdout, stderr are the same as caller
|
||||
._________________________________________________________.
|
||||
| Caller | pid 0
|
||||
| - Constructs, starts, joins PseudoShell | pgroup 0
|
||||
| - provides master_function, child_function | session 0
|
||||
|_________________________________________________________|
|
||||
|
||||
"""
|
||||
def __init__(self, master_function, child_function):
|
||||
self.proc = None
|
||||
self.master_function = master_function
|
||||
self.child_function = child_function
|
||||
|
||||
# these can be optionally set to change defaults
|
||||
self.controller_timeout = 1
|
||||
self.sleep_time = 0
|
||||
|
||||
def start(self, **kwargs):
|
||||
"""Start the master and child processes.
|
||||
|
||||
Arguments:
|
||||
kwargs (dict): arbitrary keyword arguments that will be
|
||||
passed to master and child functions
|
||||
|
||||
The master process will create the child, then call
|
||||
``master_function``. The child process will call
|
||||
``child_function``.
|
||||
|
||||
"""
|
||||
self.proc = multiprocessing.Process(
|
||||
target=PseudoShell._set_up_and_run_master_function,
|
||||
args=(self.master_function, self.child_function,
|
||||
self.controller_timeout, self.sleep_time),
|
||||
kwargs=kwargs,
|
||||
)
|
||||
self.proc.start()
|
||||
|
||||
def join(self):
|
||||
"""Wait for the child process to finish, and return its exit code."""
|
||||
self.proc.join()
|
||||
return self.proc.exitcode
|
||||
|
||||
@staticmethod
|
||||
def _set_up_and_run_child_function(
|
||||
tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
|
||||
"""Child process wrapper for PseudoShell.
|
||||
|
||||
Handles the mechanics of setting up a PTY, then calls
|
||||
``child_function``.
|
||||
|
||||
"""
|
||||
# new process group, like a command or pipeline launched by a shell
|
||||
os.setpgrp()
|
||||
|
||||
# take controlling terminal and set up pty IO
|
||||
stdin_fd = os.open(tty_name, os.O_RDWR)
|
||||
os.dup2(stdin_fd, sys.stdin.fileno())
|
||||
os.dup2(stdout_fd, sys.stdout.fileno())
|
||||
os.dup2(stderr_fd, sys.stderr.fileno())
|
||||
os.close(stdin_fd)
|
||||
|
||||
if kwargs.get("debug"):
|
||||
sys.stderr.write(
|
||||
"child: stdin.isatty(): %s\n" % sys.stdin.isatty())
|
||||
|
||||
# tell the parent that we're really running
|
||||
if kwargs.get("debug"):
|
||||
sys.stderr.write("child: ready!\n")
|
||||
ready.value = True
|
||||
|
||||
try:
|
||||
child_function(**kwargs)
|
||||
except BaseException:
|
||||
traceback.print_exc()
|
||||
|
||||
@staticmethod
|
||||
def _set_up_and_run_master_function(
|
||||
master_function, child_function, controller_timeout, sleep_time,
|
||||
**kwargs):
|
||||
"""Set up a pty, spawn a child process, and execute master_function.
|
||||
|
||||
Handles the mechanics of setting up a PTY, then calls
|
||||
``master_function``.
|
||||
|
||||
"""
|
||||
os.setsid() # new session; this process is the controller
|
||||
|
||||
master_fd, child_fd = os.openpty()
|
||||
pty_name = os.ttyname(child_fd)
|
||||
|
||||
# take controlling terminal
|
||||
pty_fd = os.open(pty_name, os.O_RDWR)
|
||||
os.close(pty_fd)
|
||||
|
||||
ready = multiprocessing.Value('i', False)
|
||||
child_process = multiprocessing.Process(
|
||||
target=PseudoShell._set_up_and_run_child_function,
|
||||
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
|
||||
ready, child_function),
|
||||
kwargs=kwargs,
|
||||
)
|
||||
child_process.start()
|
||||
|
||||
# wait for subprocess to be running and connected.
|
||||
while not ready.value:
|
||||
time.sleep(1e-5)
|
||||
pass
|
||||
|
||||
if kwargs.get("debug"):
|
||||
sys.stderr.write("pid: %d\n" % os.getpid())
|
||||
sys.stderr.write("pgid: %d\n" % os.getpgrp())
|
||||
sys.stderr.write("sid: %d\n" % os.getsid(0))
|
||||
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
|
||||
sys.stderr.write("\n")
|
||||
|
||||
child_pgid = os.getpgid(child_process.pid)
|
||||
sys.stderr.write("child pid: %d\n" % child_process.pid)
|
||||
sys.stderr.write("child pgid: %d\n" % child_pgid)
|
||||
sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
|
||||
sys.stderr.write("\n")
|
||||
sys.stderr.flush()
|
||||
# set up master to ignore SIGTSTP, like a shell
|
||||
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
|
||||
|
||||
# call the master function once the child is ready
|
||||
try:
|
||||
controller = ProcessController(
|
||||
child_process.pid, master_fd, debug=kwargs.get("debug"))
|
||||
controller.timeout = controller_timeout
|
||||
controller.sleep_time = sleep_time
|
||||
error = master_function(child_process, controller, **kwargs)
|
||||
except BaseException:
|
||||
error = 1
|
||||
traceback.print_exc()
|
||||
|
||||
child_process.join()
|
||||
|
||||
# return whether either the parent or child failed
|
||||
return error or child_process.exitcode
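Pieced together from the class docstring, a ``PseudoShell`` round trip might look roughly like the sketch below. The two functions and the single write are illustrative; the real users of this class are the llnl.util.tty.log tests.

import sys
from llnl.util.tty.pty import PseudoShell

def child_function(**kwargs):
    # runs with its stdin attached to the pty created by the master
    line = sys.stdin.readline()
    sys.stdout.write('child read: %s' % line)

def master_function(proc, ctl, **kwargs):
    ctl.write(b'hello\n')   # type into the child's terminal
    proc.join()             # wait for the child to finish
    return proc.exitcode

shell = PseudoShell(master_function, child_function)
shell.start(debug=False)
print('exit code:', shell.join())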
|
@@ -5,7 +5,7 @@

#: major, minor, patch version for Spack, in a tuple
spack_version_info = (0, 13, 4)
spack_version_info = (0, 14, 2)

#: String containing Spack version joined with .'s
spack_version = '.'.join(str(v) for v in spack_version_info)
@@ -18,7 +18,7 @@
|
||||
from six.moves.urllib.error import URLError
|
||||
|
||||
import llnl.util.tty as tty
|
||||
from llnl.util.filesystem import mkdirp, install_tree
|
||||
from llnl.util.filesystem import mkdirp
|
||||
|
||||
import spack.cmd
|
||||
import spack.config as config
|
||||
@@ -308,7 +308,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
cache_prefix = build_cache_prefix(tmpdir)
|
||||
|
||||
tarfile_name = tarball_name(spec, '.tar.gz')
|
||||
tarfile_name = tarball_name(spec, '.tar.bz2')
|
||||
tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
|
||||
tarfile_path = os.path.join(tarfile_dir, tarfile_name)
|
||||
spackfile_path = os.path.join(
|
||||
@@ -342,8 +342,18 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
|
||||
raise NoOverwriteException(url_util.format(remote_specfile_path))
|
||||
|
||||
# make a copy of the install directory to work with
|
||||
workdir = os.path.join(tempfile.mkdtemp(), os.path.basename(spec.prefix))
|
||||
install_tree(spec.prefix, workdir, symlinks=True)
|
||||
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
|
||||
# install_tree copies hardlinks
|
||||
# create a temporary tarfile from prefix and extract it to workdir
|
||||
# tarfile preserves hardlinks
|
||||
temp_tarfile_name = tarball_name(spec, '.tar')
|
||||
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
|
||||
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
|
||||
tar.add(name='%s' % spec.prefix,
|
||||
arcname='.')
|
||||
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
|
||||
tar.extractall(workdir)
|
||||
os.remove(temp_tarfile_path)
|
||||
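The comments above are the key point of this change: copying a prefix with ``install_tree`` duplicates hardlinked files, while a tar round trip keeps them as a single inode. A small self-contained check of that behavior (throwaway temporary directories only):

import os
import tarfile
import tempfile

src = tempfile.mkdtemp()
dst = tempfile.mkdtemp()

with open(os.path.join(src, 'a'), 'w') as f:
    f.write('data')
os.link(os.path.join(src, 'a'), os.path.join(src, 'b'))  # hardlink a -> b

# round-trip through tar, as the buildcache code above does
tar_path = os.path.join(tempfile.mkdtemp(), 'prefix.tar')
with tarfile.open(tar_path, 'w') as tar:
    tar.add(src, arcname='.')
with tarfile.open(tar_path, 'r') as tar:
    tar.extractall(dst)

a = os.stat(os.path.join(dst, 'a'))
b = os.stat(os.path.join(dst, 'b'))
print(a.st_ino == b.st_ino)  # True: still one file with two names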
|
||||
# create info for later relocation and create tar
|
||||
write_buildinfo_file(spec.prefix, workdir, rel=rel)
|
||||
@@ -368,7 +378,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
|
||||
tty.die(e)
|
||||
|
||||
# create compressed tarball of the install prefix
|
||||
with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
|
||||
with closing(tarfile.open(tarfile_path, 'w:bz2')) as tar:
|
||||
tar.add(name='%s' % workdir,
|
||||
arcname='%s' % os.path.basename(spec.prefix))
|
||||
# remove copy of install directory
|
||||
@@ -407,8 +417,8 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
|
||||
sign_tarball(key, force, specfile_path)
|
||||
# put tarball, spec and signature files in .spack archive
|
||||
with closing(tarfile.open(spackfile_path, 'w')) as tar:
|
||||
tar.add(name='%s' % tarfile_path, arcname='%s' % tarfile_name)
|
||||
tar.add(name='%s' % specfile_path, arcname='%s' % specfile_name)
|
||||
tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
|
||||
tar.add(name=specfile_path, arcname='%s' % specfile_name)
|
||||
if not unsigned:
|
||||
tar.add(name='%s.asc' % specfile_path,
|
||||
arcname='%s.asc' % specfile_name)
|
||||
@@ -579,13 +589,17 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
|
||||
stagepath = os.path.dirname(filename)
|
||||
spackfile_name = tarball_name(spec, '.spack')
|
||||
spackfile_path = os.path.join(stagepath, spackfile_name)
|
||||
tarfile_name = tarball_name(spec, '.tar.gz')
|
||||
tarfile_name = tarball_name(spec, '.tar.bz2')
|
||||
tarfile_path = os.path.join(tmpdir, tarfile_name)
|
||||
specfile_name = tarball_name(spec, '.spec.yaml')
|
||||
specfile_path = os.path.join(tmpdir, specfile_name)
|
||||
|
||||
with closing(tarfile.open(spackfile_path, 'r')) as tar:
|
||||
tar.extractall(tmpdir)
|
||||
# older buildcache tarfiles use gzip compression
|
||||
if not os.path.exists(tarfile_path):
|
||||
tarfile_name = tarball_name(spec, '.tar.gz')
|
||||
tarfile_path = os.path.join(tmpdir, tarfile_name)
|
||||
if not unsigned:
|
||||
if os.path.exists('%s.asc' % specfile_path):
|
||||
try:
|
||||
@@ -638,7 +652,17 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
|
||||
# so the pathname should be the same now that the directory layout
|
||||
# is confirmed
|
||||
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
|
||||
install_tree(workdir, spec.prefix, symlinks=True)
|
||||
# install_tree copies hardlinks
|
||||
# create a temporary tarfile from workdir and extract it to the prefix
|
||||
# tarfile preserves hardlinks
|
||||
temp_tarfile_name = tarball_name(spec, '.tar')
|
||||
temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
|
||||
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
|
||||
tar.add(name='%s' % workdir,
|
||||
arcname='.')
|
||||
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
|
||||
tar.extractall(spec.prefix)
|
||||
os.remove(temp_tarfile_path)
|
||||
|
||||
# cleanup
|
||||
os.remove(tarfile_path)
|
||||
|
@@ -42,7 +42,6 @@ def _fetch_cache():
|
||||
building the same package different ways or multiple times.
|
||||
"""
|
||||
path = spack.config.get('config:source_cache')
|
||||
|
||||
if not path:
|
||||
path = os.path.join(spack.paths.var_path, "cache")
|
||||
path = spack.util.path.canonicalize_path(path)
|
||||
|
@@ -142,7 +142,7 @@ def compiler_info(args):
|
||||
for flag, flag_value in iteritems(c.flags):
|
||||
print("\t\t%s = %s" % (flag, flag_value))
|
||||
if len(c.environment) != 0:
|
||||
if len(c.environment['set']) != 0:
|
||||
if len(c.environment.get('set', {})) != 0:
|
||||
print("\tenvironment:")
|
||||
print("\t set:")
|
||||
for key, value in iteritems(c.environment['set']):
|
||||
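The change above matters because a compiler entry may define ``environment`` sections other than ``set``. A tiny illustration with a made-up entry:

environment = {'prepend_path': {'PATH': '/opt/gcc/bin'}}  # hypothetical entry, no 'set'

# old form: environment['set'] raises KeyError as soon as any other
# section is present; the new .get() form degrades gracefully
print(len(environment.get('set', {})) != 0)  # False, and no exception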
|
@@ -4,6 +4,7 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
import llnl.util.tty as tty
|
||||
from llnl.util.tty.colify import colify
|
||||
@@ -21,6 +22,8 @@
|
||||
|
||||
|
||||
def setup_parser(subparser):
|
||||
subparser.epilog = 'If called without argument returns ' \
|
||||
'the list of all valid extendable packages'
|
||||
arguments.add_common_arguments(subparser, ['long', 'very_long'])
|
||||
subparser.add_argument('-d', '--deps', action='store_true',
|
||||
help='output dependencies along with found specs')
|
||||
@@ -42,7 +45,19 @@ def setup_parser(subparser):
|
||||
|
||||
def extensions(parser, args):
|
||||
if not args.spec:
|
||||
tty.die("extensions requires a package spec.")
|
||||
# If called without arguments, list all the extendable packages
|
||||
isatty = sys.stdout.isatty()
|
||||
if isatty:
|
||||
tty.info('Extendable packages:')
|
||||
|
||||
extendable_pkgs = []
|
||||
for name in spack.repo.all_package_names():
|
||||
pkg = spack.repo.get(name)
|
||||
if pkg.extendable:
|
||||
extendable_pkgs.append(name)
|
||||
|
||||
colify(extendable_pkgs, indent=4)
|
||||
return
|
||||
|
||||
# Checks
|
||||
spec = cmd.parse_specs(args.spec)
|
||||
|
@@ -40,8 +40,6 @@ def update_kwargs_from_args(args, kwargs):
|
||||
'fake': args.fake,
|
||||
'dirty': args.dirty,
|
||||
'use_cache': args.use_cache,
|
||||
'install_global': args.install_global,
|
||||
'upstream': args.upstream,
|
||||
'cache_only': args.cache_only,
|
||||
'explicit': True, # Always true for install command
|
||||
'stop_at': args.until,
|
||||
@@ -49,7 +47,7 @@ def update_kwargs_from_args(args, kwargs):
|
||||
})
|
||||
|
||||
kwargs.update({
|
||||
'install_dependencies': ('dependencies' in args.things_to_install),
|
||||
'install_deps': ('dependencies' in args.things_to_install),
|
||||
'install_package': ('package' in args.things_to_install)
|
||||
})
|
||||
|
||||
@@ -125,14 +123,6 @@ def setup_parser(subparser):
|
||||
'-f', '--file', action='append', default=[],
|
||||
dest='specfiles', metavar='SPEC_YAML_FILE',
|
||||
help="install from file. Read specs to install from .yaml files")
|
||||
subparser.add_argument(
|
||||
'--upstream', action='store', default=None,
|
||||
dest='upstream', metavar='UPSTREAM_NAME',
|
||||
help='specify which upstream spack to install to')
|
||||
subparser.add_argument(
|
||||
'-g', '--global', action='store_true', default=False,
|
||||
dest='install_global',
|
||||
help='install package to globally accessible location')
|
||||
|
||||
cd_group = subparser.add_mutually_exclusive_group()
|
||||
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
|
||||
@@ -226,10 +216,7 @@ def default_log_file(spec):
|
||||
"""
|
||||
fmt = 'test-{x.name}-{x.version}-{hash}.xml'
|
||||
basename = fmt.format(x=spec, hash=spec.dag_hash())
|
||||
|
||||
dirname = fs.os.path.join(spack.paths.user_config_path,
|
||||
'var/spack',
|
||||
'junit-report')
|
||||
dirname = fs.os.path.join(spack.paths.var_path, 'junit-report')
|
||||
fs.mkdirp(dirname)
|
||||
return fs.os.path.join(dirname, basename)
|
||||
|
||||
@@ -240,7 +227,6 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
|
||||
try:
|
||||
# handle active environment, if any
|
||||
env = ev.get_env(cli_args, 'install')
|
||||
|
||||
if env:
|
||||
with env.write_transaction():
|
||||
concrete = env.concretize_and_add(
|
||||
@@ -251,10 +237,6 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
|
||||
env.regenerate_views()
|
||||
else:
|
||||
spec.package.do_install(**kwargs)
|
||||
spack.config.set('config:active_tree', '~/.spack/opt/spack',
|
||||
scope='user')
|
||||
spack.config.set('config:active_upstream', None,
|
||||
scope='user')
|
||||
|
||||
except spack.build_environment.InstallError as e:
|
||||
if cli_args.show_log_on_error:
|
||||
@@ -269,30 +251,6 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
|
||||
|
||||
|
||||
def install(parser, args, **kwargs):
|
||||
# Install Package to Global Upstream for multi-user use
|
||||
if args.install_global:
|
||||
spack.config.set('config:active_upstream', 'global',
|
||||
scope='user')
|
||||
global_root = spack.config.get('upstreams')
|
||||
global_root = global_root['global']['install_tree']
|
||||
global_root = spack.util.path.canonicalize_path(global_root)
|
||||
spack.config.set('config:active_tree', global_root,
|
||||
scope='user')
|
||||
elif args.upstream:
|
||||
if args.upstream not in spack.config.get('upstreams'):
|
||||
tty.die("specified upstream does not exist")
|
||||
spack.config.set('config:active_upstream', args.upstream,
|
||||
scope='user')
|
||||
root = spack.config.get('upstreams')
|
||||
root = root[args.upstream]['install_tree']
|
||||
root = spack.util.path.canonicalize_path(root)
|
||||
spack.config.set('config:active_tree', root, scope='user')
|
||||
else:
|
||||
spack.config.set('config:active_upstream', None,
|
||||
scope='user')
|
||||
spack.config.set('config:active_tree',
|
||||
spack.config.get('config:install_tree'),
|
||||
scope='user')
|
||||
if args.help_cdash:
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
|
@@ -8,6 +8,9 @@
|
||||
import code
|
||||
import argparse
|
||||
import platform
|
||||
import runpy
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
import spack
|
||||
|
||||
@@ -19,12 +22,23 @@
|
||||
def setup_parser(subparser):
|
||||
subparser.add_argument(
|
||||
'-c', dest='python_command', help='command to execute')
|
||||
subparser.add_argument(
|
||||
'-m', dest='module', action='store',
|
||||
help='run library module as a script')
|
||||
subparser.add_argument(
|
||||
'python_args', nargs=argparse.REMAINDER,
|
||||
help="file to run plus arguments")
|
||||
|
||||
|
||||
def python(parser, args):
|
||||
def python(parser, args, unknown_args):
|
||||
if args.module:
|
||||
sys.argv = ['spack-python'] + unknown_args + args.python_args
|
||||
runpy.run_module(args.module, run_name="__main__", alter_sys=True)
|
||||
return
|
||||
|
||||
if unknown_args:
|
||||
tty.die("Unknown arguments:", " ".join(unknown_args))
|
||||
|
||||
# Fake a main python shell by setting __name__ to __main__.
|
||||
console = code.InteractiveConsole({'__name__': '__main__',
|
||||
'spack': spack})
|
||||
|
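The new ``-m`` option hands off to ``runpy`` after rebuilding ``sys.argv``. The standalone sketch below runs the standard ``platform`` module that way; the module choice and the argv contents are illustrative only.

import runpy
import sys

sys.argv = ['spack-python']  # plus any forwarded module arguments
try:
    runpy.run_module('platform', run_name='__main__', alter_sys=True)
except SystemExit:
    pass  # many module mains end with sys.exit()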
@@ -154,7 +154,7 @@ def test(parser, args, unknown_args):
|
||||
|
||||
# The default is to test the core of Spack. If the option `--extension`
|
||||
# has been used, then test that extension.
|
||||
pytest_root = spack.paths.test_path
|
||||
pytest_root = spack.paths.spack_root
|
||||
if args.extension:
|
||||
target = args.extension
|
||||
extensions = spack.config.get('config:extensions')
|
||||
|
@@ -5,8 +5,6 @@
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import sys
|
||||
import itertools
|
||||
|
||||
@@ -17,7 +15,6 @@
|
||||
import spack.cmd.common.arguments as arguments
|
||||
import spack.repo
|
||||
import spack.store
|
||||
import spack.spec
|
||||
from spack.database import InstallStatuses
|
||||
|
||||
from llnl.util import tty
|
||||
@@ -57,24 +54,8 @@ def setup_parser(subparser):
|
||||
"If used in an environment, all packages in the environment "
|
||||
"will be uninstalled.")
|
||||
|
||||
subparser.add_argument(
|
||||
'packages',
|
||||
nargs=argparse.REMAINDER,
|
||||
help="specs of packages to uninstall")
|
||||
|
||||
subparser.add_argument(
|
||||
'-u', '--upstream', action='store', default=None,
|
||||
dest='upstream', metavar='UPSTREAM_NAME',
|
||||
help='specify which upstream spack to uninstall from')
|
||||
|
||||
subparser.add_argument(
|
||||
'-g', '--global', action='store_true',
|
||||
dest='global_uninstall',
|
||||
help='uninstall packages installed to global upstream')
|
||||
|
||||
|
||||
def find_matching_specs(env, specs, allow_multiple_matches=False, force=False,
|
||||
upstream=None, global_uninstall=False):
|
||||
def find_matching_specs(env, specs, allow_multiple_matches=False, force=False):
|
||||
"""Returns a list of specs matching the not necessarily
|
||||
concretized specs given from cli
|
||||
|
||||
@@ -86,35 +67,6 @@ def find_matching_specs(env, specs, allow_multiple_matches=False, force=False,
|
||||
Return:
|
||||
list of specs
|
||||
"""
|
||||
if global_uninstall:
|
||||
spack.config.set('config:active_upstream', 'global',
|
||||
scope='user')
|
||||
global_root = spack.config.get('upstreams')
|
||||
global_root = global_root['global']['install_tree']
|
||||
global_root = spack.util.path.canonicalize_path(global_root)
|
||||
spack.config.set('config:active_tree', global_root,
|
||||
scope='user')
|
||||
elif upstream:
|
||||
if upstream not in spack.config.get('upstreams'):
|
||||
tty.die("specified upstream does not exist")
|
||||
spack.config.set('config:active_upstream', upstream,
|
||||
scope='user')
|
||||
root = spack.config.get('upstreams')
|
||||
root = root[upstream]['install_tree']
|
||||
root = spack.util.path.canonicalize_path(root)
|
||||
spack.config.set('config:active_tree', root, scope='user')
|
||||
else:
|
||||
spack.config.set('config:active_upstream', None,
|
||||
scope='user')
|
||||
for spec in specs:
|
||||
if isinstance(spec, spack.spec.Spec):
|
||||
spec_name = str(spec)
|
||||
spec_copy = (copy.deepcopy(spec))
|
||||
spec_copy.concretize()
|
||||
if spec_copy.package.installed_upstream:
|
||||
tty.warn("{0} is installed upstream".format(spec_name))
|
||||
tty.die("Use 'spack uninstall [--upstream upstream_name]'")
|
||||
|
||||
# constrain uninstall resolution to current environment if one is active
|
||||
hashes = env.all_hashes() if env else None
|
||||
|
||||
@@ -272,25 +224,11 @@ def do_uninstall(env, specs, force):
|
||||
for item in ready:
|
||||
item.do_uninstall(force=force)
|
||||
|
||||
# write any changes made to the active environment
|
||||
if env:
|
||||
env.write()
|
||||
|
||||
spack.config.set('config:active_tree',
|
||||
'~/.spack/opt/spack',
|
||||
scope='user')
|
||||
|
||||
spack.config.set('config:active_upstream', None,
|
||||
scope='user')
|
||||
|
||||
|
||||
def get_uninstall_list(args, specs, env):
|
||||
# Gets the list of installed specs that match the ones give via cli
|
||||
# args.all takes care of the case where '-a' is given in the cli
|
||||
uninstall_list = find_matching_specs(env, specs, args.all, args.force,
|
||||
upstream=args.upstream,
|
||||
global_uninstall=args.global_uninstall
|
||||
)
|
||||
uninstall_list = find_matching_specs(env, specs, args.all, args.force)
|
||||
|
||||
# Takes care of '-R'
|
||||
active_dpts, inactive_dpts = installed_dependents(uninstall_list, env)
|
||||
@@ -367,7 +305,7 @@ def uninstall_specs(args, specs):
|
||||
anything_to_do = set(uninstall_list).union(set(remove_list))
|
||||
|
||||
if not anything_to_do:
|
||||
tty.warn('There are no packages to uninstall.')
|
||||
tty.warn('There are no package to uninstall.')
|
||||
return
|
||||
|
||||
if not args.yes_to_all:
|
||||
|
@@ -413,6 +413,14 @@ def get_compilers(config, cspec=None, arch_spec=None):
|
||||
assert arch_spec is None
|
||||
|
||||
if arch_spec and target and (target != family and target != 'any'):
|
||||
# If the family of the target is the family we are seeking,
|
||||
# there's an error in the underlying configuration
|
||||
if llnl.util.cpu.targets[target].family == family:
|
||||
msg = ('the "target" field in compilers.yaml accepts only '
|
||||
'target families [replace "{0}" with "{1}"'
|
||||
' in "{2}" specification]')
|
||||
msg = msg.format(str(target), family, items.get('spec', '??'))
|
||||
raise ValueError(msg)
|
||||
continue
|
||||
|
||||
compilers.append(_compiler_from_config_entry(items))
|
||||
|
@@ -362,7 +362,16 @@ def concretize_compiler(self, spec):
|
||||
# compiler_for_spec Should think whether this can be more
|
||||
# efficient
|
||||
def _proper_compiler_style(cspec, aspec):
|
||||
return spack.compilers.compilers_for_spec(cspec, arch_spec=aspec)
|
||||
compilers = spack.compilers.compilers_for_spec(
|
||||
cspec, arch_spec=aspec
|
||||
)
|
||||
# If the spec passed as argument is concrete we want to check
|
||||
# the versions match exactly
|
||||
if (cspec.concrete and compilers and
|
||||
cspec.version not in [c.version for c in compilers]):
|
||||
return []
|
||||
|
||||
return compilers
|
||||
|
||||
if spec.compiler and spec.compiler.concrete:
|
||||
if (self.check_for_compiler_existence and not
|
||||
@@ -403,7 +412,9 @@ def _proper_compiler_style(cspec, aspec):
|
||||
return True
|
||||
else:
|
||||
# No compiler with a satisfactory spec was found
|
||||
raise UnavailableCompilerVersionError(other_compiler)
|
||||
raise UnavailableCompilerVersionError(
|
||||
other_compiler, spec.architecture
|
||||
)
|
||||
else:
|
||||
# We have no hints to go by, grab any compiler
|
||||
compiler_list = spack.compilers.all_compiler_specs()
|
||||
|
@@ -18,33 +18,37 @@
|
||||
as the authoritative database of packages in Spack. This module
|
||||
provides a cache and a sanity checking mechanism for what is in the
|
||||
filesystem.
|
||||
|
||||
"""
|
||||
import datetime
|
||||
import time
|
||||
import os
|
||||
import sys
|
||||
import socket
|
||||
import contextlib
|
||||
from six import string_types
|
||||
from six import iteritems
|
||||
|
||||
from ruamel.yaml.error import MarkedYAMLError, YAMLError
|
||||
import contextlib
|
||||
import datetime
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
try:
|
||||
import uuid
|
||||
_use_uuid = True
|
||||
except ImportError:
|
||||
_use_uuid = False
|
||||
pass
|
||||
|
||||
import llnl.util.tty as tty
|
||||
from llnl.util.filesystem import mkdirp
|
||||
|
||||
import spack.store
|
||||
import six
|
||||
import spack.repo
|
||||
import spack.spec
|
||||
import spack.util.spack_yaml as syaml
|
||||
import spack.store
|
||||
import spack.util.lock as lk
|
||||
import spack.util.spack_json as sjson
|
||||
from spack.filesystem_view import YamlFilesystemView
|
||||
from spack.util.crypto import bit_length
|
||||
from llnl.util.filesystem import mkdirp
|
||||
from spack.directory_layout import DirectoryLayoutError
|
||||
from spack.error import SpackError
|
||||
from spack.filesystem_view import YamlFilesystemView
|
||||
from spack.util.crypto import bit_length
|
||||
from spack.version import Version
|
||||
from spack.util.lock import Lock, WriteTransaction, ReadTransaction, LockError
|
||||
|
||||
# TODO: Provide an API automatically retrying a build after detecting and
|
||||
# TODO: clearing a failure.
|
||||
|
||||
# DB goes in this directory underneath the root
|
||||
_db_dirname = '.spack-db'
|
||||
@@ -65,9 +69,20 @@
|
||||
(Version('0.9.3'), Version('5')),
|
||||
]
|
||||
|
||||
# Timeout for spack database locks in seconds
|
||||
# Default timeout for spack database locks in seconds or None (no timeout).
|
||||
# A balance needs to be struck between quick turnaround for parallel installs
|
||||
# (to avoid excess delays) and waiting long enough when the system is busy
|
||||
# (to ensure the database is updated).
|
||||
_db_lock_timeout = 120
|
||||
|
||||
# Default timeout for spack package locks in seconds or None (no timeout).
|
||||
# A balance needs to be struck between quick turnaround for parallel installs
|
||||
# (to avoid excess delays when performing a parallel installation) and waiting
|
||||
# long enough for the next possible spec to install (to avoid excessive
|
||||
# checking of the last high priority package) or holding on to a lock (to
|
||||
# ensure a failed install is properly tracked).
|
||||
_pkg_lock_timeout = None
|
||||
|
||||
# Types of dependencies tracked by the database
|
||||
_tracked_deps = ('link', 'run')
|
||||
|
||||
@@ -255,6 +270,9 @@ class Database(object):
|
||||
"""Per-process lock objects for each install prefix."""
|
||||
_prefix_locks = {}
|
||||
|
||||
"""Per-process failure (lock) objects for each install prefix."""
|
||||
_prefix_failures = {}
|
||||
|
||||
def __init__(self, root, db_dir=None, upstream_dbs=None,
|
||||
is_upstream=False):
|
||||
"""Create a Database for Spack installations under ``root``.
|
||||
@@ -267,69 +285,51 @@ def __init__(self, root, db_dir=None, upstream_dbs=None,
|
||||
exist. This is the ``db_dir``.
|
||||
|
||||
The Database will attempt to read an ``index.json`` file in
|
||||
``db_dir``. If it does not find one, it will fall back to read
|
||||
an ``index.yaml`` if one is present. If that does not exist, it
|
||||
will create a database when needed by scanning the entire
|
||||
Database root for ``spec.yaml`` files according to Spack's
|
||||
``DirectoryLayout``.
|
||||
``db_dir``. If that does not exist, it will create a database
|
||||
when needed by scanning the entire Database root for ``spec.yaml``
|
||||
files according to Spack's ``DirectoryLayout``.
|
||||
|
||||
Caller may optionally provide a custom ``db_dir`` parameter
|
||||
where data will be stored. This is intended to be used for
|
||||
where data will be stored. This is intended to be used for
|
||||
testing the Database class.
|
||||
|
||||
"""
|
||||
self.root = root
|
||||
if db_dir is None:
|
||||
# If the db_dir is not provided, default to within the db root.
|
||||
self._db_dir = os.path.join(self.root, _db_dirname)
|
||||
else:
|
||||
# Allow customizing the database directory location for testing.
|
||||
self._db_dir = db_dir
|
||||
|
||||
# If the db_dir is not provided, default to within the db root.
|
||||
self._db_dir = db_dir or os.path.join(self.root, _db_dirname)
|
||||
|
||||
# Set up layout of database files within the db dir
|
||||
self._old_yaml_index_path = os.path.join(self._db_dir, 'index.yaml')
|
||||
self._index_path = os.path.join(self._db_dir, 'index.json')
|
||||
self._verifier_path = os.path.join(self._db_dir, 'index_verifier')
|
||||
self._lock_path = os.path.join(self._db_dir, 'lock')
|
||||
|
||||
# This is for other classes to use to lock prefix directories.
|
||||
self.prefix_lock_path = os.path.join(self._db_dir, 'prefix_lock')
|
||||
|
||||
# Ensure a persistent location for dealing with parallel installation
|
||||
# failures (e.g., across near-concurrent processes).
|
||||
self._failure_dir = os.path.join(self._db_dir, 'failures')
|
||||
|
||||
# Support special locks for handling parallel installation failures
|
||||
# of a spec.
|
||||
self.prefix_fail_path = os.path.join(self._db_dir, 'prefix_failures')
|
||||
|
||||
# Create needed directories and files
|
||||
if not os.path.exists(self._db_dir):
|
||||
mkdirp(self._db_dir)
|
||||
|
||||
if not os.path.exists(self._failure_dir) and not is_upstream:
|
||||
mkdirp(self._failure_dir)
|
||||
|
||||
self.is_upstream = is_upstream
|
||||
|
||||
# Create .spack-db/index.json for the global upstream if it doesn't exist
|
||||
global_install_tree = spack.config.get(
|
||||
'upstreams')['global']['install_tree']
|
||||
global_install_tree = global_install_tree.replace(
|
||||
'$spack', spack.paths.prefix)
|
||||
if self.is_upstream:
|
||||
if global_install_tree in self._db_dir:
|
||||
if not os.path.isfile(self._index_path):
|
||||
f = open(self._index_path, "w+")
|
||||
database = {
|
||||
'database': {
|
||||
'installs': {},
|
||||
'version': str(_db_version)
|
||||
}
|
||||
}
|
||||
try:
|
||||
sjson.dump(database, f)
|
||||
except YAMLError as e:
|
||||
raise syaml.SpackYAMLError(
|
||||
"error writing YAML database:", str(e))
|
||||
|
||||
self.lock = ForbiddenLock()
|
||||
else:
|
||||
self.lock = Lock(self._lock_path)
|
||||
self.last_seen_verifier = ''
|
||||
|
||||
# initialize rest of state.
|
||||
self.db_lock_timeout = (
|
||||
spack.config.get('config:db_lock_timeout') or _db_lock_timeout)
|
||||
self.package_lock_timeout = (
|
||||
spack.config.get('config:package_lock_timeout') or None)
|
||||
spack.config.get('config:package_lock_timeout') or
|
||||
_pkg_lock_timeout)
|
||||
tty.debug('DATABASE LOCK TIMEOUT: {0}s'.format(
|
||||
str(self.db_lock_timeout)))
|
||||
timeout_format_str = ('{0}s'.format(str(self.package_lock_timeout))
|
||||
@@ -340,8 +340,9 @@ def __init__(self, root, db_dir=None, upstream_dbs=None,
|
||||
if self.is_upstream:
|
||||
self.lock = ForbiddenLock()
|
||||
else:
|
||||
self.lock = Lock(self._lock_path,
|
||||
default_timeout=self.db_lock_timeout)
|
||||
self.lock = lk.Lock(self._lock_path,
|
||||
default_timeout=self.db_lock_timeout,
|
||||
desc='database')
|
||||
self._data = {}
|
||||
|
||||
self.upstream_dbs = list(upstream_dbs) if upstream_dbs else []
|
||||
@@ -356,14 +357,136 @@ def __init__(self, root, db_dir=None, upstream_dbs=None,
|
||||
|
||||
def write_transaction(self):
|
||||
"""Get a write lock context manager for use in a `with` block."""
|
||||
return WriteTransaction(
|
||||
return lk.WriteTransaction(
|
||||
self.lock, acquire=self._read, release=self._write)
|
||||
|
||||
def read_transaction(self):
|
||||
"""Get a read lock context manager for use in a `with` block."""
|
||||
return ReadTransaction(self.lock, acquire=self._read)
|
||||
return lk.ReadTransaction(self.lock, acquire=self._read)
|
||||
|
||||
def prefix_lock(self, spec):
|
||||
def _failed_spec_path(self, spec):
|
||||
"""Return the path to the spec's failure file, which may not exist."""
|
||||
if not spec.concrete:
|
||||
raise ValueError('Concrete spec required for failure path for {0}'
|
||||
.format(spec.name))
|
||||
|
||||
return os.path.join(self._failure_dir,
|
||||
'{0}-{1}'.format(spec.name, spec.full_hash()))
|
||||
|
||||
def clear_failure(self, spec, force=False):
|
||||
"""
|
||||
Remove any persistent and cached failure tracking for the spec.
|
||||
|
||||
see `mark_failed()`.
|
||||
|
||||
Args:
|
||||
spec (Spec): the spec whose failure indicators are being removed
|
||||
force (bool): True if the failure information should be cleared
|
||||
when a prefix failure lock exists for the file or False if
|
||||
the failure should not be cleared (e.g., it may be
|
||||
associated with a concurrent build)
|
||||
|
||||
"""
|
||||
failure_locked = self.prefix_failure_locked(spec)
|
||||
if failure_locked and not force:
|
||||
tty.msg('Retaining failure marking for {0} due to lock'
|
||||
.format(spec.name))
|
||||
return
|
||||
|
||||
if failure_locked:
|
||||
tty.warn('Removing failure marking despite lock for {0}'
|
||||
.format(spec.name))
|
||||
|
||||
lock = self._prefix_failures.pop(spec.prefix, None)
|
||||
if lock:
|
||||
lock.release_write()
|
||||
|
||||
if self.prefix_failure_marked(spec):
|
||||
try:
|
||||
path = self._failed_spec_path(spec)
|
||||
tty.debug('Removing failure marking for {0}'.format(spec.name))
|
||||
os.remove(path)
|
||||
except OSError as err:
|
||||
tty.warn('Unable to remove failure marking for {0} ({1}): {2}'
|
||||
.format(spec.name, path, str(err)))
|
||||
|
||||
def mark_failed(self, spec):
|
||||
"""
|
||||
Mark a spec as failing to install.
|
||||
|
||||
Prefix failure marking takes the form of a byte range lock on the nth
|
||||
byte of a file for coordinating between concurrent parallel build
|
||||
processes and a persistent file, named with the full hash and
|
||||
containing the spec, in a subdirectory of the database to enable
|
||||
persistence across overlapping but separate related build processes.
|
||||
|
||||
The failure lock file, ``spack.store.db.prefix_failures``, lives
|
||||
alongside the install DB. ``n`` is the sys.maxsize-bit prefix of the
|
||||
associated DAG hash to make the likelihood of collision very low with
|
||||
no cleanup required.
|
||||
"""
|
||||
# Dump the spec to the failure file for (manual) debugging purposes
|
||||
path = self._failed_spec_path(spec)
|
||||
with open(path, 'w') as f:
|
||||
spec.to_json(f)
|
||||
|
||||
# Also ensure a failure lock is taken to prevent cleanup removal
|
||||
# of failure status information during a concurrent parallel build.
|
||||
err = 'Unable to mark {0.name} as failed.'
|
||||
|
||||
prefix = spec.prefix
|
||||
if prefix not in self._prefix_failures:
|
||||
mark = lk.Lock(
|
||||
self.prefix_fail_path,
|
||||
start=spec.dag_hash_bit_prefix(bit_length(sys.maxsize)),
|
||||
length=1,
|
||||
default_timeout=self.package_lock_timeout, desc=spec.name)
|
||||
|
||||
try:
|
||||
mark.acquire_write()
|
||||
except lk.LockTimeoutError:
|
||||
# Unlikely that another process failed to install at the same
|
||||
# time but log it anyway.
|
||||
tty.debug('PID {0} failed to mark install failure for {1}'
|
||||
.format(os.getpid(), spec.name))
|
||||
tty.warn(err.format(spec))
|
||||
|
||||
# Whether we or another process marked it as a failure, track it
|
||||
# as such locally.
|
||||
self._prefix_failures[prefix] = mark
|
||||
|
||||
return self._prefix_failures[prefix]
|
||||
|
||||
def prefix_failed(self, spec):
|
||||
"""Return True if the prefix (installation) is marked as failed."""
|
||||
# The failure was detected in this process.
|
||||
if spec.prefix in self._prefix_failures:
|
||||
return True
|
||||
|
||||
# The failure was detected by a concurrent process (e.g., an srun),
|
||||
# which is expected to be holding a write lock if that is the case.
|
||||
if self.prefix_failure_locked(spec):
|
||||
return True
|
||||
|
||||
# Determine if the spec may have been marked as failed by a separate
|
||||
# spack build process running concurrently.
|
||||
return self.prefix_failure_marked(spec)
|
||||
|
||||
def prefix_failure_locked(self, spec):
|
||||
"""Return True if a process has a failure lock on the spec."""
|
||||
check = lk.Lock(
|
||||
self.prefix_fail_path,
|
||||
start=spec.dag_hash_bit_prefix(bit_length(sys.maxsize)),
|
||||
length=1,
|
||||
default_timeout=self.package_lock_timeout, desc=spec.name)
|
||||
|
||||
return check.is_write_locked()
|
||||
|
||||
def prefix_failure_marked(self, spec):
|
||||
"""Determine if the spec has a persistent failure marking."""
|
||||
return os.path.exists(self._failed_spec_path(spec))
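The failure and prefix locks above are byte-range locks whose offset is derived from a bit prefix of the spec's hash. The sketch below mimics that offset computation with a generic SHA-1 stand-in; ``hash_bit_prefix`` and the spec string are hypothetical, not Spack's actual ``Spec.dag_hash_bit_prefix``.

import hashlib
import sys

def hash_bit_prefix(text, bits):
    # take the leading `bits` bits of a hash and use them as an integer offset
    digest = hashlib.sha1(text.encode()).digest()
    value = int.from_bytes(digest, 'big')
    return value >> (len(digest) * 8 - bits)

# one byte at this offset in the shared lock file stands for this spec
offset = hash_bit_prefix('zlib@1.2.11', sys.maxsize.bit_length())
print('lock byte offset:', offset)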
|
||||
|
||||
def prefix_lock(self, spec, timeout=None):
|
||||
"""Get a lock on a particular spec's installation directory.
|
||||
|
||||
NOTE: The installation directory **does not** need to exist.
|
||||
@@ -378,13 +501,16 @@ def prefix_lock(self, spec):
|
||||
readers-writer lock semantics with just a single lockfile, so no
|
||||
cleanup required.
|
||||
"""
|
||||
timeout = timeout or self.package_lock_timeout
|
||||
prefix = spec.prefix
|
||||
if prefix not in self._prefix_locks:
|
||||
self._prefix_locks[prefix] = Lock(
|
||||
self._prefix_locks[prefix] = lk.Lock(
|
||||
self.prefix_lock_path,
|
||||
start=spec.dag_hash_bit_prefix(bit_length(sys.maxsize)),
|
||||
length=1,
|
||||
default_timeout=self.package_lock_timeout)
|
||||
default_timeout=timeout, desc=spec.name)
|
||||
elif timeout != self._prefix_locks[prefix].default_timeout:
|
||||
self._prefix_locks[prefix].default_timeout = timeout
|
||||
|
||||
return self._prefix_locks[prefix]
|
||||
|
||||
@@ -395,7 +521,7 @@ def prefix_read_lock(self, spec):
|
||||
|
||||
try:
|
||||
yield self
|
||||
except LockError:
|
||||
except lk.LockError:
|
||||
# This addresses the case where a nested lock attempt fails inside
|
||||
# of this context manager
|
||||
raise
|
||||
@@ -412,7 +538,7 @@ def prefix_write_lock(self, spec):
|
||||
|
||||
try:
|
||||
yield self
|
||||
except LockError:
|
||||
except lk.LockError:
|
||||
# This addresses the case where a nested lock attempt fails inside
|
||||
# of this context manager
|
||||
raise
|
||||
@@ -423,7 +549,8 @@ def prefix_write_lock(self, spec):
|
||||
prefix_lock.release_write()
|
||||
|
||||
def _write_to_file(self, stream):
|
||||
"""Write out the databsae to a JSON file.
|
||||
"""Write out the database in JSON format to the stream passed
|
||||
as argument.
|
||||
|
||||
This function does not do any locking or transactions.
|
||||
"""
|
||||
@@ -445,9 +572,8 @@ def _write_to_file(self, stream):
|
||||
|
||||
try:
|
||||
sjson.dump(database, stream)
|
||||
except YAMLError as e:
|
||||
raise syaml.SpackYAMLError(
|
||||
"error writing YAML database:", str(e))
|
||||
except (TypeError, ValueError) as e:
|
||||
raise sjson.SpackJSONError("error writing JSON database:", str(e))
|
||||
|
||||
def _read_spec_from_dict(self, hash_key, installs):
|
||||
"""Recursively construct a spec from a hash in a YAML database.
|
||||
@@ -518,28 +644,15 @@ def _assign_dependencies(self, hash_key, installs, data):
|
||||
|
||||
spec._add_dependency(child, dtypes)
|
||||
|
||||
def _read_from_file(self, stream, format='json'):
|
||||
"""
|
||||
Fill database from file, do not maintain old data
|
||||
Translate the spec portions from node-dict form to spec form
|
||||
def _read_from_file(self, filename):
|
||||
"""Fill database from file, do not maintain old data.
|
||||
Translate the spec portions from node-dict form to spec form.
|
||||
|
||||
Does not do any locking.
|
||||
"""
|
||||
if format.lower() == 'json':
|
||||
load = sjson.load
|
||||
elif format.lower() == 'yaml':
|
||||
load = syaml.load
|
||||
else:
|
||||
raise ValueError("Invalid database format: %s" % format)
|
||||
|
||||
try:
|
||||
if isinstance(stream, string_types):
|
||||
with open(stream, 'r') as f:
|
||||
fdata = load(f)
|
||||
else:
|
||||
fdata = load(stream)
|
||||
except MarkedYAMLError as e:
|
||||
raise syaml.SpackYAMLError("error parsing YAML database:", str(e))
|
||||
with open(filename, 'r') as f:
|
||||
fdata = sjson.load(f)
|
||||
except Exception as e:
|
||||
raise CorruptDatabaseError("error parsing database:", str(e))
|
||||
|
||||
@@ -551,12 +664,12 @@ def check(cond, msg):
|
||||
raise CorruptDatabaseError(
|
||||
"Spack database is corrupt: %s" % msg, self._index_path)
|
||||
|
||||
check('database' in fdata, "No 'database' attribute in YAML.")
|
||||
check('database' in fdata, "no 'database' attribute in JSON DB.")
|
||||
|
||||
# High-level file checks
|
||||
db = fdata['database']
|
||||
check('installs' in db, "No 'installs' in YAML DB.")
|
||||
check('version' in db, "No 'version' in YAML DB.")
|
||||
check('installs' in db, "no 'installs' in JSON DB.")
|
||||
check('version' in db, "no 'version' in JSON DB.")
|
||||
|
||||
installs = db['installs']
|
||||
|
||||
@@ -632,7 +745,6 @@ def reindex(self, directory_layout):
|
||||
"""Build database index from scratch based on a directory layout.
|
||||
|
||||
Locks the DB if it isn't locked already.
|
||||
|
||||
"""
|
||||
if self.is_upstream:
|
||||
raise UpstreamDatabaseLockingError(
|
||||
@@ -648,7 +760,7 @@ def _read_suppress_error():
|
||||
self._error = e
|
||||
self._data = {}
|
||||
|
||||
transaction = WriteTransaction(
|
||||
transaction = lk.WriteTransaction(
|
||||
self.lock, acquire=_read_suppress_error, release=self._write
|
||||
)
|
||||
|
||||
@@ -796,7 +908,6 @@ def _write(self, type, value, traceback):
|
||||
after the start of the next transaction, when it read from disk again.
|
||||
|
||||
This routine does no locking.
|
||||
|
||||
"""
|
||||
# Do not write if exceptions were raised
|
||||
if type is not None:
|
||||
@@ -810,6 +921,11 @@ def _write(self, type, value, traceback):
|
||||
with open(temp_file, 'w') as f:
|
||||
self._write_to_file(f)
|
||||
os.rename(temp_file, self._index_path)
|
||||
if _use_uuid:
|
||||
with open(self._verifier_path, 'w') as f:
|
||||
new_verifier = str(uuid.uuid4())
|
||||
f.write(new_verifier)
|
||||
self.last_seen_verifier = new_verifier
|
||||
except BaseException as e:
|
||||
tty.debug(e)
|
||||
# Clean up temp file if something goes wrong.
|
||||
@@ -821,35 +937,33 @@ def _read(self):
|
||||
"""Re-read Database from the data in the set location.
|
||||
|
||||
This does no locking, with one exception: it will automatically
|
||||
migrate an index.yaml to an index.json if possible. This requires
|
||||
taking a write lock.
|
||||
|
||||
try to regenerate a missing DB if local. This requires taking a
|
||||
write lock.
|
||||
"""
|
||||
if os.path.isfile(self._index_path):
|
||||
# Read from JSON file if a JSON database exists
|
||||
self._read_from_file(self._index_path, format='json')
|
||||
current_verifier = ''
|
||||
if _use_uuid:
|
||||
try:
|
||||
with open(self._verifier_path, 'r') as f:
|
||||
current_verifier = f.read()
|
||||
except BaseException:
|
||||
pass
|
||||
if ((current_verifier != self.last_seen_verifier) or
|
||||
(current_verifier == '')):
|
||||
self.last_seen_verifier = current_verifier
|
||||
# Read from file if a database exists
|
||||
self._read_from_file(self._index_path)
|
||||
return
|
||||
elif self.is_upstream:
|
||||
raise UpstreamDatabaseLockingError(
|
||||
"No database index file is present, and upstream"
|
||||
" databases cannot generate an index file")
|
||||
|
||||
elif os.path.isfile(self._old_yaml_index_path):
|
||||
if (not self.is_upstream) and os.access(
|
||||
self._db_dir, os.R_OK | os.W_OK):
|
||||
# if we can write, then read AND write a JSON file.
|
||||
self._read_from_file(self._old_yaml_index_path, format='yaml')
|
||||
with WriteTransaction(self.lock):
|
||||
self._write(None, None, None)
|
||||
else:
|
||||
# Read from the YAML file if we can't find JSON.
|
||||
self._read_from_file(self._old_yaml_index_path, format='yaml')
|
||||
|
||||
else:
|
||||
if self.is_upstream:
|
||||
raise UpstreamDatabaseLockingError(
|
||||
"No database index file is present, and upstream"
|
||||
" databases cannot generate an index file")
|
||||
# The file doesn't exist, try to traverse the directory.
|
||||
# reindex() takes its own write lock, so no lock here.
|
||||
with WriteTransaction(self.lock):
|
||||
self._write(None, None, None)
|
||||
self.reindex(spack.store.layout)
|
||||
# The file doesn't exist, try to traverse the directory.
|
||||
# reindex() takes its own write lock, so no lock here.
|
||||
with lk.WriteTransaction(self.lock):
|
||||
self._write(None, None, None)
|
||||
self.reindex(spack.store.layout)
|
||||
|
||||
def _add(
|
||||
self,
|
||||
@@ -929,7 +1043,9 @@ def _add(
|
||||
)
|
||||
|
||||
# Connect dependencies from the DB to the new copy.
|
||||
for name, dep in iteritems(spec.dependencies_dict(_tracked_deps)):
|
||||
for name, dep in six.iteritems(
|
||||
spec.dependencies_dict(_tracked_deps)
|
||||
):
|
||||
dkey = dep.spec.dag_hash()
|
||||
upstream, record = self.query_by_spec_hash(dkey)
|
||||
new_spec._add_dependency(record.spec, dep.deptypes)
|
||||
@@ -1002,8 +1118,7 @@ def _increment_ref_count(self, spec):
|
||||
rec.ref_count += 1
|
||||
|
||||
def _remove(self, spec):
|
||||
"""Non-locking version of remove(); does real work.
|
||||
"""
|
||||
"""Non-locking version of remove(); does real work."""
|
||||
key = self._get_matching_spec_key(spec)
|
||||
rec = self._data[key]
|
||||
|
||||
@@ -1011,11 +1126,14 @@ def _remove(self, spec):
|
||||
rec.installed = False
|
||||
return rec.spec
|
||||
|
||||
if self.is_upstream:
|
||||
return rec.spec
|
||||
|
||||
del self._data[key]
|
||||
for dep in rec.spec.dependencies(_tracked_deps):
|
||||
# FIXME: the two lines below needs to be updated once #11983 is
|
||||
# FIXME: fixed. The "if" statement should be deleted and specs are
|
||||
# FIXME: to be removed from dependents by hash and not by name.
|
||||
# FIXME: See https://github.com/spack/spack/pull/15777#issuecomment-607818955
|
||||
if dep._dependents.get(spec.name):
|
||||
del dep._dependents[spec.name]
|
||||
self._decrement_ref_count(dep)
|
||||
|
||||
if rec.deprecated_for:
|
||||
@@ -1250,7 +1368,7 @@ def _query(
|
||||
# TODO: handling of hashes restriction is not particularly elegant.
|
||||
hash_key = query_spec.dag_hash()
|
||||
if (hash_key in self._data and
|
||||
(not hashes or hash_key in hashes)):
|
||||
(not hashes or hash_key in hashes)):
|
||||
return [self._data[hash_key].spec]
|
||||
else:
|
||||
return []
|
||||
|
@@ -29,7 +29,6 @@
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import xml.etree.ElementTree
|
||||
|
||||
import llnl.util.tty as tty
|
||||
import six
|
||||
@@ -760,13 +759,6 @@ def mirror_id(self):
|
||||
result = os.path.sep.join(['git', repo_path, repo_ref])
|
||||
return result
|
||||
|
||||
def get_source_id(self):
|
||||
if not self.branch:
|
||||
return
|
||||
output = self.git('ls-remote', self.url, self.branch, output=str)
|
||||
if output:
|
||||
return output.split()[0]
|
||||
|
||||
def _repo_info(self):
|
||||
args = ''
|
||||
|
||||
@@ -944,11 +936,6 @@ def cachable(self):
|
||||
def source_id(self):
|
||||
return self.revision
|
||||
|
||||
def get_source_id(self):
|
||||
output = self.svn('info', '--xml', self.url, output=str)
|
||||
info = xml.etree.ElementTree.fromstring(output)
|
||||
return info.find('entry/commit').get('revision')
|
||||
|
||||
def mirror_id(self):
|
||||
if self.revision:
|
||||
repo_path = url_util.parse(self.url).path
|
||||
@@ -1064,11 +1051,6 @@ def mirror_id(self):
|
||||
result = os.path.sep.join(['hg', repo_path, self.revision])
|
||||
return result
|
||||
|
||||
def get_source_id(self):
|
||||
output = self.hg('id', self.url, output=str)
|
||||
if output:
|
||||
return output.strip()
|
||||
|
||||
@_needs_stage
|
||||
def fetch(self):
|
||||
if self.stage.expanded:
|
||||
@@ -1257,7 +1239,7 @@ def _from_merged_attrs(fetcher, pkg, version):
|
||||
# TODO: refactor this logic into its own method or function
|
||||
# TODO: to avoid duplication
|
||||
mirrors = [spack.url.substitute_version(u, version)
|
||||
for u in getattr(pkg, 'urls', [])]
|
||||
for u in getattr(pkg, 'urls', [])[1:]]
|
||||
attrs = {fetcher.url_attr: url, 'mirrors': mirrors}
|
||||
else:
|
||||
url = getattr(pkg, fetcher.url_attr)
|
||||
|
lib/spack/spack/installer.py (new file, 1700 lines)
File diff suppressed because it is too large
@@ -550,9 +550,11 @@ def __call__(self, *argv, **kwargs):
            tty.debug(e)
            self.error = e
            if fail_on_error:
                self._log_command_output(out)
                raise

        if fail_on_error and self.returncode not in (None, 0):
            self._log_command_output(out)
            raise SpackCommandError(
                "Command exited with code %d: %s(%s)" % (
                    self.returncode, self.command_name,
@@ -560,6 +562,13 @@ def __call__(self, *argv, **kwargs):

        return out.getvalue()

    def _log_command_output(self, out):
        if tty.is_verbose():
            fmt = self.command_name + ': {0}'
            for ln in out.getvalue().split('\n'):
                if len(ln) > 0:
                    tty.verbose(fmt.format(ln.replace('==> ', '')))

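Note: SpackCommand is the callable wrapper used to drive Spack commands from Python (mostly in the test suite); with the change above a failing command routes its captured output through _log_command_output() before raising, so the output is visible under `spack -v`. A rough usage sketch (the command name and spec are only examples):

    from spack.main import SpackCommand, SpackCommandError

    install = SpackCommand('install')
    try:
        out = install('zlib')       # captured stdout is returned on success
    except SpackCommandError:
        # with fail_on_error (the default) the captured output has already
        # been sent to tty.verbose() by _log_command_output()
        pass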
def _profile_wrapper(command, parser, args, unknown_args):
|
||||
import cProfile
|
||||
@@ -634,7 +643,6 @@ def shell_set(var, value):
|
||||
|
||||
other_spack_instances = spack.config.get(
|
||||
'upstreams') or {}
|
||||
|
||||
for install_properties in other_spack_instances.values():
|
||||
upstream_module_roots = install_properties.get('modules', {})
|
||||
upstream_module_roots = dict(
|
||||
|
@@ -214,7 +214,6 @@ def root_path(name):
|
||||
Returns:
|
||||
root folder for module file installation
|
||||
"""
|
||||
|
||||
# Root folders where the various module files should be written
|
||||
roots = spack.config.get('config:module_roots', {})
|
||||
path = roots.get(name, os.path.join(spack.paths.share_path, name))
|
||||
@@ -282,7 +281,6 @@ def read_module_indices():
|
||||
module_type_to_index = {}
|
||||
module_type_to_root = install_properties.get('modules', {})
|
||||
for module_type, root in module_type_to_root.items():
|
||||
root = spack.util.path.canonicalize_path(root)
|
||||
module_type_to_index[module_type] = read_module_index(root)
|
||||
module_indices.append(module_type_to_index)
|
||||
|
||||
@@ -344,7 +342,11 @@ def get_module(module_type, spec, get_full_path, required=True):
|
||||
The module name or path. May return ``None`` if the module is not
|
||||
available.
|
||||
"""
|
||||
if spec.package.installed_upstream:
|
||||
try:
|
||||
upstream = spec.package.installed_upstream
|
||||
except spack.repo.UnknownPackageError:
|
||||
upstream, record = spack.store.db.query_by_spec_hash(spec.dag_hash())
|
||||
if upstream:
|
||||
module = (spack.modules.common.upstream_module_index
|
||||
.upstream_module(spec, module_type))
|
||||
if not module:
|
||||
@@ -426,6 +428,7 @@ def suffixes(self):
        for constraint, suffix in self.conf.get('suffixes', {}).items():
            if constraint in self.spec:
                suffixes.append(suffix)
        suffixes = sorted(set(suffixes))
        if self.hash:
            suffixes.append(self.hash)
        return suffixes
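Note: the new sorted(set(...)) line makes module suffixes deterministic and duplicate-free. The surrounding loop simply appends a configured suffix whenever its constraint matches the spec; a hedged sketch of the same logic outside the modules machinery, with made-up constraint strings and suffix names:

    # hypothetical 'suffixes' entry from a modules configuration
    conf_suffixes = {'+debug': 'dbg', '^openblas': 'openblas'}

    def module_suffixes(spec, conf_suffixes, hash_part=None):
        suffixes = [s for constraint, s in conf_suffixes.items()
                    if constraint in spec]      # Spec.__contains__ does the match
        suffixes = sorted(set(suffixes))        # deterministic, no duplicates
        if hash_part:
            suffixes.append(hash_part)
        return suffixes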
@@ -625,16 +628,9 @@ def configure_options(self):
|
||||
msg = 'unknown, software installed outside of Spack'
|
||||
return msg
|
||||
|
||||
# This is quite simple right now, but contains information on how
|
||||
# to call different build system classes.
|
||||
for attr in ('configure_args', 'cmake_args'):
|
||||
try:
|
||||
configure_args = getattr(pkg, attr)()
|
||||
return ' '.join(configure_args)
|
||||
except (AttributeError, IOError, KeyError):
|
||||
# The method doesn't exist in the current spec,
|
||||
# or it's not usable
|
||||
pass
|
||||
if os.path.exists(pkg.install_configure_args_path):
|
||||
with open(pkg.install_configure_args_path, 'r') as args_file:
|
||||
return args_file.read()
|
||||
|
||||
# Returning a false-like value makes the default templates skip
|
||||
# the configure option section
|
||||
|
@@ -14,7 +14,6 @@
|
||||
import contextlib
|
||||
import copy
|
||||
import functools
|
||||
import glob
|
||||
import hashlib
|
||||
import inspect
|
||||
import os
|
||||
@@ -48,21 +47,16 @@
|
||||
import spack.util.environment
|
||||
import spack.util.web
|
||||
import spack.multimethod
|
||||
import spack.binary_distribution as binary_distribution
|
||||
|
||||
from llnl.util.filesystem import mkdirp, touch, chgrp
|
||||
from llnl.util.filesystem import working_dir, install_tree, install
|
||||
from llnl.util.filesystem import mkdirp, touch, working_dir
|
||||
from llnl.util.lang import memoized
|
||||
from llnl.util.link_tree import LinkTree
|
||||
from llnl.util.tty.log import log_output
|
||||
from llnl.util.tty.color import colorize
|
||||
from spack.filesystem_view import YamlFilesystemView
|
||||
from spack.util.executable import which
|
||||
from spack.installer import \
|
||||
install_args_docstring, PackageInstaller, InstallError
|
||||
from spack.stage import stage_prefix, Stage, ResourceStage, StageComposite
|
||||
from spack.util.environment import dump_environment
|
||||
from spack.util.package_hash import package_hash
|
||||
from spack.version import Version
|
||||
from spack.package_prefs import get_package_dir_permissions, get_package_group
|
||||
|
||||
"""Allowed URL schemes for spack packages."""
|
||||
_ALLOWED_URL_SCHEMES = ["http", "https", "ftp", "file", "git"]
|
||||
@@ -74,6 +68,9 @@
|
||||
# Filename for the Spack build/install environment file.
|
||||
_spack_build_envfile = 'spack-build-env.txt'
|
||||
|
||||
# Filename for the Spack configure args file.
|
||||
_spack_configure_argsfile = 'spack-configure-args.txt'
|
||||
|
||||
|
||||
class InstallPhase(object):
|
||||
"""Manages a single phase of the installation.
|
||||
@@ -430,10 +427,18 @@ class PackageBase(with_metaclass(PackageMeta, PackageViewMixin, object)):
|
||||
# These are default values for instance variables.
|
||||
#
|
||||
|
||||
#: A list or set of build time test functions to be called when tests
|
||||
#: are executed or 'None' if there are no such test functions.
|
||||
build_time_test_callbacks = None
|
||||
|
||||
#: Most Spack packages are used to install source or binary code while
|
||||
#: those that do not can be used to install a set of other Spack packages.
|
||||
has_code = True
|
||||
|
||||
#: A list or set of install time test functions to be called when tests
|
||||
#: are executed or 'None' if there are no such test functions.
|
||||
install_time_test_callbacks = None
|
||||
|
||||
#: By default we build in parallel. Subclasses can override this.
|
||||
parallel = True
|
||||
|
||||
@@ -758,7 +763,7 @@ def url_for_version(self, version):
        # If no specific URL, use the default, class-level URL
        url = getattr(self, 'url', None)
        urls = getattr(self, 'urls', [None])
        default_url = url or urls.pop(0)
        default_url = url or urls[0]

        # if no exact match AND no class-level default, use the nearest URL
        if not default_url:
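Note: the switch from urls.pop(0) to urls[0] matters because `urls` can be the class-level list shared by every instance of a package; pop() mutates that shared list, while indexing only reads it. A small illustration (the URLs are made up):

    urls = ['https://example.com/foo-1.0.tar.gz',
            'https://mirror.example.com/foo-1.0.tar.gz']

    first = urls[0]        # read-only: urls keeps both entries
    # first = urls.pop(0)  # would shrink the shared list, changing the result
    #                      # of any later lookup that uses the same object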
@@ -894,6 +899,18 @@ def install_log_path(self):
|
||||
# Otherwise, return the current install log path name.
|
||||
return os.path.join(install_path, _spack_build_logfile)
|
||||
|
||||
@property
|
||||
def configure_args_path(self):
|
||||
"""Return the configure args file path associated with staging."""
|
||||
return os.path.join(self.stage.path, _spack_configure_argsfile)
|
||||
|
||||
@property
|
||||
def install_configure_args_path(self):
|
||||
"""Return the configure args file path on successful installation."""
|
||||
install_path = spack.store.layout.metadata_path(self.spec)
|
||||
|
||||
return os.path.join(install_path, _spack_configure_argsfile)
|
||||
|
||||
def _make_fetcher(self):
|
||||
# Construct a composite fetcher that always contains at least
|
||||
# one element (the root package). In case there are resources
|
||||
@@ -1283,41 +1300,6 @@ def content_hash(self, content=None):
|
||||
hashlib.sha256(bytes().join(
|
||||
sorted(hash_content))).digest()).lower()
|
||||
|
||||
def do_fake_install(self):
|
||||
"""Make a fake install directory containing fake executables,
|
||||
headers, and libraries."""
|
||||
|
||||
command = self.name
|
||||
header = self.name
|
||||
library = self.name
|
||||
|
||||
# Avoid double 'lib' for packages whose names already start with lib
|
||||
if not self.name.startswith('lib'):
|
||||
library = 'lib' + library
|
||||
|
||||
dso_suffix = '.dylib' if sys.platform == 'darwin' else '.so'
|
||||
chmod = which('chmod')
|
||||
|
||||
# Install fake command
|
||||
mkdirp(self.prefix.bin)
|
||||
touch(os.path.join(self.prefix.bin, command))
|
||||
chmod('+x', os.path.join(self.prefix.bin, command))
|
||||
|
||||
# Install fake header file
|
||||
mkdirp(self.prefix.include)
|
||||
touch(os.path.join(self.prefix.include, header + '.h'))
|
||||
|
||||
# Install fake shared and static libraries
|
||||
mkdirp(self.prefix.lib)
|
||||
for suffix in [dso_suffix, '.a']:
|
||||
touch(os.path.join(self.prefix.lib, library + suffix))
|
||||
|
||||
# Install fake man page
|
||||
mkdirp(self.prefix.man.man1)
|
||||
|
||||
packages_dir = spack.store.layout.build_packages_path(self.spec)
|
||||
dump_packages(self.spec, packages_dir)
|
||||
|
||||
def _has_make_target(self, target):
|
||||
"""Checks to see if 'target' is a valid target in a Makefile.
|
||||
|
||||
@@ -1461,382 +1443,17 @@ def _stage_and_write_lock(self):
|
||||
with spack.store.db.prefix_write_lock(self.spec):
|
||||
yield
|
||||
|
||||
def _process_external_package(self, explicit):
|
||||
"""Helper function to process external packages.
|
||||
|
||||
Runs post install hooks and registers the package in the DB.
|
||||
|
||||
Args:
|
||||
explicit (bool): if the package was requested explicitly by
|
||||
the user, False if it was pulled in as a dependency of an
|
||||
explicit package.
|
||||
"""
|
||||
if self.spec.external_module:
|
||||
message = '{s.name}@{s.version} : has external module in {module}'
|
||||
tty.msg(message.format(s=self, module=self.spec.external_module))
|
||||
message = '{s.name}@{s.version} : is actually installed in {path}'
|
||||
tty.msg(message.format(s=self, path=self.spec.external_path))
|
||||
else:
|
||||
message = '{s.name}@{s.version} : externally installed in {path}'
|
||||
tty.msg(message.format(s=self, path=self.spec.external_path))
|
||||
try:
|
||||
# Check if the package was already registered in the DB
|
||||
# If this is the case, then just exit
|
||||
rec = spack.store.db.get_record(self.spec)
|
||||
message = '{s.name}@{s.version} : already registered in DB'
|
||||
tty.msg(message.format(s=self))
|
||||
# Update the value of rec.explicit if it is necessary
|
||||
self._update_explicit_entry_in_db(rec, explicit)
|
||||
|
||||
except KeyError:
|
||||
# If not register it and generate the module file
|
||||
# For external packages we just need to run
|
||||
# post-install hooks to generate module files
|
||||
message = '{s.name}@{s.version} : generating module file'
|
||||
tty.msg(message.format(s=self))
|
||||
spack.hooks.post_install(self.spec)
|
||||
# Add to the DB
|
||||
message = '{s.name}@{s.version} : registering into DB'
|
||||
tty.msg(message.format(s=self))
|
||||
spack.store.db.add(self.spec, None, explicit=explicit)
|
||||
|
||||
def _update_explicit_entry_in_db(self, rec, explicit):
|
||||
if explicit and not rec.explicit:
|
||||
with spack.store.db.write_transaction():
|
||||
rec = spack.store.db.get_record(self.spec)
|
||||
rec.explicit = True
|
||||
message = '{s.name}@{s.version} : marking the package explicit'
|
||||
tty.msg(message.format(s=self))
|
||||
|
||||
def try_install_from_binary_cache(self, explicit, unsigned=False):
|
||||
tty.msg('Searching for binary cache of %s' % self.name)
|
||||
specs = binary_distribution.get_spec(spec=self.spec,
|
||||
force=False)
|
||||
binary_spec = spack.spec.Spec.from_dict(self.spec.to_dict())
|
||||
binary_spec._mark_concrete()
|
||||
if binary_spec not in specs:
|
||||
return False
|
||||
tarball = binary_distribution.download_tarball(binary_spec)
|
||||
# see #10063 : install from source if tarball doesn't exist
|
||||
if tarball is None:
|
||||
tty.msg('%s exist in binary cache but with different hash' %
|
||||
self.name)
|
||||
return False
|
||||
tty.msg('Installing %s from binary cache' % self.name)
|
||||
binary_distribution.extract_tarball(
|
||||
binary_spec, tarball, allow_root=False,
|
||||
unsigned=unsigned, force=False)
|
||||
self.installed_from_binary_cache = True
|
||||
spack.store.db.add(
|
||||
self.spec, spack.store.layout, explicit=explicit)
|
||||
return True
|
||||
|
||||
def bootstrap_compiler(self, **kwargs):
|
||||
"""Called by do_install to setup ensure Spack has the right compiler.
|
||||
|
||||
Checks Spack's compiler configuration for a compiler that
|
||||
matches the package spec. If none are configured, installs and
|
||||
adds to the compiler configuration the compiler matching the
|
||||
CompilerSpec object."""
|
||||
compilers = spack.compilers.compilers_for_spec(
|
||||
self.spec.compiler,
|
||||
arch_spec=self.spec.architecture
|
||||
)
|
||||
if not compilers:
|
||||
dep = spack.compilers.pkg_spec_for_compiler(self.spec.compiler)
|
||||
dep.architecture = self.spec.architecture
|
||||
# concrete CompilerSpec has less info than concrete Spec
|
||||
# concretize as Spec to add that information
|
||||
dep.concretize()
|
||||
dep.package.do_install(**kwargs)
|
||||
spack.compilers.add_compilers_to_config(
|
||||
spack.compilers.find_compilers([dep.prefix])
|
||||
)
|
||||
|
||||
def do_install(self, **kwargs):
|
||||
"""Called by commands to install a package and its dependencies.
|
||||
"""Called by commands to install a package and or its dependencies.
|
||||
|
||||
Package implementations should override install() to describe
|
||||
their build process.
|
||||
|
||||
Args:
|
||||
keep_prefix (bool): Keep install prefix on failure. By default,
|
||||
destroys it.
|
||||
keep_stage (bool): By default, stage is destroyed only if there
|
||||
are no exceptions during build. Set to True to keep the stage
|
||||
even with exceptions.
|
||||
install_source (bool): By default, source is not installed, but
|
||||
for debugging it might be useful to keep it around.
|
||||
install_deps (bool): Install dependencies before installing this
|
||||
package
|
||||
skip_patch (bool): Skip patch stage of build if True.
|
||||
verbose (bool): Display verbose build output (by default,
|
||||
suppresses it)
|
||||
fake (bool): Don't really build; install fake stub files instead.
|
||||
explicit (bool): True if package was explicitly installed, False
|
||||
if package was implicitly installed (as a dependency).
|
||||
tests (bool or list or set): False to run no tests, True to test
|
||||
all packages, or a list of package names to run tests for some
|
||||
dirty (bool): Don't clean the build environment before installing.
|
||||
restage (bool): Force spack to restage the package source.
|
||||
force (bool): Install again, even if already installed.
|
||||
use_cache (bool): Install from binary package, if available.
|
||||
cache_only (bool): Fail if binary package unavailable.
|
||||
stop_at (InstallPhase): last installation phase to be executed
|
||||
(or None)
|
||||
"""
|
||||
if not self.spec.concrete:
|
||||
raise ValueError("Can only install concrete packages: %s."
|
||||
% self.spec.name)
|
||||
Args:"""
|
||||
builder = PackageInstaller(self)
|
||||
builder.install(**kwargs)
|
||||
|
||||
keep_prefix = kwargs.get('keep_prefix', False)
|
||||
keep_stage = kwargs.get('keep_stage', False)
|
||||
install_source = kwargs.get('install_source', False)
|
||||
install_deps = kwargs.get('install_deps', True)
|
||||
skip_patch = kwargs.get('skip_patch', False)
|
||||
verbose = kwargs.get('verbose', False)
|
||||
fake = kwargs.get('fake', False)
|
||||
explicit = kwargs.get('explicit', False)
|
||||
tests = kwargs.get('tests', False)
|
||||
dirty = kwargs.get('dirty', False)
|
||||
restage = kwargs.get('restage', False)
|
||||
|
||||
# install_self defaults True and is popped so that dependencies are
|
||||
# always installed regardless of whether the root was installed
|
||||
install_self = kwargs.pop('install_package', True)
|
||||
# explicit defaults False so that dependents are implicit regardless
|
||||
# of whether their dependents are implicitly or explicitly installed.
|
||||
# Spack ensures root packages of install commands are always marked to
|
||||
# install explicit
|
||||
explicit = kwargs.pop('explicit', False)
|
||||
|
||||
# For external packages the workflow is simplified, and basically
|
||||
# consists in module file generation and registration in the DB
|
||||
if self.spec.external:
|
||||
return self._process_external_package(explicit)
|
||||
|
||||
if self.installed_upstream:
|
||||
tty.msg("{0.name} is installed in an upstream Spack instance"
|
||||
" at {0.prefix}".format(self))
|
||||
# Note this skips all post-install hooks. In the case of modules
|
||||
# this is considered correct because we want to retrieve the
|
||||
# module from the upstream Spack instance.
|
||||
return
|
||||
|
||||
partial = self.check_for_unfinished_installation(keep_prefix, restage)
|
||||
|
||||
# Ensure package is not already installed
|
||||
layout = spack.store.layout
|
||||
with spack.store.db.prefix_read_lock(self.spec):
|
||||
if partial:
|
||||
tty.msg(
|
||||
"Continuing from partial install of %s" % self.name)
|
||||
elif layout.check_installed(self.spec):
|
||||
msg = '{0.name} is already installed in {0.prefix}'
|
||||
tty.msg(msg.format(self))
|
||||
rec = spack.store.db.get_record(self.spec)
|
||||
# In case the stage directory has already been created,
|
||||
# this ensures it's removed after we checked that the spec
|
||||
# is installed
|
||||
if keep_stage is False:
|
||||
self.stage.destroy()
|
||||
return self._update_explicit_entry_in_db(rec, explicit)
|
||||
|
||||
self._do_install_pop_kwargs(kwargs)
|
||||
|
||||
# First, install dependencies recursively.
|
||||
if install_deps:
|
||||
tty.debug('Installing {0} dependencies'.format(self.name))
|
||||
dep_kwargs = kwargs.copy()
|
||||
dep_kwargs['explicit'] = False
|
||||
dep_kwargs['install_deps'] = False
|
||||
for dep in self.spec.traverse(order='post', root=False):
|
||||
if spack.config.get('config:install_missing_compilers', False):
|
||||
Package._install_bootstrap_compiler(dep.package, **kwargs)
|
||||
dep.package.do_install(**dep_kwargs)
|
||||
|
||||
# Then install the compiler if it is not already installed.
|
||||
if install_deps:
|
||||
Package._install_bootstrap_compiler(self, **kwargs)
|
||||
|
||||
if not install_self:
|
||||
return
|
||||
|
||||
# Then, install the package proper
|
||||
tty.msg(colorize('@*{Installing} @*g{%s}' % self.name))
|
||||
|
||||
if kwargs.get('use_cache', True):
|
||||
if self.try_install_from_binary_cache(
|
||||
explicit, unsigned=kwargs.get('unsigned', False)):
|
||||
tty.msg('Successfully installed %s from binary cache'
|
||||
% self.name)
|
||||
print_pkg(self.prefix)
|
||||
spack.hooks.post_install(self.spec)
|
||||
return
|
||||
elif kwargs.get('cache_only', False):
|
||||
tty.die('No binary for %s found and cache-only specified'
|
||||
% self.name)
|
||||
|
||||
tty.msg('No binary for %s found: installing from source'
|
||||
% self.name)
|
||||
|
||||
# Set run_tests flag before starting build
|
||||
self.run_tests = (tests is True or
|
||||
tests and self.name in tests)
|
||||
|
||||
# Then install the package itself.
|
||||
def build_process():
|
||||
"""This implements the process forked for each build.
|
||||
|
||||
Has its own process and python module space set up by
|
||||
build_environment.fork().
|
||||
|
||||
This function's return value is returned to the parent process.
|
||||
"""
|
||||
|
||||
start_time = time.time()
|
||||
if not fake:
|
||||
if not skip_patch:
|
||||
self.do_patch()
|
||||
else:
|
||||
self.do_stage()
|
||||
|
||||
tty.msg(
|
||||
'Building {0} [{1}]'.format(self.name, self.build_system_class)
|
||||
)
|
||||
|
||||
# get verbosity from do_install() parameter or saved value
|
||||
echo = verbose
|
||||
if PackageBase._verbose is not None:
|
||||
echo = PackageBase._verbose
|
||||
|
||||
self.stage.keep = keep_stage
|
||||
with self._stage_and_write_lock():
|
||||
# Run the pre-install hook in the child process after
|
||||
# the directory is created.
|
||||
spack.hooks.pre_install(self.spec)
|
||||
if fake:
|
||||
self.do_fake_install()
|
||||
else:
|
||||
source_path = self.stage.source_path
|
||||
if install_source and os.path.isdir(source_path):
|
||||
src_target = os.path.join(
|
||||
self.spec.prefix, 'share', self.name, 'src')
|
||||
tty.msg('Copying source to {0}'.format(src_target))
|
||||
install_tree(self.stage.source_path, src_target)
|
||||
|
||||
# Do the real install in the source directory.
|
||||
with working_dir(self.stage.source_path):
|
||||
# Save the build environment in a file before building.
|
||||
dump_environment(self.env_path)
|
||||
|
||||
# cache debug settings
|
||||
debug_enabled = tty.is_debug()
|
||||
|
||||
# Spawn a daemon that reads from a pipe and redirects
|
||||
# everything to log_path
|
||||
with log_output(self.log_path, echo, True) as logger:
|
||||
for phase_name, phase_attr in zip(
|
||||
self.phases, self._InstallPhase_phases):
|
||||
|
||||
with logger.force_echo():
|
||||
inner_debug = tty.is_debug()
|
||||
tty.set_debug(debug_enabled)
|
||||
tty.msg(
|
||||
"Executing phase: '%s'" % phase_name)
|
||||
tty.set_debug(inner_debug)
|
||||
|
||||
# Redirect stdout and stderr to daemon pipe
|
||||
phase = getattr(self, phase_attr)
|
||||
phase(self.spec, self.prefix)
|
||||
|
||||
echo = logger.echo
|
||||
self.log()
|
||||
|
||||
# Run post install hooks before build stage is removed.
|
||||
spack.hooks.post_install(self.spec)
|
||||
|
||||
# Stop timer.
|
||||
self._total_time = time.time() - start_time
|
||||
build_time = self._total_time - self._fetch_time
|
||||
|
||||
tty.msg("Successfully installed %s" % self.name,
|
||||
"Fetch: %s. Build: %s. Total: %s." %
|
||||
(_hms(self._fetch_time), _hms(build_time),
|
||||
_hms(self._total_time)))
|
||||
print_pkg(self.prefix)
|
||||
|
||||
# preserve verbosity across runs
|
||||
return echo
|
||||
|
||||
# hook that allow tests to inspect this Package before installation
|
||||
# see unit_test_check() docs.
|
||||
if not self.unit_test_check():
|
||||
return
|
||||
|
||||
try:
|
||||
# Create the install prefix and fork the build process.
|
||||
if not os.path.exists(self.prefix):
|
||||
spack.store.layout.create_install_directory(self.spec)
|
||||
else:
|
||||
# Set the proper group for the prefix
|
||||
group = get_package_group(self.spec)
|
||||
if group:
|
||||
chgrp(self.prefix, group)
|
||||
# Set the proper permissions.
|
||||
# This has to be done after group because changing groups blows
|
||||
# away the sticky group bit on the directory
|
||||
mode = os.stat(self.prefix).st_mode
|
||||
perms = get_package_dir_permissions(self.spec)
|
||||
if mode != perms:
|
||||
os.chmod(self.prefix, perms)
|
||||
|
||||
# Ensure the metadata path exists as well
|
||||
mkdirp(spack.store.layout.metadata_path(self.spec), mode=perms)
|
||||
|
||||
# Fork a child to do the actual installation.
|
||||
# Preserve verbosity settings across installs.
|
||||
PackageBase._verbose = spack.build_environment.fork(
|
||||
self, build_process, dirty=dirty, fake=fake)
|
||||
|
||||
# If we installed then we should keep the prefix
|
||||
keep_prefix = self.last_phase is None or keep_prefix
|
||||
# note: PARENT of the build process adds the new package to
|
||||
# the database, so that we don't need to re-read from file.
|
||||
spack.store.db.add(
|
||||
self.spec, spack.store.layout, explicit=explicit
|
||||
)
|
||||
except spack.directory_layout.InstallDirectoryAlreadyExistsError:
|
||||
# Abort install if install directory exists.
|
||||
# But do NOT remove it (you'd be overwriting someone else's stuff)
|
||||
tty.warn("Keeping existing install prefix in place.")
|
||||
raise
|
||||
except StopIteration as e:
|
||||
# A StopIteration exception means that do_install
|
||||
# was asked to stop early from clients
|
||||
tty.msg(e.message)
|
||||
tty.msg(
|
||||
'Package stage directory : {0}'.format(self.stage.source_path)
|
||||
)
|
||||
finally:
|
||||
# Remove the install prefix if anything went wrong during install.
|
||||
if not keep_prefix:
|
||||
self.remove_prefix()
|
||||
|
||||
# The subprocess *may* have removed the build stage. Mark it
|
||||
# not created so that the next time self.stage is invoked, we
|
||||
# check the filesystem for it.
|
||||
self.stage.created = False
|
||||
|
||||
@staticmethod
|
||||
def _install_bootstrap_compiler(pkg, **install_kwargs):
|
||||
tty.debug('Bootstrapping {0} compiler for {1}'.format(
|
||||
pkg.spec.compiler, pkg.name
|
||||
))
|
||||
comp_kwargs = install_kwargs.copy()
|
||||
comp_kwargs['explicit'] = False
|
||||
comp_kwargs['install_deps'] = True
|
||||
pkg.bootstrap_compiler(**comp_kwargs)
|
||||
do_install.__doc__ += install_args_docstring
|
||||
|
||||
def unit_test_check(self):
|
||||
"""Hook for unit tests to assert things about package internals.
|
||||
@@ -1855,125 +1472,6 @@ def unit_test_check(self):
|
||||
"""
|
||||
return True
|
||||
|
||||
def check_for_unfinished_installation(
|
||||
self, keep_prefix=False, restage=False):
|
||||
"""Check for leftover files from partially-completed prior install to
|
||||
prepare for a new install attempt.
|
||||
|
||||
Options control whether these files are reused (vs. destroyed).
|
||||
|
||||
Args:
|
||||
keep_prefix (bool): True if the installation prefix needs to be
|
||||
kept, False otherwise
|
||||
restage (bool): False if the stage has to be kept, True otherwise
|
||||
|
||||
Returns:
|
||||
True if the prefix exists but the install is not complete, False
|
||||
otherwise.
|
||||
"""
|
||||
if self.spec.external:
|
||||
raise ExternalPackageError("Attempted to repair external spec %s" %
|
||||
self.spec.name)
|
||||
|
||||
with spack.store.db.prefix_write_lock(self.spec):
|
||||
try:
|
||||
record = spack.store.db.get_record(self.spec)
|
||||
installed_in_db = record.installed if record else False
|
||||
except KeyError:
|
||||
installed_in_db = False
|
||||
|
||||
partial = False
|
||||
if not installed_in_db and os.path.isdir(self.prefix):
|
||||
if not keep_prefix:
|
||||
self.remove_prefix()
|
||||
else:
|
||||
partial = True
|
||||
|
||||
if restage and self.stage.managed_by_spack:
|
||||
self.stage.destroy()
|
||||
|
||||
return partial
|
||||
|
||||
def _do_install_pop_kwargs(self, kwargs):
|
||||
"""Pops kwargs from do_install before starting the installation
|
||||
|
||||
Args:
|
||||
kwargs:
|
||||
'stop_at': last installation phase to be executed (or None)
|
||||
|
||||
"""
|
||||
self.last_phase = kwargs.pop('stop_at', None)
|
||||
if self.last_phase is not None and self.last_phase not in self.phases:
|
||||
tty.die('\'{0}\' is not an allowed phase for package {1}'
|
||||
.format(self.last_phase, self.name))
|
||||
|
||||
def log(self):
|
||||
"""Copy provenance into the install directory on success."""
|
||||
packages_dir = spack.store.layout.build_packages_path(self.spec)
|
||||
|
||||
# Remove first if we're overwriting another build
|
||||
# (can happen with spack setup)
|
||||
try:
|
||||
# log and env install paths are inside this
|
||||
shutil.rmtree(packages_dir)
|
||||
except Exception as e:
|
||||
# FIXME : this potentially catches too many things...
|
||||
tty.debug(e)
|
||||
|
||||
# Archive the whole stdout + stderr for the package
|
||||
install(self.log_path, self.install_log_path)
|
||||
|
||||
# Archive the environment used for the build
|
||||
install(self.env_path, self.install_env_path)
|
||||
|
||||
# Finally, archive files that are specific to each package
|
||||
with working_dir(self.stage.path):
|
||||
errors = StringIO()
|
||||
target_dir = os.path.join(
|
||||
spack.store.layout.metadata_path(self.spec),
|
||||
'archived-files')
|
||||
|
||||
for glob_expr in self.archive_files:
|
||||
# Check that we are trying to copy things that are
|
||||
# in the stage tree (not arbitrary files)
|
||||
abs_expr = os.path.realpath(glob_expr)
|
||||
if os.path.realpath(self.stage.path) not in abs_expr:
|
||||
errors.write(
|
||||
'[OUTSIDE SOURCE PATH]: {0}\n'.format(glob_expr)
|
||||
)
|
||||
continue
|
||||
# Now that we are sure that the path is within the correct
|
||||
# folder, make it relative and check for matches
|
||||
if os.path.isabs(glob_expr):
|
||||
glob_expr = os.path.relpath(
|
||||
glob_expr, self.stage.path
|
||||
)
|
||||
files = glob.glob(glob_expr)
|
||||
for f in files:
|
||||
try:
|
||||
target = os.path.join(target_dir, f)
|
||||
# We must ensure that the directory exists before
|
||||
# copying a file in
|
||||
mkdirp(os.path.dirname(target))
|
||||
install(f, target)
|
||||
except Exception as e:
|
||||
tty.debug(e)
|
||||
|
||||
# Here try to be conservative, and avoid discarding
|
||||
# the whole install procedure because of copying a
|
||||
# single file failed
|
||||
errors.write('[FAILED TO ARCHIVE]: {0}'.format(f))
|
||||
|
||||
if errors.getvalue():
|
||||
error_file = os.path.join(target_dir, 'errors.txt')
|
||||
mkdirp(target_dir)
|
||||
with open(error_file, 'w') as err:
|
||||
err.write(errors.getvalue())
|
||||
tty.warn('Errors occurred when archiving files.\n\t'
|
||||
'See: {0}'.format(error_file))
|
||||
|
||||
dump_packages(self.spec, packages_dir)
|
||||
|
||||
def sanity_check_prefix(self):
|
||||
"""This function checks whether install succeeded."""
|
||||
|
||||
@@ -2539,8 +2037,6 @@ def rpath_args(self):
|
||||
"""
|
||||
return " ".join("-Wl,-rpath,%s" % p for p in self.rpath)
|
||||
|
||||
build_time_test_callbacks = None
|
||||
|
||||
@on_package_attributes(run_tests=True)
|
||||
def _run_default_build_time_test_callbacks(self):
|
||||
"""Tries to call all the methods that are listed in the attribute
|
||||
@@ -2560,8 +2056,6 @@ def _run_default_build_time_test_callbacks(self):
|
||||
msg = 'RUN-TESTS: method not implemented [{0}]'
|
||||
tty.warn(msg.format(name))
|
||||
|
||||
install_time_test_callbacks = None
|
||||
|
||||
@on_package_attributes(run_tests=True)
|
||||
def _run_default_install_time_test_callbacks(self):
|
||||
"""Tries to call all the methods that are listed in the attribute
|
||||
@@ -2652,54 +2146,6 @@ def flatten_dependencies(spec, flat_dir):
|
||||
dep_files.merge(flat_dir + '/' + name)
|
||||
|
||||
|
||||
def dump_packages(spec, path):
|
||||
"""Dump all package information for a spec and its dependencies.
|
||||
|
||||
This creates a package repository within path for every
|
||||
namespace in the spec DAG, and fills the repos wtih package
|
||||
files and patch files for every node in the DAG.
|
||||
"""
|
||||
mkdirp(path)
|
||||
|
||||
# Copy in package.py files from any dependencies.
|
||||
# Note that we copy them in as they are in the *install* directory
|
||||
# NOT as they are in the repository, because we want a snapshot of
|
||||
# how *this* particular build was done.
|
||||
for node in spec.traverse(deptype=all):
|
||||
if node is not spec:
|
||||
# Locate the dependency package in the install tree and find
|
||||
# its provenance information.
|
||||
source = spack.store.layout.build_packages_path(node)
|
||||
source_repo_root = os.path.join(source, node.namespace)
|
||||
|
||||
# There's no provenance installed for the source package. Skip it.
|
||||
# User can always get something current from the builtin repo.
|
||||
if not os.path.isdir(source_repo_root):
|
||||
continue
|
||||
|
||||
# Create a source repo and get the pkg directory out of it.
|
||||
try:
|
||||
source_repo = spack.repo.Repo(source_repo_root)
|
||||
source_pkg_dir = source_repo.dirname_for_package_name(
|
||||
node.name)
|
||||
except spack.repo.RepoError:
|
||||
tty.warn("Warning: Couldn't copy in provenance for %s" %
|
||||
node.name)
|
||||
|
||||
# Create a destination repository
|
||||
dest_repo_root = os.path.join(path, node.namespace)
|
||||
if not os.path.exists(dest_repo_root):
|
||||
spack.repo.create_repo(dest_repo_root)
|
||||
repo = spack.repo.Repo(dest_repo_root)
|
||||
|
||||
# Get the location of the package in the dest repo.
|
||||
dest_pkg_dir = repo.dirname_for_package_name(node.name)
|
||||
if node is not spec:
|
||||
install_tree(source_pkg_dir, dest_pkg_dir)
|
||||
else:
|
||||
spack.repo.path.dump_provenance(node, dest_pkg_dir)
|
||||
|
||||
|
||||
def possible_dependencies(*pkg_or_spec, **kwargs):
|
||||
"""Get the possible dependencies of a number of packages.
|
||||
|
||||
@@ -2729,28 +2175,6 @@ def possible_dependencies(*pkg_or_spec, **kwargs):
|
||||
return visited
|
||||
|
||||
|
||||
def print_pkg(message):
    """Outputs a message with a package icon."""
    from llnl.util.tty.color import cwrite
    cwrite('@*g{[+]} ')
    print(message)


def _hms(seconds):
    """Convert time in seconds to hours, minutes, seconds."""
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)

    parts = []
    if h:
        parts.append("%dh" % h)
    if m:
        parts.append("%dm" % m)
    if s:
        parts.append("%.2fs" % s)
    return ' '.join(parts)

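Note: as a quick check of _hms, 3661.5 seconds is 1 hour, 1 minute and 1.5 seconds, and sub-minute values print seconds only:

    >>> _hms(3661.5)
    '1h 1m 1.50s'
    >>> _hms(42)
    '42.00s'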
class FetchError(spack.error.SpackError):
|
||||
"""Raised when something goes wrong during fetch."""
|
||||
|
||||
@@ -2758,17 +2182,6 @@ def __init__(self, message, long_msg=None):
|
||||
super(FetchError, self).__init__(message, long_msg)
|
||||
|
||||
|
||||
class InstallError(spack.error.SpackError):
|
||||
"""Raised when something goes wrong during install or uninstall."""
|
||||
|
||||
def __init__(self, message, long_msg=None):
|
||||
super(InstallError, self).__init__(message, long_msg)
|
||||
|
||||
|
||||
class ExternalPackageError(InstallError):
|
||||
"""Raised by install() when a package is only for external use."""
|
||||
|
||||
|
||||
class PackageStillNeededError(InstallError):
|
||||
"""Raised when package is still needed by another on uninstall."""
|
||||
def __init__(self, spec, dependents):
|
||||
|
@@ -16,9 +16,6 @@
|
||||
#: This file lives in $prefix/lib/spack/spack/__file__
|
||||
prefix = ancestor(__file__, 4)
|
||||
|
||||
#: User configuration location
|
||||
user_config_path = os.path.expanduser('~/.spack')
|
||||
|
||||
#: synonym for prefix
|
||||
spack_root = prefix
|
||||
|
||||
@@ -41,8 +38,6 @@
|
||||
test_path = os.path.join(module_path, "test")
|
||||
hooks_path = os.path.join(module_path, "hooks")
|
||||
var_path = os.path.join(prefix, "var", "spack")
|
||||
user_var_path = os.path.join(user_config_path, "var", "spack")
|
||||
stage_path = os.path.join(user_var_path, "stage")
|
||||
repos_path = os.path.join(var_path, "repos")
|
||||
share_path = os.path.join(prefix, "share", "spack")
|
||||
|
||||
@@ -50,6 +45,9 @@
|
||||
packages_path = os.path.join(repos_path, "builtin")
|
||||
mock_packages_path = os.path.join(repos_path, "builtin.mock")
|
||||
|
||||
#: User configuration location
|
||||
user_config_path = os.path.expanduser('~/.spack')
|
||||
|
||||
|
||||
opt_path = os.path.join(prefix, "opt")
|
||||
etc_path = os.path.join(prefix, "etc")
|
||||
|
@@ -50,7 +50,10 @@
|
||||
|
||||
from spack.package import \
|
||||
install_dependency_symlinks, flatten_dependencies, \
|
||||
DependencyConflictError, InstallError, ExternalPackageError
|
||||
DependencyConflictError
|
||||
|
||||
from spack.installer import \
|
||||
ExternalPackageError, InstallError, InstallLockError, UpstreamPackageError
|
||||
|
||||
from spack.variant import any_combination_of, auto_or_any_combination_of
|
||||
from spack.variant import disjoint_sets
|
||||
|
@@ -7,7 +7,7 @@
|
||||
import re
|
||||
import llnl.util.tty as tty
|
||||
from spack.paths import build_env_path
|
||||
from spack.util.executable import which
|
||||
from spack.util.executable import Executable
|
||||
from spack.architecture import Platform, Target, NoPlatformError
|
||||
from spack.operating_systems.cray_frontend import CrayFrontend
|
||||
from spack.operating_systems.cnl import Cnl
|
||||
@@ -117,11 +117,17 @@ def _default_target_from_env(self):
|
||||
'''
|
||||
# env -i /bin/bash -lc echo $CRAY_CPU_TARGET 2> /dev/null
|
||||
if getattr(self, 'default', None) is None:
|
||||
env = which('env')
|
||||
output = env("-i", "/bin/bash", "-lc", "echo $CRAY_CPU_TARGET",
|
||||
output=str, error=os.devnull)
|
||||
self.default = output.strip()
|
||||
tty.debug("Found default module:%s" % self.default)
|
||||
bash = Executable('/bin/bash')
|
||||
output = bash(
|
||||
'-lc', 'echo $CRAY_CPU_TARGET',
|
||||
env={'TERM': os.environ.get('TERM', '')},
|
||||
output=str,
|
||||
error=os.devnull
|
||||
)
|
||||
output = ''.join(output.split()) # remove all whitespace
|
||||
if output:
|
||||
self.default = output
|
||||
tty.debug("Found default module:%s" % self.default)
|
||||
return self.default
|
||||
|
||||
def _avail_targets(self):
|
||||
|
@@ -400,8 +400,8 @@ def replace_prefix_text(path_name, old_dir, new_dir):
def replace_prefix_bin(path_name, old_dir, new_dir):
    """
    Attempt to replace old install prefix with new install prefix
    in binary files by replacing with null terminated string
    that is the same length unless the old path is shorter
    in binary files by prefixing new install prefix with os.sep
    until the lengths of the prefixes are the same.
    """

    def replace(match):
@@ -429,6 +429,38 @@ def replace(match):
        f.truncate()


def replace_prefix_nullterm(path_name, old_dir, new_dir):
    """
    Attempt to replace old install prefix with new install prefix
    in binary files by replacing with null terminated string
    that is the same length unless the old path is shorter
    Used on linux to replace mach-o rpaths
    """

    def replace(match):
        occurances = match.group().count(old_dir.encode('utf-8'))
        olen = len(old_dir.encode('utf-8'))
        nlen = len(new_dir.encode('utf-8'))
        padding = (olen - nlen) * occurances
        if padding < 0:
            return data
        return match.group().replace(old_dir.encode('utf-8'),
                                     new_dir.encode('utf-8')) + b'\0' * padding
    with open(path_name, 'rb+') as f:
        data = f.read()
        f.seek(0)
        original_data_len = len(data)
        pat = re.compile(old_dir.encode('utf-8') + b'([^\0]*?)\0')
        if not pat.search(data):
            return
        ndata = pat.sub(replace, data)
        if not len(ndata) == original_data_len:
            raise BinaryStringReplacementException(
                path_name, original_data_len, len(ndata))
        f.write(ndata)
        f.truncate()

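Note: replace_prefix_nullterm can only shorten (or keep) a prefix because every match is padded back to its original byte length with NUL bytes, which keeps the file size and all offsets intact. A self-contained illustration of that padding arithmetic, with made-up paths:

    old = b'/spack/opt/spack/long-prefix'
    new = b'/opt/sw'

    data = old + b'/lib/libfoo.dylib\0trailing bytes'
    match = data[:data.index(b'\0') + 1]        # the NUL-terminated string
    padding = (len(old) - len(new)) * match.count(old)
    replaced = match.replace(old, new) + b'\0' * padding

    assert len(replaced) == len(match)          # overall length is unchanged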
def relocate_macho_binaries(path_names, old_dir, new_dir, allow_root):
|
||||
"""
|
||||
Change old_dir to new_dir in LC_RPATH of mach-o files (on macOS)
|
||||
@@ -466,8 +498,7 @@ def relocate_macho_binaries(path_names, old_dir, new_dir, allow_root):
|
||||
modify_object_macholib(path_name, placeholder, new_dir)
|
||||
modify_object_macholib(path_name, old_dir, new_dir)
|
||||
if len(new_dir) <= len(old_dir):
|
||||
replace_prefix_bin(path_name, old_dir,
|
||||
new_dir)
|
||||
replace_prefix_nullterm(path_name, old_dir, new_dir)
|
||||
else:
|
||||
tty.warn('Cannot do a binary string replacement'
|
||||
' with padding for %s'
|
||||
@@ -684,13 +715,6 @@ def file_is_relocatable(file, paths_to_relocate=None):
|
||||
|
||||
strings = Executable('strings')
|
||||
|
||||
# if we're relocating patchelf itself, use it
|
||||
|
||||
if file[-13:] == "/bin/patchelf":
|
||||
patchelf = Executable(file)
|
||||
else:
|
||||
patchelf = Executable(get_patchelf())
|
||||
|
||||
# Remove the RPATHS from the strings in the executable
|
||||
set_of_strings = set(strings(file, output=str).split())
|
||||
|
||||
@@ -700,8 +724,8 @@ def file_is_relocatable(file, paths_to_relocate=None):

    if platform.system().lower() == 'linux':
        if m_subtype == 'x-executable' or m_subtype == 'x-sharedlib':
            rpaths = patchelf('--print-rpath', file, output=str).strip()
            set_of_strings.discard(rpaths.strip())
            rpaths = ':'.join(get_existing_elf_rpaths(file))
            set_of_strings.discard(rpaths)
    if platform.system().lower() == 'darwin':
        if m_subtype == 'x-mach-binary':
            rpaths, deps, idpath = macho_get_paths(file)
@@ -756,4 +780,5 @@ def mime_type(file):
    tty.debug('[MIME_TYPE] {0} -> {1}'.format(file, output.strip()))
    if '/' not in output:
        output += '/'
    return tuple(output.strip().split('/'))
    split_by_slash = output.strip().split('/')
    return (split_by_slash[0], "/".join(split_by_slash[1:]))
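Note: the last hunk guards against mime subtype strings that themselves contain a slash; splitting on the first '/' only keeps the return value a 2-tuple. For example (the second string is contrived, purely to show the difference from the old tuple(...) behaviour):

    output = 'application/x-sharedlib'
    split_by_slash = output.strip().split('/')
    assert (split_by_slash[0], "/".join(split_by_slash[1:])) == \
        ('application', 'x-sharedlib')

    output = 'text/x-script/python'             # more than one slash
    split_by_slash = output.strip().split('/')
    assert (split_by_slash[0], "/".join(split_by_slash[1:])) == \
        ('text', 'x-script/python')             # old code returned a 3-tuple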
@@ -44,7 +44,8 @@ def fetch_package_log(pkg):
|
||||
|
||||
|
||||
class InfoCollector(object):
|
||||
"""Decorates PackageBase.do_install to collect information
|
||||
"""Decorates PackageInstaller._install_task, which is called by
|
||||
PackageBase.do_install for each spec, to collect information
|
||||
on the installation of certain specs.
|
||||
|
||||
When exiting the context this change will be rolled-back.
|
||||
@@ -57,8 +58,8 @@ class InfoCollector(object):
|
||||
specs (list of Spec): specs whose install information will
|
||||
be recorded
|
||||
"""
|
||||
#: Backup of PackageBase.do_install
|
||||
_backup_do_install = spack.package.PackageBase.do_install
|
||||
#: Backup of PackageInstaller._install_task
|
||||
_backup__install_task = spack.package.PackageInstaller._install_task
|
||||
|
||||
def __init__(self, specs):
|
||||
#: Specs that will be installed
|
||||
@@ -108,15 +109,16 @@ def __enter__(self):
|
||||
}
|
||||
spec['packages'].append(package)
|
||||
|
||||
def gather_info(do_install):
|
||||
"""Decorates do_install to gather useful information for
|
||||
a CI report.
|
||||
def gather_info(_install_task):
|
||||
"""Decorates PackageInstaller._install_task to gather useful
|
||||
information on PackageBase.do_install for a CI report.
|
||||
|
||||
It's defined here to capture the environment and build
|
||||
this context as the installations proceed.
|
||||
"""
|
||||
@functools.wraps(do_install)
|
||||
def wrapper(pkg, *args, **kwargs):
|
||||
@functools.wraps(_install_task)
|
||||
def wrapper(installer, task, *args, **kwargs):
|
||||
pkg = task.pkg
|
||||
|
||||
# We accounted before for what is already installed
|
||||
installed_on_entry = pkg.installed
|
||||
@@ -134,7 +136,7 @@ def wrapper(pkg, *args, **kwargs):
|
||||
value = None
|
||||
try:
|
||||
|
||||
value = do_install(pkg, *args, **kwargs)
|
||||
value = _install_task(installer, task, *args, **kwargs)
|
||||
package['result'] = 'success'
|
||||
package['stdout'] = fetch_package_log(pkg)
|
||||
package['installed_from_binary_cache'] = \
|
||||
@@ -182,14 +184,15 @@ def wrapper(pkg, *args, **kwargs):
|
||||
|
||||
return wrapper
|
||||
|
||||
spack.package.PackageBase.do_install = gather_info(
|
||||
spack.package.PackageBase.do_install
|
||||
spack.package.PackageInstaller._install_task = gather_info(
|
||||
spack.package.PackageInstaller._install_task
|
||||
)
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
|
||||
# Restore the original method in PackageBase
|
||||
spack.package.PackageBase.do_install = InfoCollector._backup_do_install
|
||||
# Restore the original method in PackageInstaller
|
||||
spack.package.PackageInstaller._install_task = \
|
||||
InfoCollector._backup__install_task
|
||||
|
||||
for spec in self.specs:
|
||||
spec['npackages'] = len(spec['packages'])
|
||||
@@ -208,9 +211,9 @@ class collect_info(object):
|
||||
"""Collects information to build a report while installing
|
||||
and dumps it on exit.
|
||||
|
||||
If the format name is not ``None``, this context manager
|
||||
decorates PackageBase.do_install when entering the context
|
||||
and unrolls the change when exiting.
|
||||
If the format name is not ``None``, this context manager decorates
|
||||
PackageInstaller._install_task when entering the context for a
|
||||
PackageBase.do_install operation and unrolls the change when exiting.
|
||||
|
||||
Within the context, only the specs that are passed to it
|
||||
on initialization will be recorded for the report. Data from
|
||||
@@ -255,14 +258,14 @@ def concretization_report(self, msg):
|
||||
|
||||
def __enter__(self):
|
||||
if self.format_name:
|
||||
# Start the collector and patch PackageBase.do_install
|
||||
# Start the collector and patch PackageInstaller._install_task
|
||||
self.collector = InfoCollector(self.specs)
|
||||
self.collector.__enter__()
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if self.format_name:
|
||||
# Close the collector and restore the
|
||||
# original PackageBase.do_install
|
||||
# original PackageInstaller._install_task
|
||||
self.collector.__exit__(exc_type, exc_val, exc_tb)
|
||||
|
||||
report_data = {'specs': self.collector.specs}
|
||||
|
2126  lib/spack/spack/solver/blis.lp  (new file): File diff suppressed because it is too large
9304  lib/spack/spack/solver/hdf5.lp  (new file): File diff suppressed because it is too large
 282  lib/spack/spack/solver/old_concretizer.lp  (new file):
@@ -0,0 +1,282 @@
%=============================================================================
|
||||
% Generate
|
||||
%=============================================================================
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Version semantics
|
||||
%-----------------------------------------------------------------------------
|
||||
|
||||
% versions are declared w/priority -- declared with priority implies declared
|
||||
version_declared(P, V) :- version_declared(P, V, _).
|
||||
|
||||
% If something is a package, it has only one version and that must be a
|
||||
% possible version.
|
||||
1 { version(P, V) : version_possible(P, V) } 1 :- node(P).
|
||||
|
||||
% If a version is declared but conflicted, it's not possible.
|
||||
version_possible(P, V) :- version_declared(P, V), not version_conflict(P, V).
|
||||
|
||||
version_weight(P, V, N) :- version(P, V), version_declared(P, V, N).
|
||||
|
||||
#defined version_conflict/2.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Dependency semantics
|
||||
%-----------------------------------------------------------------------------
|
||||
% Dependencies of any type imply that one package "depends on" another
|
||||
depends_on(P, D) :- depends_on(P, D, _).
|
||||
|
||||
% declared dependencies are real if they're not virtual
|
||||
depends_on(P, D, T) :- declared_dependency(P, D, T), not virtual(D), node(P).
|
||||
|
||||
% if you declare a dependency on a virtual, you depend on one of its providers
|
||||
1 { depends_on(P, Q, T) : provides_virtual(Q, V) } 1
|
||||
:- declared_dependency(P, V, T), virtual(V), node(P).
|
||||
|
||||
% if a virtual was required by some root spec, one provider is in the DAG
|
||||
1 { node(P) : provides_virtual(P, V) } 1 :- virtual_node(V).
|
||||
|
||||
% for any virtual, there can be at most one provider in the DAG
|
||||
provider(P, V) :- node(P), provides_virtual(P, V).
|
||||
0 { provider(P, V) : node(P) } 1 :- virtual(V).
|
||||
|
||||
% give dependents the virtuals they want
|
||||
provider_weight(D, N)
|
||||
:- virtual(V), depends_on(P, D), provider(D, V),
|
||||
pkg_provider_preference(P, V, D, N).
|
||||
provider_weight(D, N)
|
||||
:- virtual(V), depends_on(P, D), provider(D, V),
|
||||
not pkg_provider_preference(P, V, D, _),
|
||||
default_provider_preference(V, D, N).
|
||||
|
||||
% if there's no preference for something, it costs 100 to discourage its
|
||||
% use with minimization
|
||||
provider_weight(D, 100)
|
||||
:- virtual(V), depends_on(P, D), provider(D, V),
|
||||
not pkg_provider_preference(P, V, D, _),
|
||||
not default_provider_preference(V, D, _).
|
||||
|
||||
% all nodes must be reachable from some root
|
||||
needed(D) :- root(D), node(D).
|
||||
needed(D) :- root(P), depends_on(P, D).
|
||||
needed(D) :- needed(P), depends_on(P, D), node(P).
|
||||
:- node(P), not needed(P).
|
||||
|
||||
% real dependencies imply new nodes.
|
||||
node(D) :- node(P), depends_on(P, D).
|
||||
|
||||
% do not warn if generated program contains none of these.
|
||||
#defined depends_on/3.
|
||||
#defined declared_dependency/3.
|
||||
#defined virtual/1.
|
||||
#defined virtual_node/1.
|
||||
#defined provides_virtual/2.
|
||||
#defined pkg_provider_preference/4.
|
||||
#defined default_provider_preference/3.
|
||||
#defined root/1.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Variant semantics
|
||||
%-----------------------------------------------------------------------------
|
||||
% one variant value for single-valued variants.
|
||||
1 { variant_value(P, V, X) : variant_possible_value(P, V, X) } 1
|
||||
:- node(P), variant(P, V), variant_single_value(P, V).
|
||||
|
||||
% at least one variant value for multi-valued variants.
|
||||
1 { variant_value(P, V, X) : variant_possible_value(P, V, X) }
|
||||
:- node(P), variant(P, V), not variant_single_value(P, V).
|
||||
|
||||
% if a variant is set to anything, it is considered 'set'.
|
||||
variant_set(P, V) :- variant_set(P, V, _).
|
||||
|
||||
% variant_set is an explicitly set variant value. If it's not 'set',
|
||||
% we revert to the default value. If it is set, we force the set value
|
||||
variant_value(P, V, X) :- node(P), variant(P, V), variant_set(P, V, X).
|
||||
|
||||
% prefer default values.
|
||||
variant_not_default(P, V, X, 1)
|
||||
:- variant_value(P, V, X),
|
||||
not variant_default_value(P, V, X),
|
||||
node(P).
|
||||
|
||||
variant_not_default(P, V, X, 0)
|
||||
:- variant_value(P, V, X),
|
||||
variant_default_value(P, V, X),
|
||||
node(P).
|
||||
|
||||
% suppress wranings about this atom being unset. It's only set if some
|
||||
% spec or some package sets it, and without this, clingo will give
|
||||
% warnings like 'info: atom does not occur in any rule head'.
|
||||
#defined variant/2.
|
||||
#defined variant_set/3.
|
||||
#defined variant_single_value/2.
|
||||
#defined variant_default_value/3.
|
||||
#defined variant_possible_value/3.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Platform/OS semantics
|
||||
%-----------------------------------------------------------------------------
|
||||
% one platform, os per node
|
||||
% TODO: convert these to use optimization, like targets.
|
||||
1 { node_platform(P, A) : node_platform(P, A) } 1 :- node(P).
|
||||
1 { node_os(P, A) : node_os(P, A) } 1 :- node(P).
|
||||
|
||||
% arch fields for pkg P are set if set to anything
|
||||
node_platform_set(P) :- node_platform_set(P, _).
|
||||
node_os_set(P) :- node_os_set(P, _).
|
||||
|
||||
% if no platform/os is set, fall back to the defaults
|
||||
node_platform(P, A)
|
||||
:- node(P), not node_platform_set(P), node_platform_default(A).
|
||||
node_os(P, A) :- node(P), not node_os_set(P), node_os_default(A).
|
||||
|
||||
% setting os/platform on a node is a hard constraint
|
||||
node_platform(P, A) :- node(P), node_platform_set(P, A).
|
||||
node_os(P, A) :- node(P), node_os_set(P, A).
|
||||
|
||||
% avoid info warnings (see variants)
|
||||
#defined node_platform_set/2.
|
||||
#defined node_os_set/2.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Target semantics
|
||||
%-----------------------------------------------------------------------------
|
||||
% one target per node -- optimization will pick the "best" one
|
||||
1 { node_target(P, T) : target(T) } 1 :- node(P).
|
||||
|
||||
% can't use targets on node if the compiler for the node doesn't support them
|
||||
:- node_target(P, T), not compiler_supports_target(C, V, T),
|
||||
node_compiler(P, C), node_compiler_version(P, C, V).
|
||||
|
||||
% if a target is set explicitly, respect it
|
||||
node_target(P, T) :- node(P), node_target_set(P, T).
|
||||
|
||||
% each node has the weight of its assigned target
|
||||
node_target_weight(P, N) :- node(P), node_target(P, T), target_weight(T, N).
|
||||
|
||||
#defined node_target_set/2.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Compiler semantics
|
||||
%-----------------------------------------------------------------------------
|
||||
|
||||
% one compiler per node
|
||||
1 { node_compiler(P, C) : compiler(C) } 1 :- node(P).
|
||||
1 { node_compiler_version(P, C, V) : compiler_version(C, V) } 1 :- node(P).
|
||||
1 { compiler_weight(P, N) : compiler_weight(P, N) } 1 :- node(P).
|
||||
|
||||
% dependencies imply we should try to match hard compiler constraints
|
||||
% todo: look at what to do about intersecting constraints here. we'd
|
||||
% ideally go with the "lowest" pref in the DAG
|
||||
node_compiler_match_pref(P, C) :- node_compiler_hard(P, C).
|
||||
node_compiler_match_pref(D, C)
|
||||
:- depends_on(P, D), node_compiler_match_pref(P, C),
|
||||
not node_compiler_hard(D, _).
|
||||
compiler_match(P, 1) :- node_compiler(P, C), node_compiler_match_pref(P, C).
|
||||
|
||||
node_compiler_version_match_pref(P, C, V)
|
||||
:- node_compiler_version_hard(P, C, V).
|
||||
node_compiler_version_match_pref(D, C, V)
|
||||
:- depends_on(P, D), node_compiler_version_match_pref(P, C, V),
|
||||
not node_compiler_version_hard(D, C, _).
|
||||
compiler_version_match(P, 1)
|
||||
:- node_compiler_version(P, C, V),
|
||||
node_compiler_version_match_pref(P, C, V).
|
||||
|
||||
#defined node_compiler_hard/2.
|
||||
#defined node_compiler_version_hard/3.
|
||||
|
||||
% compilers weighted by preference acccording to packages.yaml
|
||||
compiler_weight(P, N)
|
||||
:- node_compiler(P, C), node_compiler_version(P, C, V),
|
||||
node_compiler_preference(P, C, V, N).
|
||||
compiler_weight(P, N)
|
||||
:- node_compiler(P, C), node_compiler_version(P, C, V),
|
||||
not node_compiler_preference(P, C, _, _),
|
||||
default_compiler_preference(C, V, N).
|
||||
compiler_weight(P, 100)
|
||||
:- node_compiler(P, C), node_compiler_version(P, C, V),
   not node_compiler_preference(P, C, _, _),
   not default_compiler_preference(C, _, _).

#defined node_compiler_preference/4.
#defined default_compiler_preference/3.

%-----------------------------------------------------------------------------
% Compiler flags
%-----------------------------------------------------------------------------
% propagate flags when compilers match
inherit_flags(P, D)
  :- depends_on(P, D), node_compiler(P, C), node_compiler(D, C),
     compiler(C), flag_type(T).
node_flag_inherited(D, T, F) :- node_flag_set(P, T, F), inherit_flags(P, D).
node_flag_inherited(D, T, F)
  :- node_flag_inherited(P, T, F), inherit_flags(P, D).

% node with flags set to anything is "set"
node_flag_set(P) :- node_flag_set(P, _, _).

% remember where flags came from
node_flag_source(P, P) :- node_flag_set(P).
node_flag_source(D, Q) :- node_flag_source(P, Q), inherit_flags(P, D).

% compiler flags from compilers.yaml are put on nodes if compiler matches
node_flag(P, T, F),
node_flag_compiler_default(P)
  :- not node_flag_set(P), compiler_version_flag(C, V, T, F),
     node_compiler(P, C), node_compiler_version(P, C, V),
     flag_type(T), compiler(C), compiler_version(C, V).

% if a flag is set to something or inherited, it's included
node_flag(P, T, F) :- node_flag_set(P, T, F).
node_flag(P, T, F) :- node_flag_inherited(P, T, F).

% if no node flags are set for a type, there are no flags.
no_flags(P, T) :- not node_flag(P, T, _), node(P), flag_type(T).

#defined compiler_version_flag/4.
#defined node_flag/3.
#defined node_flag_set/3.

%-----------------------------------------------------------------------------
% How to optimize the spec (high to low priority)
%-----------------------------------------------------------------------------
% weight root preferences higher
%
% TODO: how best to deal with this issue? It's not clear how best to
% weight all the constraints. Without this root preference, `spack solve
% hdf5` will pick mpich instead of openmpi, even if openmpi is the
% preferred provider, because openmpi has a version constraint on hwloc.
% It ends up choosing between settling for an old version of hwloc, or
% picking the second-best provider. This workaround weights root
% preferences higher so that hdf5's prefs are more important, but it's
% not clear this is a general solution. It would be nice to weight by
% distance to root, but that seems to slow down the solve a lot.
%
% One option is to make preferences hard constraints. Or maybe we need
% to look more closely at where a constraint came from and factor that
% into our weights. e.g., a non-default variant resulting from a version
% constraint counts like a version constraint. Needs more thought later.
%
root(D, 2) :- root(D), node(D).
root(D, 1) :- not root(D), node(D).

% prefer default variants
#minimize { N*R@10,P,V,X : variant_not_default(P, V, X, N), root(P, R) }.

% pick most preferred virtual providers
#minimize{ N*R@9,D : provider_weight(D, N), root(P, R) }.

% prefer more recent versions.
#minimize{ N@8,P,V : version_weight(P, V, N) }.

% compiler preferences
#maximize{ N@7,P : compiler_match(P, N) }.
#minimize{ N@6,P : compiler_weight(P, N) }.

% fastest target for node

% TODO: if these are slightly different by compiler (e.g., skylake is
% best, gcc supports skylake and broadwell, clang's best is haswell)
% things seem to get really slow.
#minimize{ N@5,P : node_target_weight(P, N) }.
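The `@` levels in the statements above are clingo optimization priorities: the solver compares candidate answer sets lexicographically, settling the highest level first and consulting a lower level only to break ties. A minimal plain-Python sketch of that comparison, using hypothetical cost dictionaries rather than anything produced by Spack or clingo (and assuming minimization at every level for simplicity):

def better(costs_a, costs_b, levels=(10, 9, 8, 7, 6, 5)):
    """Return True if candidate A beats candidate B.

    costs_a and costs_b map a priority level to the summed cost at that
    level; minimization is assumed at every level for simplicity.
    """
    for level in levels:                      # most important level first
        a, b = costs_a.get(level, 0), costs_b.get(level, 0)
        if a != b:
            return a < b                      # lower cost wins at this level
    return False                              # equal on every level


# A wins because it is better at level 8 (version weight), even though it
# is worse at level 6 (compiler weight): the higher level is decided first.
print(better({8: 1, 6: 5}, {8: 2, 6: 0}))     # True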
|
@@ -369,6 +369,10 @@ def _satisfies_target(self, other_target, strict):
|
||||
if not need_to_check:
|
||||
return True
|
||||
|
||||
# self is not concrete, but other_target is there and strict=True
|
||||
if self.target is None:
|
||||
return False
|
||||
|
||||
for target_range in str(other_target).split(','):
|
||||
t_min, sep, t_max = target_range.partition(':')
|
||||
|
||||
@@ -1919,9 +1923,7 @@ def from_dict(data):
|
||||
|
||||
yaml_deps = node[name]['dependencies']
|
||||
for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps):
|
||||
# Fill in dependencies by looking them up by name in deps dict
|
||||
deps[name]._dependencies[dname] = DependencySpec(
|
||||
deps[name], deps[dname], dtypes)
|
||||
deps[name]._add_dependency(deps[dname], dtypes)
|
||||
|
||||
return spec
|
||||
|
||||
|
@@ -154,7 +154,6 @@ def get_stage_root():
|
||||
|
||||
if _stage_root is None:
|
||||
candidates = spack.config.get('config:build_stage')
|
||||
|
||||
if isinstance(candidates, string_types):
|
||||
candidates = [candidates]
|
||||
|
||||
@@ -308,8 +307,9 @@ def __init__(
|
||||
lock_id = prefix_bits(sha1, bit_length(sys.maxsize))
|
||||
stage_lock_path = os.path.join(get_stage_root(), '.lock')
|
||||
|
||||
tty.debug("Creating stage lock {0}".format(self.name))
|
||||
Stage.stage_locks[self.name] = spack.util.lock.Lock(
|
||||
stage_lock_path, lock_id, 1)
|
||||
stage_lock_path, lock_id, 1, desc=self.name)
|
||||
|
||||
self._lock = Stage.stage_locks[self.name]
|
||||
|
||||
|
@@ -34,7 +34,7 @@
|
||||
import spack.directory_layout
|
||||
|
||||
#: default installation root, relative to the Spack install path
|
||||
default_root = os.path.join(spack.paths.user_config_path, 'opt/spack')
|
||||
default_root = os.path.join(spack.paths.opt_path, 'spack')
|
||||
|
||||
|
||||
class Store(object):
|
||||
@@ -70,10 +70,9 @@ def reindex(self):
|
||||
|
||||
def _store():
|
||||
"""Get the singleton store instance."""
|
||||
root = spack.config.get('config:active_tree', default_root)
|
||||
|
||||
# Canonicalize Path for Root regardless of origin
|
||||
root = spack.config.get('config:install_tree', default_root)
|
||||
root = spack.util.path.canonicalize_path(root)
|
||||
|
||||
return Store(root,
|
||||
spack.config.get('config:install_path_scheme'),
|
||||
spack.config.get('config:install_hash_length'))
|
||||
@@ -89,19 +88,11 @@ def _store():
|
||||
|
||||
|
||||
def retrieve_upstream_dbs():
|
||||
other_spack_instances = spack.config.get('upstreams', {})
|
||||
|
||||
global_fallback = {'global': {'install_tree': '$spack/opt/spack',
|
||||
'modules':
|
||||
{'tcl': '$spack/share/spack/modules',
|
||||
'lmod': '$spack/share/spack/lmod',
|
||||
'dotkit': '$spack/share/spack/dotkit'}}}
|
||||
|
||||
other_spack_instances = spack.config.get('upstreams',
|
||||
global_fallback)
|
||||
install_roots = []
|
||||
for install_properties in other_spack_instances.values():
|
||||
install_roots.append(spack.util.path.canonicalize_path(
|
||||
install_properties['install_tree']))
|
||||
install_roots.append(install_properties['install_tree'])
|
||||
|
||||
return _construct_upstream_dbs_from_install_roots(install_roots)
|
||||
|
||||
|
@@ -214,3 +214,16 @@ def test_optimization_flags_with_custom_versions(
|
||||
)
|
||||
opt_flags = target.optimization_flags(compiler)
|
||||
assert opt_flags == expected_flags
|
||||
|
||||
|
||||
@pytest.mark.regression('15306')
|
||||
@pytest.mark.parametrize('architecture_tuple,constraint_tuple', [
|
||||
(('linux', 'ubuntu18.04', None), ('linux', None, 'x86_64')),
|
||||
(('linux', 'ubuntu18.04', None), ('linux', None, 'x86_64:')),
|
||||
])
|
||||
def test_satisfy_strict_constraint_when_not_concrete(
|
||||
architecture_tuple, constraint_tuple
|
||||
):
|
||||
architecture = spack.spec.ArchSpec(architecture_tuple)
|
||||
constraint = spack.spec.ArchSpec(constraint_tuple)
|
||||
assert not architecture.satisfies(constraint, strict=True)
|
||||
|
66
lib/spack/spack/test/buildtask.py
Normal file
@@ -0,0 +1,66 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import pytest

import spack.installer as inst
import spack.repo
import spack.spec


def test_build_task_errors(install_mockery):
    with pytest.raises(ValueError, match='must be a package'):
        inst.BuildTask('abc', False, 0, 0, 0, [])

    pkg = spack.repo.get('trivial-install-test-package')
    with pytest.raises(ValueError, match='must have a concrete spec'):
        inst.BuildTask(pkg, False, 0, 0, 0, [])

    spec = spack.spec.Spec('trivial-install-test-package')
    spec.concretize()
    assert spec.concrete
    with pytest.raises(inst.InstallError, match='Cannot create a build task'):
        inst.BuildTask(spec.package, False, 0, 0, inst.STATUS_REMOVED, [])


def test_build_task_basics(install_mockery):
    spec = spack.spec.Spec('dependent-install')
    spec.concretize()
    assert spec.concrete

    # Ensure key properties match expectations
    task = inst.BuildTask(spec.package, False, 0, 0, inst.STATUS_ADDED, [])
    assert task.priority == len(task.uninstalled_deps)
    assert task.key == (task.priority, task.sequence)

    # Ensure flagging installed works as expected
    assert len(task.uninstalled_deps) > 0
    assert task.dependencies == task.uninstalled_deps
    task.flag_installed(task.dependencies)
    assert len(task.uninstalled_deps) == 0
    assert task.priority == 0


def test_build_task_strings(install_mockery):
    """Tests of build_task repr and str for coverage purposes."""
    # Using a package with one dependency
    spec = spack.spec.Spec('dependent-install')
    spec.concretize()
    assert spec.concrete

    # Ensure key properties match expectations
    task = inst.BuildTask(spec.package, False, 0, 0, inst.STATUS_ADDED, [])

    # Cover __repr__
    irep = task.__repr__()
    assert irep.startswith(task.__class__.__name__)
    assert "status='queued'" in irep  # == STATUS_ADDED
    assert "sequence=" in irep

    # Cover __str__
    istr = str(task)
    assert "status=queued" in istr  # == STATUS_ADDED
    assert "#dependencies=1" in istr
    assert "priority=" in istr
|
@@ -13,6 +13,7 @@
|
||||
import spack.paths as spack_paths
|
||||
import spack.spec as spec
|
||||
import spack.util.web as web_util
|
||||
import spack.util.gpg
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -41,6 +42,15 @@ def test_urlencode_string():
|
||||
assert(s_enc == 'Spack+Test+Project')
|
||||
|
||||
|
||||
def has_gpg():
|
||||
try:
|
||||
gpg = spack.util.gpg.Gpg.gpg()
|
||||
except spack.util.gpg.SpackGPGError:
|
||||
gpg = None
|
||||
return bool(gpg)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not has_gpg(), reason='This test requires gpg')
|
||||
def test_import_signing_key(mock_gnupghome):
|
||||
signing_key_dir = spack_paths.mock_gpg_keys_path
|
||||
signing_key_path = os.path.join(signing_key_dir, 'package-signing-key')
|
||||
|
@@ -21,6 +21,7 @@
|
||||
from spack.test.conftest import MockPackage, MockPackageMultiRepo
|
||||
import spack.util.executable as exe
|
||||
import spack.util.spack_yaml as syaml
|
||||
import spack.util.gpg
|
||||
|
||||
|
||||
ci_cmd = SpackCommand('ci')
|
||||
@@ -32,6 +33,14 @@
|
||||
git = exe.which('git', required=True)
|
||||
|
||||
|
||||
def has_gpg():
|
||||
try:
|
||||
gpg = spack.util.gpg.Gpg.gpg()
|
||||
except spack.util.gpg.SpackGPGError:
|
||||
gpg = None
|
||||
return bool(gpg)
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def env_deactivate():
|
||||
yield
|
||||
@@ -494,6 +503,7 @@ def test_ci_pushyaml(tmpdir):
|
||||
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
@pytest.mark.skipif(not has_gpg(), reason='This test requires gpg')
|
||||
def test_push_mirror_contents(tmpdir, mutable_mock_env_path, env_deactivate,
|
||||
install_mockery, mock_packages, mock_fetch,
|
||||
mock_stage, mock_gnupghome):
|
||||
|
@@ -117,7 +117,7 @@ def test_uninstall_deprecated(mock_packages, mock_archive, mock_fetch,
|
||||
|
||||
non_deprecated = spack.store.db.query()
|
||||
|
||||
uninstall('-y', '-g', 'libelf@0.8.10')
|
||||
uninstall('-y', 'libelf@0.8.10')
|
||||
|
||||
assert spack.store.db.query() == spack.store.db.query(installed=any)
|
||||
assert spack.store.db.query() == non_deprecated
|
||||
|
@@ -169,9 +169,11 @@ def test_env_install_same_spec_twice(install_mockery, mock_fetch, capfd):
|
||||
e = ev.read('test')
|
||||
with capfd.disabled():
|
||||
with e:
|
||||
# The first installation outputs the package prefix
|
||||
install('cmake-client')
|
||||
# The second installation attempt will also update the view
|
||||
out = install('cmake-client')
|
||||
assert 'is already installed in' in out
|
||||
assert 'Updating view at' in out
|
||||
|
||||
|
||||
def test_remove_after_concretize():
|
||||
@@ -368,6 +370,54 @@ def test_init_from_yaml(tmpdir):
|
||||
assert not e2.specs_by_hash
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('config')
|
||||
def test_env_view_external_prefix(tmpdir_factory, mutable_database,
|
||||
mock_packages):
|
||||
fake_prefix = tmpdir_factory.mktemp('a-prefix')
|
||||
fake_bin = fake_prefix.join('bin')
|
||||
fake_bin.ensure(dir=True)
|
||||
|
||||
initial_yaml = StringIO("""\
|
||||
env:
|
||||
specs:
|
||||
- a
|
||||
view: true
|
||||
""")
|
||||
|
||||
external_config = StringIO("""\
|
||||
packages:
|
||||
a:
|
||||
paths:
|
||||
a: {a_prefix}
|
||||
buildable: false
|
||||
""".format(a_prefix=str(fake_prefix)))
|
||||
external_config_dict = spack.util.spack_yaml.load_config(external_config)
|
||||
|
||||
test_scope = spack.config.InternalConfigScope(
|
||||
'env-external-test', data=external_config_dict)
|
||||
with spack.config.override(test_scope):
|
||||
|
||||
e = ev.create('test', initial_yaml)
|
||||
e.concretize()
|
||||
# Note: normally installing specs in a test environment requires doing
|
||||
# a fake install, but not for external specs since no actions are
|
||||
# taken to install them. The installation commands also include
|
||||
# post-installation functions like DB-registration, so are important
|
||||
# to do (otherwise the package is not considered installed).
|
||||
e.install_all()
|
||||
e.write()
|
||||
|
||||
env_modifications = e.add_default_view_to_shell('sh')
|
||||
individual_modifications = env_modifications.split('\n')
|
||||
|
||||
def path_includes_fake_prefix(cmd):
|
||||
return 'export PATH' in cmd and str(fake_bin) in cmd
|
||||
|
||||
assert any(
|
||||
path_includes_fake_prefix(cmd) for cmd in individual_modifications
|
||||
)
|
||||
|
||||
|
||||
def test_init_with_file_and_remove(tmpdir):
|
||||
"""Ensure a user can remove from any position in the spack.yaml file."""
|
||||
path = tmpdir.join('spack.yaml')
|
||||
|
@@ -69,6 +69,11 @@ def check_output(ni, na):
|
||||
check_output(1, 1)
|
||||
|
||||
|
||||
def test_extensions_no_arguments(mock_packages):
|
||||
out = extensions()
|
||||
assert 'python' in out
|
||||
|
||||
|
||||
def test_extensions_raises_if_not_extendable(mock_packages):
|
||||
with pytest.raises(SpackCommandError):
|
||||
extensions("flake8")
|
||||
|
@@ -52,8 +52,16 @@ def test_no_gpg_in_path(tmpdir, mock_gnupghome, monkeypatch):
|
||||
spack.util.gpg.Gpg.gpg()
|
||||
|
||||
|
||||
def has_gpg():
|
||||
try:
|
||||
gpg = spack.util.gpg.Gpg.gpg()
|
||||
except spack.util.gpg.SpackGPGError:
|
||||
gpg = None
|
||||
return bool(gpg)
|
||||
|
||||
|
||||
@pytest.mark.maybeslow
|
||||
@pytest.mark.skipif(not spack.util.gpg.Gpg.gpg(),
|
||||
@pytest.mark.skipif(not has_gpg(),
|
||||
reason='These tests require gnupg2')
|
||||
def test_gpg(tmpdir, mock_gnupghome):
|
||||
# Verify a file with an empty keyring.
|
||||
|
@@ -54,46 +54,6 @@ def test_install_package_and_dependency(
|
||||
assert 'errors="0"' in content
|
||||
|
||||
|
||||
def test_global_install_package_and_dependency(
|
||||
tmpdir, mock_packages, mock_archive, mock_fetch, config,
|
||||
install_mockery):
|
||||
|
||||
with tmpdir.as_cwd():
|
||||
install('--global',
|
||||
'--log-format=junit',
|
||||
'--log-file=test.xml',
|
||||
'libdwarf')
|
||||
|
||||
files = tmpdir.listdir()
|
||||
filename = tmpdir.join('test.xml')
|
||||
assert filename in files
|
||||
|
||||
content = filename.open().read()
|
||||
assert 'tests="2"' in content
|
||||
assert 'failures="0"' in content
|
||||
assert 'errors="0"' in content
|
||||
|
||||
|
||||
def test_upstream_install_package_and_dependency(
|
||||
tmpdir, mock_packages, mock_archive, mock_fetch, config,
|
||||
install_mockery):
|
||||
|
||||
with tmpdir.as_cwd():
|
||||
install('--upstream=global',
|
||||
'--log-format=junit',
|
||||
'--log-file=test.xml',
|
||||
'libdwarf')
|
||||
|
||||
files = tmpdir.listdir()
|
||||
filename = tmpdir.join('test.xml')
|
||||
assert filename in files
|
||||
|
||||
content = filename.open().read()
|
||||
assert 'tests="2"' in content
|
||||
assert 'failures="0"' in content
|
||||
assert 'errors="0"' in content
|
||||
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_install_runtests_notests(monkeypatch, mock_packages, install_mockery):
|
||||
def check(pkg):
|
||||
@@ -179,8 +139,7 @@ def test_install_output_on_build_error(mock_packages, mock_archive, mock_fetch,
|
||||
# capfd interferes with Spack's capturing
|
||||
with capfd.disabled():
|
||||
out = install('build-error', fail_on_error=False)
|
||||
assert isinstance(install.error, spack.build_environment.ChildError)
|
||||
assert install.error.name == 'ProcessError'
|
||||
assert 'ProcessError' in out
|
||||
assert 'configure: error: in /path/to/some/file:' in out
|
||||
assert 'configure: error: cannot run C compiled programs.' in out
|
||||
|
||||
@@ -217,9 +176,10 @@ def test_show_log_on_error(mock_packages, mock_archive, mock_fetch,
|
||||
assert install.error.pkg.name == 'build-error'
|
||||
assert 'Full build log:' in out
|
||||
|
||||
# Message shows up for ProcessError (1), ChildError (1), and output (1)
|
||||
errors = [line for line in out.split('\n')
|
||||
if 'configure: error: cannot run C compiled programs' in line]
|
||||
assert len(errors) == 2
|
||||
assert len(errors) == 3
|
||||
|
||||
|
||||
def test_install_overwrite(
|
||||
@@ -413,8 +373,12 @@ def just_throw(*args, **kwargs):
|
||||
exc_type = getattr(builtins, exc_typename)
|
||||
raise exc_type(msg)
|
||||
|
||||
monkeypatch.setattr(spack.package.PackageBase, 'do_install', just_throw)
|
||||
monkeypatch.setattr(spack.installer.PackageInstaller, '_install_task',
|
||||
just_throw)
|
||||
|
||||
# TODO: Why does junit output capture appear to swallow the exception
|
||||
# TODO: as evidenced by the two failing packages getting tagged as
|
||||
# TODO: installed?
|
||||
with tmpdir.as_cwd():
|
||||
install('--log-format=junit', '--log-file=test.xml', 'libdwarf')
|
||||
|
||||
@@ -424,14 +388,14 @@ def just_throw(*args, **kwargs):
|
||||
|
||||
content = filename.open().read()
|
||||
|
||||
# Count failures and errors correctly
|
||||
assert 'tests="1"' in content
|
||||
# Count failures and errors correctly: libdwarf _and_ libelf
|
||||
assert 'tests="2"' in content
|
||||
assert 'failures="0"' in content
|
||||
assert 'errors="1"' in content
|
||||
assert 'errors="2"' in content
|
||||
|
||||
# We want to have both stdout and stderr
|
||||
assert '<system-out>' in content
|
||||
assert msg in content
|
||||
assert 'error message="{0}"'.format(msg) in content
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('noop_install', 'config')
|
||||
@@ -518,9 +482,8 @@ def test_cdash_upload_build_error(tmpdir, mock_fetch, install_mockery,
|
||||
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_cdash_upload_clean_build(tmpdir, mock_fetch, install_mockery,
|
||||
capfd):
|
||||
# capfd interferes with Spack's capturing
|
||||
def test_cdash_upload_clean_build(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
# capfd interferes with Spack's capturing of e.g., Build.xml output
|
||||
with capfd.disabled():
|
||||
with tmpdir.as_cwd():
|
||||
install(
|
||||
@@ -538,7 +501,7 @@ def test_cdash_upload_clean_build(tmpdir, mock_fetch, install_mockery,
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_cdash_upload_extra_params(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
# capfd interferes with Spack's capturing
|
||||
# capfd interferes with Spack's capture of e.g., Build.xml output
|
||||
with capfd.disabled():
|
||||
with tmpdir.as_cwd():
|
||||
install(
|
||||
@@ -560,7 +523,7 @@ def test_cdash_upload_extra_params(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_cdash_buildstamp_param(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
# capfd interferes with Spack's capturing
|
||||
# capfd interferes with Spack's capture of e.g., Build.xml output
|
||||
with capfd.disabled():
|
||||
with tmpdir.as_cwd():
|
||||
cdash_track = 'some_mocked_track'
|
||||
@@ -609,7 +572,6 @@ def test_cdash_install_from_spec_yaml(tmpdir, mock_fetch, install_mockery,
|
||||
report_file = report_dir.join('a_Configure.xml')
|
||||
assert report_file in report_dir.listdir()
|
||||
content = report_file.open().read()
|
||||
import re
|
||||
install_command_regex = re.compile(
|
||||
r'<ConfigureCommand>(.+)</ConfigureCommand>',
|
||||
re.MULTILINE | re.DOTALL)
|
||||
@@ -639,6 +601,7 @@ def test_build_warning_output(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
msg = ''
|
||||
try:
|
||||
install('build-warnings')
|
||||
assert False, "no exception was raised!"
|
||||
except spack.build_environment.ChildError as e:
|
||||
msg = e.long_message
|
||||
|
||||
@@ -647,12 +610,16 @@ def test_build_warning_output(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
|
||||
|
||||
def test_cache_only_fails(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
msg = ''
|
||||
with capfd.disabled():
|
||||
try:
|
||||
install('--cache-only', 'libdwarf')
|
||||
assert False
|
||||
except spack.main.SpackCommandError:
|
||||
pass
|
||||
except spack.installer.InstallError as e:
|
||||
msg = str(e)
|
||||
|
||||
# libelf from cache failed to install, which automatically removed the
|
||||
# the libdwarf build task and flagged the package as failed to install.
|
||||
assert 'Installation of libdwarf failed' in msg
|
||||
|
||||
|
||||
def test_install_only_dependencies(tmpdir, mock_fetch, install_mockery):
|
||||
@@ -665,6 +632,30 @@ def test_install_only_dependencies(tmpdir, mock_fetch, install_mockery):
|
||||
assert not os.path.exists(root.prefix)
|
||||
|
||||
|
||||
def test_install_only_package(tmpdir, mock_fetch, install_mockery, capfd):
|
||||
msg = ''
|
||||
with capfd.disabled():
|
||||
try:
|
||||
install('--only', 'package', 'dependent-install')
|
||||
except spack.installer.InstallError as e:
|
||||
msg = str(e)
|
||||
|
||||
assert 'Cannot proceed with dependent-install' in msg
|
||||
assert '1 uninstalled dependency' in msg
|
||||
|
||||
|
||||
def test_install_deps_then_package(tmpdir, mock_fetch, install_mockery):
|
||||
dep = Spec('dependency-install').concretized()
|
||||
root = Spec('dependent-install').concretized()
|
||||
|
||||
install('--only', 'dependencies', 'dependent-install')
|
||||
assert os.path.exists(dep.prefix)
|
||||
assert not os.path.exists(root.prefix)
|
||||
|
||||
install('--only', 'package', 'dependent-install')
|
||||
assert os.path.exists(root.prefix)
|
||||
|
||||
|
||||
@pytest.mark.regression('12002')
|
||||
def test_install_only_dependencies_in_env(tmpdir, mock_fetch, install_mockery,
|
||||
mutable_mock_env_path):
|
||||
|
@@ -3,6 +3,8 @@
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import pytest
|
||||
|
||||
import spack
|
||||
from spack.main import SpackCommand
|
||||
|
||||
@@ -12,3 +14,17 @@
|
||||
def test_python():
|
||||
out = python('-c', 'import spack; print(spack.spack_version)')
|
||||
assert out.strip() == spack.spack_version
|
||||
|
||||
|
||||
def test_python_with_module():
|
||||
# pytest rewrites a lot of modules, which interferes with runpy, so
|
||||
# it's hard to test this. Trying to import a module like sys, that
|
||||
# has no code associated with it, raises an error reliably in python
|
||||
# 2 and 3, which indicates we successfully ran runpy.run_module.
|
||||
with pytest.raises(ImportError, match="No code object"):
|
||||
python('-m', 'sys')
|
||||
|
||||
|
||||
def test_python_raises():
|
||||
out = python('--foobar', fail_on_error=False)
|
||||
assert "Error: Unknown arguments" in out
|
||||
|
@@ -6,6 +6,7 @@
|
||||
from spack.main import SpackCommand
|
||||
|
||||
spack_test = SpackCommand('test')
|
||||
cmd_test_py = 'lib/spack/spack/test/cmd/test.py'
|
||||
|
||||
|
||||
def test_list():
|
||||
@@ -16,13 +17,13 @@ def test_list():
|
||||
|
||||
|
||||
def test_list_with_pytest_arg():
|
||||
output = spack_test('--list', 'cmd/test.py')
|
||||
assert output.strip() == "cmd/test.py"
|
||||
output = spack_test('--list', cmd_test_py)
|
||||
assert output.strip() == cmd_test_py
|
||||
|
||||
|
||||
def test_list_with_keywords():
|
||||
output = spack_test('--list', '-k', 'cmd/test.py')
|
||||
assert output.strip() == "cmd/test.py"
|
||||
assert output.strip() == cmd_test_py
|
||||
|
||||
|
||||
def test_list_long(capsys):
|
||||
@@ -44,7 +45,7 @@ def test_list_long(capsys):
|
||||
|
||||
def test_list_long_with_pytest_arg(capsys):
|
||||
with capsys.disabled():
|
||||
output = spack_test('--list-long', 'cmd/test.py')
|
||||
output = spack_test('--list-long', cmd_test_py)
|
||||
assert "test.py::\n" in output
|
||||
assert "test_list" in output
|
||||
assert "test_list_with_pytest_arg" in output
|
||||
@@ -74,7 +75,7 @@ def test_list_names():
|
||||
|
||||
|
||||
def test_list_names_with_pytest_arg():
|
||||
output = spack_test('--list-names', 'cmd/test.py')
|
||||
output = spack_test('--list-names', cmd_test_py)
|
||||
assert "test.py::test_list\n" in output
|
||||
assert "test.py::test_list_with_pytest_arg\n" in output
|
||||
assert "test.py::test_list_with_keywords\n" in output
|
||||
|
@@ -4,6 +4,7 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import pytest
|
||||
import llnl.util.tty as tty
|
||||
import spack.store
|
||||
from spack.main import SpackCommand, SpackCommandError
|
||||
|
||||
@@ -30,7 +31,7 @@ def test_multiple_matches(mutable_database):
|
||||
|
||||
@pytest.mark.db
|
||||
def test_installed_dependents(mutable_database):
|
||||
"""Test can't uninstall when ther are installed dependents."""
|
||||
"""Test can't uninstall when there are installed dependents."""
|
||||
with pytest.raises(SpackCommandError):
|
||||
uninstall('-y', 'libelf')
|
||||
|
||||
@@ -80,41 +81,6 @@ def test_force_uninstall_spec_with_ref_count_not_zero(
|
||||
|
||||
|
||||
@pytest.mark.db
|
||||
@pytest.mark.usefixtures('mutable_database')
|
||||
def test_global_recursive_uninstall():
|
||||
"""Test recursive uninstall from global upstream"""
|
||||
uninstall('-g', '-y', '-a', '--dependents', 'callpath')
|
||||
|
||||
all_specs = spack.store.layout.all_specs()
|
||||
assert len(all_specs) == 8
|
||||
# query specs with multiple configurations
|
||||
mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
|
||||
callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
|
||||
mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
|
||||
|
||||
assert len(mpileaks_specs) == 0
|
||||
assert len(callpath_specs) == 0
|
||||
assert len(mpi_specs) == 3
|
||||
|
||||
|
||||
@pytest.mark.db
|
||||
@pytest.mark.usefixtures('mutable_database')
|
||||
def test_upstream_recursive_uninstall():
|
||||
"""Test recursive uninstall from specified upstream"""
|
||||
uninstall('--upstream=global', '-y', '-a', '--dependents', 'callpath')
|
||||
|
||||
all_specs = spack.store.layout.all_specs()
|
||||
assert len(all_specs) == 8
|
||||
# query specs with multiple configurations
|
||||
mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
|
||||
callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
|
||||
mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
|
||||
|
||||
assert len(mpileaks_specs) == 0
|
||||
assert len(callpath_specs) == 0
|
||||
assert len(mpi_specs) == 3
|
||||
|
||||
|
||||
def test_force_uninstall_and_reinstall_by_hash(mutable_database):
|
||||
"""Test forced uninstall and reinstall of old specs."""
|
||||
# this is the spec to be removed
|
||||
@@ -137,12 +103,12 @@ def validate_callpath_spec(installed):
|
||||
specs = spack.store.db.get_by_hash(dag_hash[:7], installed=any)
|
||||
assert len(specs) == 1 and specs[0] == callpath_spec
|
||||
|
||||
# specs = spack.store.db.get_by_hash(dag_hash, installed=not installed)
|
||||
# assert specs is None
|
||||
specs = spack.store.db.get_by_hash(dag_hash, installed=not installed)
|
||||
assert specs is None
|
||||
|
||||
# specs = spack.store.db.get_by_hash(dag_hash[:7],
|
||||
# installed=not installed)
|
||||
# assert specs is None
|
||||
specs = spack.store.db.get_by_hash(dag_hash[:7],
|
||||
installed=not installed)
|
||||
assert specs is None
|
||||
|
||||
mpileaks_spec = spack.store.db.query_one('mpileaks ^mpich')
|
||||
assert callpath_spec in mpileaks_spec
|
||||
@@ -190,3 +156,16 @@ def db_specs():
|
||||
assert len(mpileaks_specs) == 3
|
||||
assert len(callpath_specs) == 3 # back to 3
|
||||
assert len(mpi_specs) == 3
|
||||
|
||||
|
||||
@pytest.mark.db
|
||||
@pytest.mark.regression('15773')
|
||||
def test_in_memory_consistency_when_uninstalling(
|
||||
mutable_database, monkeypatch
|
||||
):
|
||||
"""Test that uninstalling doesn't raise warnings"""
|
||||
def _warn(*args, **kwargs):
|
||||
raise RuntimeError('a warning was triggered!')
|
||||
monkeypatch.setattr(tty, 'warn', _warn)
|
||||
# Now try to uninstall and check this doesn't trigger warnings
|
||||
uninstall('-y', '-a')
|
||||
|
@@ -485,3 +485,28 @@ def test_fj_version_detection(version_str, expected_version):
|
||||
def test_detecting_mixed_toolchains(compiler_spec, expected_result, config):
|
||||
compiler = spack.compilers.compilers_for_spec(compiler_spec).pop()
|
||||
assert spack.compilers.is_mixed_toolchain(compiler) is expected_result
|
||||
|
||||
|
||||
@pytest.mark.regression('14798,13733')
|
||||
def test_raising_if_compiler_target_is_over_specific(config):
|
||||
# Compiler entry with an overly specific target
|
||||
compilers = [{'compiler': {
|
||||
'spec': 'gcc@9.0.1',
|
||||
'paths': {
|
||||
'cc': '/usr/bin/gcc-9',
|
||||
'cxx': '/usr/bin/g++-9',
|
||||
'f77': '/usr/bin/gfortran-9',
|
||||
'fc': '/usr/bin/gfortran-9'
|
||||
},
|
||||
'flags': {},
|
||||
'operating_system': 'ubuntu18.04',
|
||||
'target': 'haswell',
|
||||
'modules': [],
|
||||
'environment': {},
|
||||
'extra_rpaths': []
|
||||
}}]
|
||||
arch_spec = spack.spec.ArchSpec(('linux', 'ubuntu18.04', 'haswell'))
|
||||
with spack.config.override('compilers', compilers):
|
||||
cfg = spack.compilers.get_compiler_config()
|
||||
with pytest.raises(ValueError):
|
||||
spack.compilers.get_compilers(cfg, 'gcc@9.0.1', arch_spec)
|
||||
|
@@ -620,3 +620,16 @@ def test_adjusting_default_target_based_on_compiler(
|
||||
with spack.concretize.disable_compiler_existence_check():
|
||||
s = Spec(spec).concretized()
|
||||
assert str(s.architecture.target) == str(expected)
|
||||
|
||||
@pytest.mark.regression('8735,14730')
|
||||
def test_compiler_version_matches_any_entry_in_compilers_yaml(self):
|
||||
# Ensure that a concrete compiler with different compiler version
|
||||
# doesn't match (here it's 4.5 vs. 4.5.0)
|
||||
with pytest.raises(spack.concretize.UnavailableCompilerVersionError):
|
||||
s = Spec('mpileaks %gcc@4.5')
|
||||
s.concretize()
|
||||
|
||||
# An abstract compiler with a version list could resolve to 4.5.0
|
||||
s = Spec('mpileaks %gcc@4.5:')
|
||||
s.concretize()
|
||||
assert str(s.compiler.version) == '4.5.0'
|
||||
|
@@ -15,15 +15,15 @@
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def concretize_scope(config, tmpdir):
|
||||
def concretize_scope(mutable_config, tmpdir):
|
||||
"""Adds a scope for concretization preferences"""
|
||||
tmpdir.ensure_dir('concretize')
|
||||
config.push_scope(
|
||||
mutable_config.push_scope(
|
||||
ConfigScope('concretize', str(tmpdir.join('concretize'))))
|
||||
|
||||
yield
|
||||
|
||||
config.pop_scope()
|
||||
mutable_config.pop_scope()
|
||||
spack.repo.path._provider_index = None
|
||||
|
||||
|
||||
@@ -84,16 +84,24 @@ def test_preferred_variants(self):
|
||||
'mpileaks', debug=True, opt=True, shared=False, static=False
|
||||
)
|
||||
|
||||
def test_preferred_compilers(self, mutable_mock_repo):
|
||||
def test_preferred_compilers(self):
|
||||
"""Test preferred compilers are applied correctly
|
||||
"""
|
||||
update_packages('mpileaks', 'compiler', ['clang@3.3'])
|
||||
spec = concretize('mpileaks')
|
||||
assert spec.compiler == spack.spec.CompilerSpec('clang@3.3')
|
||||
# Need to make sure the test uses an available compiler
|
||||
compiler_list = spack.compilers.all_compiler_specs()
|
||||
assert compiler_list
|
||||
|
||||
update_packages('mpileaks', 'compiler', ['gcc@4.5.0'])
|
||||
# Try the first available compiler
|
||||
compiler = str(compiler_list[0])
|
||||
update_packages('mpileaks', 'compiler', [compiler])
|
||||
spec = concretize('mpileaks')
|
||||
assert spec.compiler == spack.spec.CompilerSpec('gcc@4.5.0')
|
||||
assert spec.compiler == spack.spec.CompilerSpec(compiler)
|
||||
|
||||
# Try the last available compiler
|
||||
compiler = str(compiler_list[-1])
|
||||
update_packages('mpileaks', 'compiler', [compiler])
|
||||
spec = concretize('mpileaks')
|
||||
assert spec.compiler == spack.spec.CompilerSpec(compiler)
|
||||
|
||||
def test_preferred_target(self, mutable_mock_repo):
|
||||
"""Test preferred compilers are applied correctly
|
||||
|
@@ -28,6 +28,7 @@
|
||||
import spack.database
|
||||
import spack.directory_layout
|
||||
import spack.environment as ev
|
||||
import spack.package
|
||||
import spack.package_prefs
|
||||
import spack.paths
|
||||
import spack.platforms.test
|
||||
@@ -38,7 +39,6 @@
|
||||
|
||||
from spack.util.pattern import Bunch
|
||||
from spack.dependency import Dependency
|
||||
from spack.package import PackageBase
|
||||
from spack.fetch_strategy import FetchStrategyComposite, URLFetchStrategy
|
||||
from spack.fetch_strategy import FetchError
|
||||
from spack.spec import Spec
|
||||
@@ -329,8 +329,18 @@ def mock_repo_path():
|
||||
yield spack.repo.RepoPath(spack.paths.mock_packages_path)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_pkg_install(monkeypatch):
|
||||
def _pkg_install_fn(pkg, spec, prefix):
|
||||
# sanity_check_prefix requires something in the install directory
|
||||
mkdirp(prefix.bin)
|
||||
|
||||
monkeypatch.setattr(spack.package.PackageBase, 'install', _pkg_install_fn,
|
||||
raising=False)
|
||||
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def mock_packages(mock_repo_path):
|
||||
def mock_packages(mock_repo_path, mock_pkg_install):
|
||||
"""Use the 'builtin.mock' repository instead of 'builtin'"""
|
||||
with use_repo(mock_repo_path):
|
||||
yield mock_repo_path
|
||||
@@ -515,6 +525,8 @@ def database(mock_store, mock_packages, config):
|
||||
"""This activates the mock store, packages, AND config."""
|
||||
with use_store(mock_store):
|
||||
yield mock_store.db
|
||||
# Force reading the database again between tests
|
||||
mock_store.db.last_seen_verifier = ''
|
||||
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
@@ -599,10 +611,10 @@ def mock_fetch(mock_archive):
|
||||
def fake_fn(self):
|
||||
return fetcher
|
||||
|
||||
orig_fn = PackageBase.fetcher
|
||||
PackageBase.fetcher = fake_fn
|
||||
orig_fn = spack.package.PackageBase.fetcher
|
||||
spack.package.PackageBase.fetcher = fake_fn
|
||||
yield
|
||||
PackageBase.fetcher = orig_fn
|
||||
spack.package.PackageBase.fetcher = orig_fn
|
||||
|
||||
|
||||
class MockLayout(object):
|
||||
|
@@ -1,5 +1,5 @@
|
||||
config:
|
||||
install_tree: ~/.spack/opt/spack
|
||||
install_tree: $spack/opt/spack
|
||||
template_dirs:
|
||||
- $spack/share/spack/templates
|
||||
- $spack/lib/spack/spack/test/data/templates
|
||||
@@ -7,7 +7,7 @@ config:
|
||||
build_stage:
|
||||
- $tempdir/$user/spack-stage
|
||||
- ~/.spack/stage
|
||||
source_cache: ~/.spack/var/spack/cache
|
||||
source_cache: $spack/var/spack/cache
|
||||
misc_cache: ~/.spack/cache
|
||||
verify_ssl: true
|
||||
checksum: true
|
||||
|
@@ -1,7 +0,0 @@
|
||||
upstreams:
|
||||
global:
|
||||
install_tree: $spack/opt/spack
|
||||
modules:
|
||||
tcl: $spack/share/spack/modules
|
||||
lmod: $spack/share/spack/lmod
|
||||
dotkit: $spack/share/spack/dotkit
|
@@ -5,3 +5,4 @@ tcl:
|
||||
suffixes:
|
||||
'+debug': foo
|
||||
'~debug': bar
|
||||
'^mpich': foo
|
||||
|
10
lib/spack/spack/test/data/sourceme_lmod.sh
Normal file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

export LMOD_VARIABLE=foo
export LMOD_ANOTHER_VARIABLE=bar
export NEW_VAR=new
|
@@ -1,7 +0,0 @@
|
||||
upstreams:
|
||||
global:
|
||||
install_tree: $spack/opt/spack
|
||||
modules:
|
||||
tcl: $spack/share/spack/modules
|
||||
lmod: $spack/share/spack/lmod
|
||||
dotkit: $spack/share/spack/dotkit
|
@@ -13,8 +13,14 @@
|
||||
import os
|
||||
import pytest
|
||||
import json
|
||||
import shutil
|
||||
try:
|
||||
import uuid
|
||||
_use_uuid = True
|
||||
except ImportError:
|
||||
_use_uuid = False
|
||||
pass
|
||||
|
||||
import llnl.util.lock as lk
|
||||
from llnl.util.tty.colify import colify
|
||||
|
||||
import spack.repo
|
||||
@@ -39,19 +45,6 @@ def test_store(tmpdir):
|
||||
spack.store.store = real_store
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def test_global_db_initializtion():
|
||||
global_store = spack.store.store
|
||||
global_db_path = '$spack/opt/spack'
|
||||
global_db_path = spack.util.path.canonicalize_path(global_db_path)
|
||||
shutil.rmtree(os.path.join(global_db_path, '.spack-db'))
|
||||
global_store = spack.store.Store(str(global_db_path))
|
||||
|
||||
yield
|
||||
|
||||
spack.store.store = global_store
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
|
||||
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
|
||||
@@ -482,6 +475,21 @@ def test_015_write_and_read(mutable_database):
|
||||
assert new_rec.installed == rec.installed
|
||||
|
||||
|
||||
def test_017_write_and_read_without_uuid(mutable_database, monkeypatch):
|
||||
monkeypatch.setattr(spack.database, '_use_uuid', False)
|
||||
# write and read DB
|
||||
with spack.store.db.write_transaction():
|
||||
specs = spack.store.db.query()
|
||||
recs = [spack.store.db.get_record(s) for s in specs]
|
||||
|
||||
for spec, rec in zip(specs, recs):
|
||||
new_rec = spack.store.db.get_record(spec)
|
||||
assert new_rec.ref_count == rec.ref_count
|
||||
assert new_rec.spec == rec.spec
|
||||
assert new_rec.path == rec.path
|
||||
assert new_rec.installed == rec.installed
|
||||
|
||||
|
||||
def test_020_db_sanity(database):
|
||||
"""Make sure query() returns what's actually in the db."""
|
||||
_check_db_sanity(database)
|
||||
@@ -716,6 +724,9 @@ def test_old_external_entries_prefix(mutable_database):
|
||||
|
||||
with open(spack.store.db._index_path, 'w') as f:
|
||||
f.write(json.dumps(db_obj))
|
||||
if _use_uuid:
|
||||
with open(spack.store.db._verifier_path, 'w') as f:
|
||||
f.write(str(uuid.uuid4()))
|
||||
|
||||
record = spack.store.db.get_record(s)
|
||||
|
||||
@@ -763,3 +774,118 @@ def test_query_spec_with_non_conditional_virtual_dependency(database):
|
||||
# dependency that are not conditional on variants
|
||||
results = spack.store.db.query_local('mpileaks ^mpich')
|
||||
assert len(results) == 1
|
||||
|
||||
|
||||
def test_failed_spec_path_error(database):
|
||||
"""Ensure spec not concrete check is covered."""
|
||||
s = spack.spec.Spec('a')
|
||||
with pytest.raises(ValueError, match='Concrete spec required'):
|
||||
spack.store.db._failed_spec_path(s)
|
||||
|
||||
|
||||
@pytest.mark.db
|
||||
def test_clear_failure_keep(mutable_database, monkeypatch, capfd):
|
||||
"""Add test coverage for clear_failure operation when to be retained."""
|
||||
def _is(db, spec):
|
||||
return True
|
||||
|
||||
# Pretend the spec has been failure locked
|
||||
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
|
||||
|
||||
s = spack.spec.Spec('a')
|
||||
spack.store.db.clear_failure(s)
|
||||
out = capfd.readouterr()[0]
|
||||
assert 'Retaining failure marking' in out
|
||||
|
||||
|
||||
@pytest.mark.db
|
||||
def test_clear_failure_forced(mutable_database, monkeypatch, capfd):
|
||||
"""Add test coverage for clear_failure operation when force."""
|
||||
def _is(db, spec):
|
||||
return True
|
||||
|
||||
# Pretend the spec has been failure locked
|
||||
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
|
||||
# Ensure raise OSError when try to remove the non-existent marking
|
||||
monkeypatch.setattr(spack.database.Database, 'prefix_failure_marked', _is)
|
||||
|
||||
s = spack.spec.Spec('a').concretized()
|
||||
spack.store.db.clear_failure(s, force=True)
|
||||
out = capfd.readouterr()[1]
|
||||
assert 'Removing failure marking despite lock' in out
|
||||
assert 'Unable to remove failure marking' in out
|
||||
|
||||
|
||||
@pytest.mark.db
|
||||
def test_mark_failed(mutable_database, monkeypatch, tmpdir, capsys):
|
||||
"""Add coverage to mark_failed."""
|
||||
def _raise_exc(lock):
|
||||
raise lk.LockTimeoutError('Mock acquire_write failure')
|
||||
|
||||
# Ensure attempt to acquire write lock on the mark raises the exception
|
||||
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise_exc)
|
||||
|
||||
with tmpdir.as_cwd():
|
||||
s = spack.spec.Spec('a').concretized()
|
||||
spack.store.db.mark_failed(s)
|
||||
|
||||
out = str(capsys.readouterr()[1])
|
||||
assert 'Unable to mark a as failed' in out
|
||||
|
||||
# Clean up the failure mark to ensure it does not interfere with other
|
||||
# tests using the same spec.
|
||||
del spack.store.db._prefix_failures[s.prefix]
|
||||
|
||||
|
||||
@pytest.mark.db
|
||||
def test_prefix_failed(mutable_database, monkeypatch):
|
||||
"""Add coverage to prefix_failed operation."""
|
||||
def _is(db, spec):
|
||||
return True
|
||||
|
||||
s = spack.spec.Spec('a').concretized()
|
||||
|
||||
# Confirm the spec is not already marked as failed
|
||||
assert not spack.store.db.prefix_failed(s)
|
||||
|
||||
# Check that a failure entry is sufficient
|
||||
spack.store.db._prefix_failures[s.prefix] = None
|
||||
assert spack.store.db.prefix_failed(s)
|
||||
|
||||
# Remove the entry and check again
|
||||
del spack.store.db._prefix_failures[s.prefix]
|
||||
assert not spack.store.db.prefix_failed(s)
|
||||
|
||||
# Now pretend that the prefix failure is locked
|
||||
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
|
||||
assert spack.store.db.prefix_failed(s)
|
||||
|
||||
|
||||
def test_prefix_read_lock_error(mutable_database, monkeypatch):
|
||||
"""Cover the prefix read lock exception."""
|
||||
def _raise(db, spec):
|
||||
raise lk.LockError('Mock lock error')
|
||||
|
||||
s = spack.spec.Spec('a').concretized()
|
||||
|
||||
# Ensure subsequent lock operations fail
|
||||
monkeypatch.setattr(lk.Lock, 'acquire_read', _raise)
|
||||
|
||||
with pytest.raises(Exception):
|
||||
with spack.store.db.prefix_read_lock(s):
|
||||
assert False
|
||||
|
||||
|
||||
def test_prefix_write_lock_error(mutable_database, monkeypatch):
|
||||
"""Cover the prefix write lock exception."""
|
||||
def _raise(db, spec):
|
||||
raise lk.LockError('Mock lock error')
|
||||
|
||||
s = spack.spec.Spec('a').concretized()
|
||||
|
||||
# Ensure subsequent lock operations fail
|
||||
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise)
|
||||
|
||||
with pytest.raises(Exception):
|
||||
with spack.store.db.prefix_write_lock(s):
|
||||
assert False
|
||||
|
@@ -437,3 +437,14 @@ def test_from_environment_diff(before, after, search_list):
|
||||
|
||||
for item in search_list:
|
||||
assert item in mod
|
||||
|
||||
|
||||
@pytest.mark.regression('15775')
|
||||
def test_blacklist_lmod_variables():
|
||||
# Construct the list of environment modifications
|
||||
file = os.path.join(datadir, 'sourceme_lmod.sh')
|
||||
env = EnvironmentModifications.from_sourcing_file(file)
|
||||
|
||||
# Check that variables related to lmod are not in there
|
||||
modifications = env.group_by_name()
|
||||
assert not any(x.startswith('LMOD_') for x in modifications)
|
||||
|
@@ -7,7 +7,7 @@
|
||||
import pytest
|
||||
import shutil
|
||||
|
||||
from llnl.util.filesystem import mkdirp, touch, working_dir
|
||||
import llnl.util.filesystem as fs
|
||||
|
||||
from spack.package import InstallError, PackageBase, PackageStillNeededError
|
||||
import spack.error
|
||||
@@ -15,7 +15,8 @@
|
||||
import spack.repo
|
||||
import spack.store
|
||||
from spack.spec import Spec
|
||||
from spack.package import _spack_build_envfile, _spack_build_logfile
|
||||
from spack.package import (_spack_build_envfile, _spack_build_logfile,
|
||||
_spack_configure_argsfile)
|
||||
|
||||
|
||||
def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
|
||||
@@ -100,6 +101,9 @@ def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
|
||||
rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
|
||||
spack.package.Package.remove_prefix = rm_prefix_checker.remove_prefix
|
||||
|
||||
# must clear failure markings for the package before re-installing it
|
||||
spack.store.db.clear_failure(spec, True)
|
||||
|
||||
pkg.succeed = True
|
||||
pkg.stage = MockStage(pkg.stage)
|
||||
|
||||
@@ -264,6 +268,9 @@ def test_partial_install_keep_prefix(install_mockery, mock_fetch):
|
||||
pkg.do_install(keep_prefix=True)
|
||||
assert os.path.exists(pkg.prefix)
|
||||
|
||||
# must clear failure markings for the package before re-installing it
|
||||
spack.store.db.clear_failure(spec, True)
|
||||
|
||||
pkg.succeed = True # make the build succeed
|
||||
pkg.stage = MockStage(pkg.stage)
|
||||
pkg.do_install(keep_prefix=True)
|
||||
@@ -300,12 +307,13 @@ def test_store(install_mockery, mock_fetch):
|
||||
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_failing_build(install_mockery, mock_fetch):
|
||||
def test_failing_build(install_mockery, mock_fetch, capfd):
|
||||
spec = Spec('failing-build').concretized()
|
||||
pkg = spec.package
|
||||
|
||||
with pytest.raises(spack.build_environment.ChildError):
|
||||
pkg.do_install()
|
||||
assert 'InstallError: Expected Failure' in capfd.readouterr()[0]
|
||||
|
||||
|
||||
class MockInstallError(spack.error.SpackError):
|
||||
@@ -372,11 +380,11 @@ def test_pkg_build_paths(install_mockery):
|
||||
|
||||
# Backward compatibility checks
|
||||
log_dir = os.path.dirname(log_path)
|
||||
mkdirp(log_dir)
|
||||
with working_dir(log_dir):
|
||||
fs.mkdirp(log_dir)
|
||||
with fs.working_dir(log_dir):
|
||||
# Start with the older of the previous log filenames
|
||||
older_log = 'spack-build.out'
|
||||
touch(older_log)
|
||||
fs.touch(older_log)
|
||||
assert spec.package.log_path.endswith(older_log)
|
||||
|
||||
# Now check the newer log filename
|
||||
@@ -403,13 +411,16 @@ def test_pkg_install_paths(install_mockery):
|
||||
env_path = os.path.join(spec.prefix, '.spack', _spack_build_envfile)
|
||||
assert spec.package.install_env_path == env_path
|
||||
|
||||
args_path = os.path.join(spec.prefix, '.spack', _spack_configure_argsfile)
|
||||
assert spec.package.install_configure_args_path == args_path
|
||||
|
||||
# Backward compatibility checks
|
||||
log_dir = os.path.dirname(log_path)
|
||||
mkdirp(log_dir)
|
||||
with working_dir(log_dir):
|
||||
fs.mkdirp(log_dir)
|
||||
with fs.working_dir(log_dir):
|
||||
# Start with the older of the previous install log filenames
|
||||
older_log = 'build.out'
|
||||
touch(older_log)
|
||||
fs.touch(older_log)
|
||||
assert spec.package.install_log_path.endswith(older_log)
|
||||
|
||||
# Now check the newer install log filename
|
||||
@@ -426,29 +437,70 @@ def test_pkg_install_paths(install_mockery):
|
||||
shutil.rmtree(log_dir)
|
||||
|
||||
|
||||
def test_pkg_install_log(install_mockery):
|
||||
def test_log_install_without_build_files(install_mockery):
|
||||
"""Test the installer log function when no build files are present."""
|
||||
# Get a basic concrete spec for the trivial install package.
|
||||
spec = Spec('trivial-install-test-package').concretized()
|
||||
|
||||
# Attempt installing log without the build log file
|
||||
with pytest.raises(IOError, match="No such file or directory"):
|
||||
spec.package.log()
|
||||
spack.installer.log(spec.package)
|
||||
|
||||
# Set up mock build files and try again
|
||||
|
||||
def test_log_install_with_build_files(install_mockery, monkeypatch):
|
||||
"""Test the installer's log function when have build files."""
|
||||
config_log = 'config.log'
|
||||
|
||||
# Retain the original function for use in the monkey patch that is used
|
||||
# to raise an exception under the desired condition for test coverage.
|
||||
orig_install_fn = fs.install
|
||||
|
||||
def _install(src, dest):
|
||||
orig_install_fn(src, dest)
|
||||
if src.endswith(config_log):
|
||||
raise Exception('Mock log install error')
|
||||
|
||||
monkeypatch.setattr(fs, 'install', _install)
|
||||
|
||||
spec = Spec('trivial-install-test-package').concretized()
|
||||
|
||||
# Set up mock build files and try again to include archive failure
|
||||
log_path = spec.package.log_path
|
||||
log_dir = os.path.dirname(log_path)
|
||||
mkdirp(log_dir)
|
||||
with working_dir(log_dir):
|
||||
touch(log_path)
|
||||
touch(spec.package.env_path)
|
||||
fs.mkdirp(log_dir)
|
||||
with fs.working_dir(log_dir):
|
||||
fs.touch(log_path)
|
||||
fs.touch(spec.package.env_path)
|
||||
fs.touch(spec.package.configure_args_path)
|
||||
|
||||
install_path = os.path.dirname(spec.package.install_log_path)
|
||||
mkdirp(install_path)
|
||||
fs.mkdirp(install_path)
|
||||
|
||||
spec.package.log()
|
||||
source = spec.package.stage.source_path
|
||||
config = os.path.join(source, 'config.log')
|
||||
fs.touchp(config)
|
||||
spec.package.archive_files = ['missing', '..', config]
|
||||
|
||||
spack.installer.log(spec.package)
|
||||
|
||||
assert os.path.exists(spec.package.install_log_path)
|
||||
assert os.path.exists(spec.package.install_env_path)
|
||||
assert os.path.exists(spec.package.install_configure_args_path)
|
||||
|
||||
archive_dir = os.path.join(install_path, 'archived-files')
|
||||
source_dir = os.path.dirname(source)
|
||||
rel_config = os.path.relpath(config, source_dir)
|
||||
|
||||
assert os.path.exists(os.path.join(archive_dir, rel_config))
|
||||
assert not os.path.exists(os.path.join(archive_dir, 'missing'))
|
||||
|
||||
expected_errs = [
|
||||
'OUTSIDE SOURCE PATH', # for '..'
|
||||
'FAILED TO ARCHIVE' # for rel_config
|
||||
]
|
||||
with open(os.path.join(archive_dir, 'errors.txt'), 'r') as fd:
|
||||
for ln, expected in zip(fd, expected_errs):
|
||||
assert expected in ln
|
||||
|
||||
# Cleanup
|
||||
shutil.rmtree(log_dir)
|
||||
@@ -469,3 +521,14 @@ def test_unconcretized_install(install_mockery, mock_fetch, mock_packages):
|
||||
|
||||
with pytest.raises(ValueError, match="only patch concrete packages"):
|
||||
spec.package.do_patch()
|
||||
|
||||
|
||||
def test_install_error():
|
||||
try:
|
||||
msg = 'test install error'
|
||||
long_msg = 'this is the long version of test install error'
|
||||
raise InstallError(msg, long_msg=long_msg)
|
||||
except Exception as exc:
|
||||
assert exc.__class__.__name__ == 'InstallError'
|
||||
assert exc.message == msg
|
||||
assert exc.long_message == long_msg
|
||||
|
845
lib/spack/spack/test/installer.py
Normal file
@@ -0,0 +1,845 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import os
|
||||
import py
|
||||
import pytest
|
||||
|
||||
import llnl.util.filesystem as fs
|
||||
import llnl.util.tty as tty
|
||||
import llnl.util.lock as ulk
|
||||
|
||||
import spack.binary_distribution
|
||||
import spack.compilers
|
||||
import spack.directory_layout as dl
|
||||
import spack.installer as inst
|
||||
import spack.package_prefs as prefs
|
||||
import spack.repo
|
||||
import spack.spec
|
||||
import spack.store
|
||||
import spack.util.lock as lk
|
||||
|
||||
|
||||
def _mock_repo(root, namespace):
|
||||
"""Create an empty repository at the specified root
|
||||
|
||||
Args:
|
||||
root (str): path to the mock repository root
|
||||
namespace (str): mock repo's namespace
|
||||
"""
|
||||
repodir = py.path.local(root) if isinstance(root, str) else root
|
||||
repodir.ensure(spack.repo.packages_dir_name, dir=True)
|
||||
yaml = repodir.join('repo.yaml')
|
||||
yaml.write("""
|
||||
repo:
|
||||
namespace: {0}
|
||||
""".format(namespace))
|
||||
|
||||
|
||||
def _noop(*args, **kwargs):
|
||||
"""Generic monkeypatch no-op routine."""
|
||||
pass
|
||||
|
||||
|
||||
def _none(*args, **kwargs):
|
||||
"""Generic monkeypatch function that always returns None."""
|
||||
return None
|
||||
|
||||
|
||||
def _not_locked(installer, lock_type, pkg):
|
||||
"""Generic monkeypatch function for _ensure_locked to return no lock"""
|
||||
tty.msg('{0} locked {1}' .format(lock_type, pkg.spec.name))
|
||||
return lock_type, None
|
||||
|
||||
|
||||
def _true(*args, **kwargs):
|
||||
"""Generic monkeypatch function that always returns True."""
|
||||
return True
|
||||
|
||||
|
||||
def create_build_task(pkg):
|
||||
"""
|
||||
Create a build task for the given (concretized) package
|
||||
|
||||
Args:
|
||||
pkg (PackageBase): concretized package associated with the task
|
||||
|
||||
Return:
|
||||
(BuildTask) A basic package build task
|
||||
"""
|
||||
return inst.BuildTask(pkg, False, 0, 0, inst.STATUS_ADDED, [])
|
||||
|
||||
|
||||
def create_installer(spec_name):
|
||||
"""
|
||||
Create an installer for the named spec
|
||||
|
||||
Args:
|
||||
spec_name (str): Name of the explicit install spec
|
||||
|
||||
Return:
|
||||
spec (Spec): concretized spec
|
||||
installer (PackageInstaller): the associated package installer
|
||||
"""
|
||||
spec = spack.spec.Spec(spec_name)
|
||||
spec.concretize()
|
||||
assert spec.concrete
|
||||
return spec, inst.PackageInstaller(spec.package)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('sec,result', [
|
||||
(86400, "24h"),
|
||||
(3600, "1h"),
|
||||
(60, "1m"),
|
||||
(1.802, "1.80s"),
|
||||
(3723.456, "1h 2m 3.46s")])
|
||||
def test_hms(sec, result):
|
||||
assert inst._hms(sec) == result
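The parametrized cases above imply a formatter that prints whole hours and minutes without decimals, prints leftover seconds with two decimal places, and drops zero components. A rough standalone sketch consistent with those cases follows; the real spack.installer._hms implementation may differ, so treat this as an assumption.

def hms(seconds):
    """Format a duration in seconds as e.g. '1h 2m 3.46s'."""
    m, s = divmod(seconds, 60)
    h, m = divmod(int(m), 60)
    parts = []
    if h:
        parts.append('{0}h'.format(h))
    if m:
        parts.append('{0}m'.format(m))
    if s:
        parts.append('{0:.2f}s'.format(s))
    return ' '.join(parts)


print(hms(3723.456))  # 1h 2m 3.46s
print(hms(86400))     # 24h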
|
||||
|
||||
|
||||
def test_install_msg():
|
||||
name = 'some-package'
|
||||
pid = 123456
|
||||
expected = "{0}: Installing {1}".format(pid, name)
|
||||
assert inst.install_msg(name, pid) == expected
|
||||
|
||||
|
||||
def test_install_from_cache_errors(install_mockery, capsys):
|
||||
"""Test to ensure cover _install_from_cache errors."""
|
||||
spec = spack.spec.Spec('trivial-install-test-package')
|
||||
spec.concretize()
|
||||
assert spec.concrete
|
||||
|
||||
# Check with cache-only
|
||||
with pytest.raises(SystemExit):
|
||||
inst._install_from_cache(spec.package, True, True, False)
|
||||
|
||||
captured = str(capsys.readouterr())
|
||||
assert 'No binary' in captured
|
||||
assert 'found when cache-only specified' in captured
|
||||
assert not spec.package.installed_from_binary_cache
|
||||
|
||||
# Check when don't expect to install only from binary cache
|
||||
assert not inst._install_from_cache(spec.package, False, True, False)
|
||||
assert not spec.package.installed_from_binary_cache
|
||||
|
||||
|
||||
def test_install_from_cache_ok(install_mockery, monkeypatch):
|
||||
"""Test to ensure cover _install_from_cache to the return."""
|
||||
spec = spack.spec.Spec('trivial-install-test-package')
|
||||
spec.concretize()
|
||||
monkeypatch.setattr(inst, '_try_install_from_binary_cache', _true)
|
||||
monkeypatch.setattr(spack.hooks, 'post_install', _noop)
|
||||
|
||||
assert inst._install_from_cache(spec.package, True, True, False)
|
||||
|
||||
|
||||
def test_process_external_package_module(install_mockery, monkeypatch, capfd):
|
||||
"""Test to simply cover the external module message path."""
|
||||
spec = spack.spec.Spec('trivial-install-test-package')
|
||||
spec.concretize()
|
||||
assert spec.concrete
|
||||
|
||||
# Ensure take the external module path WITHOUT any changes to the database
|
||||
monkeypatch.setattr(spack.database.Database, 'get_record', _none)
|
||||
|
||||
spec.external_path = '/actual/external/path/not/checked'
|
||||
spec.external_module = 'unchecked_module'
|
||||
inst._process_external_package(spec.package, False)
|
||||
|
||||
out = capfd.readouterr()[0]
|
||||
assert 'has external module in {0}'.format(spec.external_module) in out
|
||||
assert 'is actually installed in {0}'.format(spec.external_path) in out
|
||||
|
||||
|
||||
def test_process_binary_cache_tarball_none(install_mockery, monkeypatch,
|
||||
capfd):
|
||||
"""Tests of _process_binary_cache_tarball when no tarball."""
|
||||
monkeypatch.setattr(spack.binary_distribution, 'download_tarball', _none)
|
||||
|
||||
pkg = spack.repo.get('trivial-install-test-package')
|
||||
assert not inst._process_binary_cache_tarball(pkg, None, False, False)
|
||||
|
||||
assert 'exists in binary cache but' in capfd.readouterr()[0]
|
||||
|
||||
|
||||
def test_process_binary_cache_tarball_tar(install_mockery, monkeypatch, capfd):
|
||||
"""Tests of _process_binary_cache_tarball with a tar file."""
|
||||
def _spec(spec):
|
||||
return spec
|
||||
|
||||
# Skip binary distribution functionality since assume tested elsewhere
|
||||
monkeypatch.setattr(spack.binary_distribution, 'download_tarball', _spec)
|
||||
monkeypatch.setattr(spack.binary_distribution, 'extract_tarball', _noop)
|
||||
|
||||
# Skip database updates
|
||||
monkeypatch.setattr(spack.database.Database, 'add', _noop)
|
||||
|
||||
spec = spack.spec.Spec('a').concretized()
|
||||
assert inst._process_binary_cache_tarball(spec.package, spec, False, False)
|
||||
|
||||
assert 'Installing a from binary cache' in capfd.readouterr()[0]
|
||||
|
||||
|
||||
def test_try_install_from_binary_cache(install_mockery, mock_packages,
|
||||
monkeypatch, capsys):
|
||||
"""Tests SystemExit path for_try_install_from_binary_cache."""
|
||||
def _spec(spec, force):
|
||||
spec = spack.spec.Spec('mpi').concretized()
|
||||
return {spec: None}
|
||||
|
||||
spec = spack.spec.Spec('mpich')
|
||||
spec.concretize()
|
||||
|
||||
monkeypatch.setattr(spack.binary_distribution, 'get_spec', _spec)
|
||||
|
||||
with pytest.raises(SystemExit):
|
||||
inst._try_install_from_binary_cache(spec.package, False, False)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert 'add a spack mirror to allow download' in str(captured)
|
||||
|
||||
|
||||


def test_installer_init_errors(install_mockery):
    """Test to ensure coverage of installer constructor errors."""
    with pytest.raises(ValueError, match='must be a package'):
        inst.PackageInstaller('abc')

    pkg = spack.repo.get('trivial-install-test-package')
    with pytest.raises(ValueError, match='Can only install concrete'):
        inst.PackageInstaller(pkg)


def test_installer_repr(install_mockery):
    spec, installer = create_installer('trivial-install-test-package')

    irep = installer.__repr__()
    assert irep.startswith(installer.__class__.__name__)
    assert "installed=" in irep
    assert "failed=" in irep


def test_installer_str(install_mockery):
    spec, installer = create_installer('trivial-install-test-package')

    istr = str(installer)
    assert "#tasks=0" in istr
    assert "installed (0)" in istr
    assert "failed (0)" in istr


def test_installer_last_phase_error(install_mockery, capsys):
    spec = spack.spec.Spec('trivial-install-test-package')
    spec.concretize()
    assert spec.concrete
    with pytest.raises(SystemExit):
        installer = inst.PackageInstaller(spec.package)
        installer.install(stop_at='badphase')

    captured = capsys.readouterr()
    assert 'is not an allowed phase' in str(captured)


def test_installer_ensure_ready_errors(install_mockery):
    spec, installer = create_installer('trivial-install-test-package')

    fmt = r'cannot be installed locally.*{0}'
    # Force an external package error
    path, module = spec.external_path, spec.external_module
    spec.external_path = '/actual/external/path/not/checked'
    spec.external_module = 'unchecked_module'
    msg = fmt.format('is external')
    with pytest.raises(inst.ExternalPackageError, match=msg):
        installer._ensure_install_ready(spec.package)

    # Force an upstream package error
    spec.external_path, spec.external_module = path, module
    spec.package._installed_upstream = True
    msg = fmt.format('is upstream')
    with pytest.raises(inst.UpstreamPackageError, match=msg):
        installer._ensure_install_ready(spec.package)

    # Force an install lock error, which should occur naturally since
    # we are calling an internal method prior to any lock-related setup
    spec.package._installed_upstream = False
    assert len(installer.locks) == 0
    with pytest.raises(inst.InstallLockError, match=fmt.format('not locked')):
        installer._ensure_install_ready(spec.package)


def test_ensure_locked_err(install_mockery, monkeypatch, tmpdir, capsys):
    """Test _ensure_locked when a non-lock exception is raised."""
    mock_err_msg = 'Mock exception error'

    def _raise(lock, timeout):
        raise RuntimeError(mock_err_msg)

    spec, installer = create_installer('trivial-install-test-package')

    monkeypatch.setattr(ulk.Lock, 'acquire_read', _raise)
    with tmpdir.as_cwd():
        with pytest.raises(RuntimeError):
            installer._ensure_locked('read', spec.package)

        out = str(capsys.readouterr()[1])
        assert 'Failed to acquire a read lock' in out
        assert mock_err_msg in out


def test_ensure_locked_have(install_mockery, tmpdir, capsys):
    """Test _ensure_locked when already have lock."""
    spec, installer = create_installer('trivial-install-test-package')

    with tmpdir.as_cwd():
        # Test "downgrade" of a read lock (to a read lock)
        lock = lk.Lock('./test', default_timeout=1e-9, desc='test')
        lock_type = 'read'
        tpl = (lock_type, lock)
        installer.locks[installer.pkg_id] = tpl
        assert installer._ensure_locked(lock_type, spec.package) == tpl

        # Test "upgrade" of a read lock without read count to a write
        lock_type = 'write'
        err = 'Cannot upgrade lock'
        with pytest.raises(ulk.LockUpgradeError, match=err):
            installer._ensure_locked(lock_type, spec.package)

        out = str(capsys.readouterr()[1])
        assert 'Failed to upgrade to a write lock' in out
        assert 'exception when releasing read lock' in out

        # Test "upgrade" of the read lock *with* read count to a write
        lock._reads = 1
        tpl = (lock_type, lock)
        assert installer._ensure_locked(lock_type, spec.package) == tpl

        # Test "downgrade" of the write lock to a read lock
        lock_type = 'read'
        tpl = (lock_type, lock)
        assert installer._ensure_locked(lock_type, spec.package) == tpl


@pytest.mark.parametrize('lock_type,reads,writes', [
    ('read', 1, 0),
    ('write', 0, 1)])
def test_ensure_locked_new_lock(
        install_mockery, tmpdir, lock_type, reads, writes):
    pkg_id = 'a'
    spec, installer = create_installer(pkg_id)
    with tmpdir.as_cwd():
        ltype, lock = installer._ensure_locked(lock_type, spec.package)
        assert ltype == lock_type
        assert lock is not None
        assert lock._reads == reads
        assert lock._writes == writes


def test_ensure_locked_new_warn(install_mockery, monkeypatch, tmpdir, capsys):
    orig_pl = spack.database.Database.prefix_lock

    def _pl(db, spec, timeout):
        lock = orig_pl(db, spec, timeout)
        lock.default_timeout = 1e-9 if timeout is None else None
        return lock

    pkg_id = 'a'
    spec, installer = create_installer(pkg_id)

    monkeypatch.setattr(spack.database.Database, 'prefix_lock', _pl)

    lock_type = 'read'
    ltype, lock = installer._ensure_locked(lock_type, spec.package)
    assert ltype == lock_type
    assert lock is not None

    out = str(capsys.readouterr()[1])
    assert 'Expected prefix lock timeout' in out


def test_package_id_err(install_mockery):
    pkg = spack.repo.get('trivial-install-test-package')
    with pytest.raises(ValueError, match='spec is not concretized'):
        inst.package_id(pkg)


def test_package_id_ok(install_mockery):
    spec = spack.spec.Spec('trivial-install-test-package')
    spec.concretize()
    assert spec.concrete
    pkg = spec.package
    assert pkg.name in inst.package_id(pkg)


def test_fake_install(install_mockery):
    spec = spack.spec.Spec('trivial-install-test-package')
    spec.concretize()
    assert spec.concrete

    pkg = spec.package
    inst._do_fake_install(pkg)
    assert os.path.isdir(pkg.prefix.lib)


def test_packages_needed_to_bootstrap_compiler_none(install_mockery):
    spec = spack.spec.Spec('trivial-install-test-package')
    spec.concretize()
    assert spec.concrete

    packages = inst._packages_needed_to_bootstrap_compiler(spec.package)
    assert not packages


def test_packages_needed_to_bootstrap_compiler_packages(install_mockery,
                                                        monkeypatch):
    spec = spack.spec.Spec('trivial-install-test-package')
    spec.concretize()

    def _conc_spec(compiler):
        return spack.spec.Spec('a').concretized()

    # Ensure we can get past functions that are precluding obtaining
    # packages.
    monkeypatch.setattr(spack.compilers, 'compilers_for_spec', _none)
    monkeypatch.setattr(spack.compilers, 'pkg_spec_for_compiler', _conc_spec)
    monkeypatch.setattr(spack.spec.Spec, 'concretize', _noop)

    packages = inst._packages_needed_to_bootstrap_compiler(spec.package)
    assert packages


def test_dump_packages_deps_ok(install_mockery, tmpdir, mock_repo_path):
|
||||
"""Test happy path for dump_packages with dependencies."""
|
||||
|
||||
spec_name = 'simple-inheritance'
|
||||
spec = spack.spec.Spec(spec_name).concretized()
|
||||
inst.dump_packages(spec, str(tmpdir))
|
||||
|
||||
repo = mock_repo_path.repos[0]
|
||||
dest_pkg = repo.filename_for_package_name(spec_name)
|
||||
assert os.path.isfile(dest_pkg)
|
||||
|
||||
|
||||
def test_dump_packages_deps_errs(install_mockery, tmpdir, monkeypatch, capsys):
|
||||
"""Test error paths for dump_packages with dependencies."""
|
||||
orig_bpp = spack.store.layout.build_packages_path
|
||||
orig_dirname = spack.repo.Repo.dirname_for_package_name
|
||||
repo_err_msg = "Mock dirname_for_package_name"
|
||||
|
||||
def bpp_path(spec):
|
||||
# Perform the original function
|
||||
source = orig_bpp(spec)
|
||||
# Mock the required directory structure for the repository
|
||||
_mock_repo(os.path.join(source, spec.namespace), spec.namespace)
|
||||
return source
|
||||
|
||||
def _repoerr(repo, name):
|
||||
if name == 'cmake':
|
||||
raise spack.repo.RepoError(repo_err_msg)
|
||||
else:
|
||||
return orig_dirname(repo, name)
|
||||
|
||||
# Now mock the creation of the required directory structure to cover
|
||||
# the try-except block
|
||||
monkeypatch.setattr(spack.store.layout, 'build_packages_path', bpp_path)
|
||||
|
||||
spec = spack.spec.Spec('simple-inheritance').concretized()
|
||||
path = str(tmpdir)
|
||||
|
||||
# The call to install_tree will raise the exception since not mocking
|
||||
# creation of dependency package files within *install* directories.
|
||||
with pytest.raises(IOError, match=path):
|
||||
inst.dump_packages(spec, path)
|
||||
|
||||
# Now try the error path, which requires the mock directory structure
|
||||
# above
|
||||
monkeypatch.setattr(spack.repo.Repo, 'dirname_for_package_name', _repoerr)
|
||||
with pytest.raises(spack.repo.RepoError, match=repo_err_msg):
|
||||
inst.dump_packages(spec, path)
|
||||
|
||||
out = str(capsys.readouterr()[1])
|
||||
assert "Couldn't copy in provenance for cmake" in out
|
||||
|
||||
|
||||
def test_check_deps_status_install_failure(install_mockery, monkeypatch):
|
||||
spec, installer = create_installer('a')
|
||||
|
||||
# Make sure the package is identified as failed
|
||||
monkeypatch.setattr(spack.database.Database, 'prefix_failed', _true)
|
||||
|
||||
with pytest.raises(inst.InstallError, match='install failure'):
|
||||
installer._check_deps_status()
|
||||
|
||||
|
||||
def test_check_deps_status_write_locked(install_mockery, monkeypatch):
|
||||
spec, installer = create_installer('a')
|
||||
|
||||
# Ensure the lock is not acquired
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_ensure_locked', _not_locked)
|
||||
|
||||
with pytest.raises(inst.InstallError, match='write locked by another'):
|
||||
installer._check_deps_status()
|
||||
|
||||
|
||||
def test_check_deps_status_external(install_mockery, monkeypatch):
|
||||
spec, installer = create_installer('a')
|
||||
|
||||
# Mock the known dependent, b, as external so assumed to be installed
|
||||
monkeypatch.setattr(spack.spec.Spec, 'external', True)
|
||||
installer._check_deps_status()
|
||||
assert 'b' in installer.installed
|
||||
|
||||
|
||||
def test_check_deps_status_upstream(install_mockery, monkeypatch):
|
||||
spec, installer = create_installer('a')
|
||||
|
||||
# Mock the known dependent, b, as installed upstream
|
||||
monkeypatch.setattr(spack.package.PackageBase, 'installed_upstream', True)
|
||||
installer._check_deps_status()
|
||||
assert 'b' in installer.installed
|
||||
|
||||
|
||||
def test_add_bootstrap_compilers(install_mockery, monkeypatch):
|
||||
def _pkgs(pkg):
|
||||
spec = spack.spec.Spec('mpi').concretized()
|
||||
return [(spec.package, True)]
|
||||
|
||||
spec, installer = create_installer('trivial-install-test-package')
|
||||
|
||||
monkeypatch.setattr(inst, '_packages_needed_to_bootstrap_compiler', _pkgs)
|
||||
installer._add_bootstrap_compilers(spec.package)
|
||||
|
||||
ids = list(installer.build_tasks)
|
||||
assert len(ids) == 1
|
||||
task = installer.build_tasks[ids[0]]
|
||||
assert task.compiler
|
||||
|
||||
|
||||
def test_prepare_for_install_on_installed(install_mockery, monkeypatch):
|
||||
"""Test of _prepare_for_install's early return for installed task path."""
|
||||
spec, installer = create_installer('dependent-install')
|
||||
task = create_build_task(spec.package)
|
||||
installer.installed.add(task.pkg_id)
|
||||
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_ensure_install_ready', _noop)
|
||||
installer._prepare_for_install(task, True, True, False)
|
||||
|
||||
|
||||
def test_installer_init_queue(install_mockery):
|
||||
"""Test of installer queue functions."""
|
||||
with spack.config.override('config:install_missing_compilers', True):
|
||||
spec, installer = create_installer('dependent-install')
|
||||
installer._init_queue(True, True)
|
||||
|
||||
ids = list(installer.build_tasks)
|
||||
assert len(ids) == 2
|
||||
assert 'dependency-install' in ids
|
||||
assert 'dependent-install' in ids
|
||||
|
||||
|
||||
def test_install_task_use_cache(install_mockery, monkeypatch):
|
||||
spec, installer = create_installer('trivial-install-test-package')
|
||||
task = create_build_task(spec.package)
|
||||
|
||||
monkeypatch.setattr(inst, '_install_from_cache', _true)
|
||||
installer._install_task(task)
|
||||
assert spec.package.name in installer.installed
|
||||
|
||||
|
||||
def test_install_task_add_compiler(install_mockery, monkeypatch, capfd):
|
||||
config_msg = 'mock add_compilers_to_config'
|
||||
|
||||
def _add(_compilers):
|
||||
tty.msg(config_msg)
|
||||
|
||||
spec, installer = create_installer('a')
|
||||
task = create_build_task(spec.package)
|
||||
task.compiler = True
|
||||
|
||||
# Preclude any meaningful side-effects
|
||||
monkeypatch.setattr(spack.package.PackageBase, 'unit_test_check', _true)
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_setup_install_dir', _noop)
|
||||
monkeypatch.setattr(spack.build_environment, 'fork', _noop)
|
||||
monkeypatch.setattr(spack.database.Database, 'add', _noop)
|
||||
monkeypatch.setattr(spack.compilers, 'add_compilers_to_config', _add)
|
||||
|
||||
installer._install_task(task)
|
||||
|
||||
out = capfd.readouterr()[0]
|
||||
assert config_msg in out
|
||||
|
||||
|
||||
def test_release_lock_write_n_exception(install_mockery, tmpdir, capsys):
|
||||
"""Test _release_lock for supposed write lock with exception."""
|
||||
spec, installer = create_installer('trivial-install-test-package')
|
||||
|
||||
pkg_id = 'test'
|
||||
with tmpdir.as_cwd():
|
||||
lock = lk.Lock('./test', default_timeout=1e-9, desc='test')
|
||||
installer.locks[pkg_id] = ('write', lock)
|
||||
assert lock._writes == 0
|
||||
|
||||
installer._release_lock(pkg_id)
|
||||
out = str(capsys.readouterr()[1])
|
||||
msg = 'exception when releasing write lock for {0}'.format(pkg_id)
|
||||
assert msg in out
|
||||
|
||||
|
||||
def test_requeue_task(install_mockery, capfd):
|
||||
"""Test to ensure cover _requeue_task."""
|
||||
spec, installer = create_installer('a')
|
||||
task = create_build_task(spec.package)
|
||||
|
||||
installer._requeue_task(task)
|
||||
|
||||
ids = list(installer.build_tasks)
|
||||
assert len(ids) == 1
|
||||
qtask = installer.build_tasks[ids[0]]
|
||||
assert qtask.status == inst.STATUS_INSTALLING
|
||||
|
||||
out = capfd.readouterr()[0]
|
||||
assert 'Installing a in progress by another process' in out
|
||||
|
||||
|
||||
def test_cleanup_all_tasks(install_mockery, monkeypatch):
|
||||
"""Test to ensure cover _cleanup_all_tasks."""
|
||||
def _mktask(pkg):
|
||||
return create_build_task(pkg)
|
||||
|
||||
def _rmtask(installer, pkg_id):
|
||||
raise RuntimeError('Raise an exception to test except path')
|
||||
|
||||
spec, installer = create_installer('a')
|
||||
|
||||
# Cover task removal happy path
|
||||
installer.build_tasks['a'] = _mktask(spec.package)
|
||||
installer._cleanup_all_tasks()
|
||||
assert len(installer.build_tasks) == 0
|
||||
|
||||
# Cover task removal exception path
|
||||
installer.build_tasks['a'] = _mktask(spec.package)
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_remove_task', _rmtask)
|
||||
installer._cleanup_all_tasks()
|
||||
assert len(installer.build_tasks) == 1
|
||||
|
||||
|
||||
def test_setup_install_dir_grp(install_mockery, monkeypatch, capfd):
|
||||
"""Test _setup_install_dir's group change."""
|
||||
mock_group = 'mockgroup'
|
||||
mock_chgrp_msg = 'Changing group for {0} to {1}'
|
||||
|
||||
def _get_group(spec):
|
||||
return mock_group
|
||||
|
||||
def _chgrp(path, group):
|
||||
tty.msg(mock_chgrp_msg.format(path, group))
|
||||
|
||||
monkeypatch.setattr(prefs, 'get_package_group', _get_group)
|
||||
monkeypatch.setattr(fs, 'chgrp', _chgrp)
|
||||
|
||||
spec, installer = create_installer('trivial-install-test-package')
|
||||
|
||||
fs.touchp(spec.prefix)
|
||||
metadatadir = spack.store.layout.metadata_path(spec)
|
||||
# Should fail with a "not a directory" error
|
||||
with pytest.raises(OSError, match=metadatadir):
|
||||
installer._setup_install_dir(spec.package)
|
||||
|
||||
out = str(capfd.readouterr()[0])
|
||||
|
||||
expected_msg = mock_chgrp_msg.format(spec.prefix, mock_group)
|
||||
assert expected_msg in out
|
||||
|
||||
|
||||
def test_cleanup_failed_err(install_mockery, tmpdir, monkeypatch, capsys):
|
||||
"""Test _cleanup_failed exception path."""
|
||||
msg = 'Fake release_write exception'
|
||||
|
||||
def _raise_except(lock):
|
||||
raise RuntimeError(msg)
|
||||
|
||||
spec, installer = create_installer('trivial-install-test-package')
|
||||
|
||||
monkeypatch.setattr(lk.Lock, 'release_write', _raise_except)
|
||||
pkg_id = 'test'
|
||||
with tmpdir.as_cwd():
|
||||
lock = lk.Lock('./test', default_timeout=1e-9, desc='test')
|
||||
installer.failed[pkg_id] = lock
|
||||
|
||||
installer._cleanup_failed(pkg_id)
|
||||
out = str(capsys.readouterr()[1])
|
||||
assert 'exception when removing failure mark' in out
|
||||
assert msg in out
|
||||
|
||||
|
||||
def test_update_failed_no_dependent_task(install_mockery):
|
||||
"""Test _update_failed with missing dependent build tasks."""
|
||||
spec, installer = create_installer('dependent-install')
|
||||
|
||||
for dep in spec.traverse(root=False):
|
||||
task = create_build_task(dep.package)
|
||||
installer._update_failed(task, mark=False)
|
||||
assert installer.failed[task.pkg_id] is None
|
||||
|
||||
|
||||
def test_install_uninstalled_deps(install_mockery, monkeypatch, capsys):
|
||||
"""Test install with uninstalled dependencies."""
|
||||
spec, installer = create_installer('dependent-install')
|
||||
|
||||
# Skip the actual installation and any status updates
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_install_task', _noop)
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_update_installed', _noop)
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_update_failed', _noop)
|
||||
|
||||
msg = 'Cannot proceed with dependent-install'
|
||||
with pytest.raises(spack.installer.InstallError, match=msg):
|
||||
installer.install()
|
||||
|
||||
out = str(capsys.readouterr())
|
||||
assert 'Detected uninstalled dependencies for' in out
|
||||
|
||||
|
||||
def test_install_failed(install_mockery, monkeypatch, capsys):
|
||||
"""Test install with failed install."""
|
||||
spec, installer = create_installer('b')
|
||||
|
||||
# Make sure the package is identified as failed
|
||||
monkeypatch.setattr(spack.database.Database, 'prefix_failed', _true)
|
||||
|
||||
# Skip the actual installation though it should never get there
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_install_task', _noop)
|
||||
|
||||
msg = 'Installation of b failed'
|
||||
with pytest.raises(spack.installer.InstallError, match=msg):
|
||||
installer.install()
|
||||
|
||||
out = str(capsys.readouterr())
|
||||
assert 'Warning: b failed to install' in out
|
||||
|
||||
|
||||
def test_install_lock_failures(install_mockery, monkeypatch, capfd):
|
||||
"""Cover basic install lock failure handling in a single pass."""
|
||||
def _requeued(installer, task):
|
||||
tty.msg('requeued {0}' .format(task.pkg.spec.name))
|
||||
|
||||
spec, installer = create_installer('b')
|
||||
|
||||
# Ensure never acquire a lock
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_ensure_locked', _not_locked)
|
||||
|
||||
# Ensure don't continually requeue the task
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_requeue_task', _requeued)
|
||||
|
||||
# Skip the actual installation though should never reach it
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_install_task', _noop)
|
||||
|
||||
installer.install()
|
||||
out = capfd.readouterr()[0]
|
||||
expected = ['write locked', 'read locked', 'requeued']
|
||||
for exp, ln in zip(expected, out.split('\n')):
|
||||
assert exp in ln
|
||||
|
||||
|
||||
def test_install_lock_installed_requeue(install_mockery, monkeypatch, capfd):
|
||||
"""Cover basic install handling for installed package."""
|
||||
def _install(installer, task, **kwargs):
|
||||
tty.msg('{0} installing'.format(task.pkg.spec.name))
|
||||
|
||||
def _prep(installer, task, keep_prefix, keep_stage, restage):
|
||||
installer.installed.add('b')
|
||||
tty.msg('{0} is installed' .format(task.pkg.spec.name))
|
||||
|
||||
# also do not allow the package to be locked again
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_ensure_locked',
|
||||
_not_locked)
|
||||
|
||||
def _requeued(installer, task):
|
||||
tty.msg('requeued {0}' .format(task.pkg.spec.name))
|
||||
|
||||
# Skip the actual installation though should never reach it
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_install_task', _install)
|
||||
|
||||
# Flag the package as installed
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_prepare_for_install', _prep)
|
||||
|
||||
# Ensure don't continually requeue the task
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_requeue_task', _requeued)
|
||||
|
||||
spec, installer = create_installer('b')
|
||||
|
||||
installer.install()
|
||||
assert 'b' not in installer.installed
|
||||
|
||||
out = capfd.readouterr()[0]
|
||||
expected = ['is installed', 'read locked', 'requeued']
|
||||
for exp, ln in zip(expected, out.split('\n')):
|
||||
assert exp in ln
|
||||
|
||||
|
||||
def test_install_read_locked_requeue(install_mockery, monkeypatch, capfd):
|
||||
"""Cover basic read lock handling for uninstalled package with requeue."""
|
||||
orig_fn = inst.PackageInstaller._ensure_locked
|
||||
|
||||
def _install(installer, task, **kwargs):
|
||||
tty.msg('{0} installing'.format(task.pkg.spec.name))
|
||||
|
||||
def _read(installer, lock_type, pkg):
|
||||
tty.msg('{0}->read locked {1}' .format(lock_type, pkg.spec.name))
|
||||
return orig_fn(installer, 'read', pkg)
|
||||
|
||||
def _prep(installer, task, keep_prefix, keep_stage, restage):
|
||||
tty.msg('preparing {0}' .format(task.pkg.spec.name))
|
||||
assert task.pkg.spec.name not in installer.installed
|
||||
|
||||
def _requeued(installer, task):
|
||||
tty.msg('requeued {0}' .format(task.pkg.spec.name))
|
||||
|
||||
# Force a read lock
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_ensure_locked', _read)
|
||||
|
||||
# Skip the actual installation though should never reach it
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_install_task', _install)
|
||||
|
||||
# Flag the package as installed
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_prepare_for_install', _prep)
|
||||
|
||||
# Ensure don't continually requeue the task
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_requeue_task', _requeued)
|
||||
|
||||
spec, installer = create_installer('b')
|
||||
|
||||
installer.install()
|
||||
assert 'b' not in installer.installed
|
||||
|
||||
out = capfd.readouterr()[0]
|
||||
expected = ['write->read locked', 'preparing', 'requeued']
|
||||
for exp, ln in zip(expected, out.split('\n')):
|
||||
assert exp in ln
|
||||
|
||||
|
||||
def test_install_dir_exists(install_mockery, monkeypatch, capfd):
|
||||
"""Cover capture of install directory exists error."""
|
||||
err = 'Mock directory exists error'
|
||||
|
||||
def _install(installer, task, **kwargs):
|
||||
raise dl.InstallDirectoryAlreadyExistsError(err)
|
||||
|
||||
# Skip the actual installation though should never reach it
|
||||
monkeypatch.setattr(inst.PackageInstaller, '_install_task', _install)
|
||||
|
||||
spec, installer = create_installer('b')
|
||||
|
||||
with pytest.raises(dl.InstallDirectoryAlreadyExistsError, match=err):
|
||||
installer.install()
|
||||
|
||||
assert 'b' in installer.installed
|
||||
|
||||
|
||||
def test_install_skip_patch(install_mockery, mock_fetch):
|
||||
"""Test the path skip_patch install path."""
|
||||
spec, installer = create_installer('b')
|
||||
|
||||
installer.install(fake=False, skip_patch=True)
|
||||
|
||||
assert 'b' in installer.installed
|
@@ -130,3 +130,10 @@ def test_load_modules_from_file(module_path):
|
||||
foo = llnl.util.lang.load_module_from_file('foo', module_path)
|
||||
assert foo.value == 1
|
||||
assert foo.path == os.path.join('/usr', 'bin')
|
||||
|
||||
|
||||
def test_uniq():
|
||||
assert [1, 2, 3] == llnl.util.lang.uniq([1, 2, 3])
|
||||
assert [1, 2, 3] == llnl.util.lang.uniq([1, 1, 1, 1, 2, 2, 2, 3, 3])
|
||||
assert [1, 2, 1] == llnl.util.lang.uniq([1, 1, 1, 1, 2, 2, 2, 1, 1])
|
||||
assert [] == llnl.util.lang.uniq([])
|
||||
|
@@ -1240,3 +1240,57 @@ def test_lock_in_current_directory(tmpdir):
|
||||
pass
|
||||
with lk.WriteTransaction(lock):
|
||||
pass
|
||||
|
||||
|
||||
def test_attempts_str():
|
||||
assert lk._attempts_str(0, 0) == ''
|
||||
assert lk._attempts_str(0.12, 1) == ''
|
||||
assert lk._attempts_str(12.345, 2) == ' after 12.35s and 2 attempts'
|
||||
|
||||
|
||||
def test_lock_str():
|
||||
lock = lk.Lock('lockfile')
|
||||
lockstr = str(lock)
|
||||
assert 'lockfile[0:0]' in lockstr
|
||||
assert 'timeout=None' in lockstr
|
||||
assert '#reads=0, #writes=0' in lockstr
|
||||
|
||||
|
||||
def test_downgrade_write_okay(tmpdir):
|
||||
"""Test the lock write-to-read downgrade operation."""
|
||||
with tmpdir.as_cwd():
|
||||
lock = lk.Lock('lockfile')
|
||||
lock.acquire_write()
|
||||
lock.downgrade_write_to_read()
|
||||
assert lock._reads == 1
|
||||
assert lock._writes == 0
|
||||
|
||||
|
||||
def test_downgrade_write_fails(tmpdir):
|
||||
"""Test failing the lock write-to-read downgrade operation."""
|
||||
with tmpdir.as_cwd():
|
||||
lock = lk.Lock('lockfile')
|
||||
lock.acquire_read()
|
||||
msg = 'Cannot downgrade lock from write to read on file: lockfile'
|
||||
with pytest.raises(lk.LockDowngradeError, match=msg):
|
||||
lock.downgrade_write_to_read()
|
||||
|
||||
|
||||
def test_upgrade_read_okay(tmpdir):
|
||||
"""Test the lock read-to-write upgrade operation."""
|
||||
with tmpdir.as_cwd():
|
||||
lock = lk.Lock('lockfile')
|
||||
lock.acquire_read()
|
||||
lock.upgrade_read_to_write()
|
||||
assert lock._reads == 0
|
||||
assert lock._writes == 1
|
||||
|
||||
|
||||
def test_upgrade_read_fails(tmpdir):
|
||||
"""Test failing the lock read-to-write upgrade operation."""
|
||||
with tmpdir.as_cwd():
|
||||
lock = lk.Lock('lockfile')
|
||||
lock.acquire_write()
|
||||
msg = 'Cannot upgrade lock from read to write on file: lockfile'
|
||||
with pytest.raises(lk.LockUpgradeError, match=msg):
|
||||
lock.upgrade_read_to_write()
|
||||
|
@@ -1,81 +0,0 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
from __future__ import print_function
|
||||
import pytest
|
||||
|
||||
from llnl.util.tty.log import log_output
|
||||
from spack.util.executable import which
|
||||
|
||||
|
||||
def test_log_python_output_with_python_stream(capsys, tmpdir):
|
||||
# pytest's DontReadFromInput object does not like what we do here, so
|
||||
# disable capsys or things hang.
|
||||
with tmpdir.as_cwd():
|
||||
with capsys.disabled():
|
||||
with log_output('foo.txt'):
|
||||
print('logged')
|
||||
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'logged\n'
|
||||
|
||||
assert capsys.readouterr() == ('', '')
|
||||
|
||||
|
||||
def test_log_python_output_with_fd_stream(capfd, tmpdir):
|
||||
with tmpdir.as_cwd():
|
||||
with log_output('foo.txt'):
|
||||
print('logged')
|
||||
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'logged\n'
|
||||
|
||||
assert capfd.readouterr() == ('', '')
|
||||
|
||||
|
||||
def test_log_python_output_and_echo_output(capfd, tmpdir):
|
||||
with tmpdir.as_cwd():
|
||||
with log_output('foo.txt') as logger:
|
||||
with logger.force_echo():
|
||||
print('echo')
|
||||
print('logged')
|
||||
|
||||
assert capfd.readouterr() == ('echo\n', '')
|
||||
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'echo\nlogged\n'
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which('echo'), reason="needs echo command")
|
||||
def test_log_subproc_output(capsys, tmpdir):
|
||||
echo = which('echo')
|
||||
|
||||
# pytest seems to interfere here, so we need to use capsys.disabled()
|
||||
# TODO: figure out why this is and whether it means we're doing
|
||||
# something wrong with OUR redirects. Seems like it should work even
|
||||
# with capsys enabled.
|
||||
with tmpdir.as_cwd():
|
||||
with capsys.disabled():
|
||||
with log_output('foo.txt'):
|
||||
echo('logged')
|
||||
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'logged\n'
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which('echo'), reason="needs echo command")
|
||||
def test_log_subproc_and_echo_output(capfd, tmpdir):
|
||||
echo = which('echo')
|
||||
|
||||
with tmpdir.as_cwd():
|
||||
with log_output('foo.txt') as logger:
|
||||
with logger.force_echo():
|
||||
echo('echo')
|
||||
print('logged')
|
||||
|
||||
assert capfd.readouterr() == ('echo\n', '')
|
||||
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'logged\n'
|
lib/spack/spack/test/llnl/util/tty/log.py (new file, 442 lines)
@@ -0,0 +1,442 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
from __future__ import print_function
|
||||
import contextlib
|
||||
import multiprocessing
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
try:
|
||||
import termios
|
||||
except ImportError:
|
||||
termios = None
|
||||
|
||||
import pytest
|
||||
|
||||
import llnl.util.tty.log
|
||||
from llnl.util.lang import uniq
|
||||
from llnl.util.tty.log import log_output
|
||||
from llnl.util.tty.pty import PseudoShell
|
||||
|
||||
from spack.util.executable import which
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def nullcontext():
|
||||
yield
|
||||
|
||||
|
||||
def test_log_python_output_with_echo(capfd, tmpdir):
|
||||
with tmpdir.as_cwd():
|
||||
with log_output('foo.txt', echo=True):
|
||||
print('logged')
|
||||
|
||||
# foo.txt has output
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'logged\n'
|
||||
|
||||
# output is also echoed.
|
||||
assert capfd.readouterr()[0] == 'logged\n'
|
||||
|
||||
|
||||
def test_log_python_output_without_echo(capfd, tmpdir):
|
||||
with tmpdir.as_cwd():
|
||||
with log_output('foo.txt'):
|
||||
print('logged')
|
||||
|
||||
# foo.txt has output
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'logged\n'
|
||||
|
||||
# nothing on stdout or stderr
|
||||
assert capfd.readouterr()[0] == ''
|
||||
|
||||
|
||||
def test_log_python_output_and_echo_output(capfd, tmpdir):
|
||||
with tmpdir.as_cwd():
|
||||
# echo two lines
|
||||
with log_output('foo.txt') as logger:
|
||||
with logger.force_echo():
|
||||
print('force echo')
|
||||
print('logged')
|
||||
|
||||
# log file contains everything
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'force echo\nlogged\n'
|
||||
|
||||
# only force-echo'd stuff is in output
|
||||
assert capfd.readouterr()[0] == 'force echo\n'
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which('echo'), reason="needs echo command")
|
||||
def test_log_subproc_and_echo_output_no_capfd(capfd, tmpdir):
|
||||
echo = which('echo')
|
||||
|
||||
# this is split into two tests because capfd interferes with the
|
||||
# output logged to file when using a subprocess. We test the file
|
||||
# here, and echoing in test_log_subproc_and_echo_output_capfd below.
|
||||
with capfd.disabled():
|
||||
with tmpdir.as_cwd():
|
||||
with log_output('foo.txt') as logger:
|
||||
with logger.force_echo():
|
||||
echo('echo')
|
||||
print('logged')
|
||||
|
||||
with open('foo.txt') as f:
|
||||
assert f.read() == 'echo\nlogged\n'
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which('echo'), reason="needs echo command")
|
||||
def test_log_subproc_and_echo_output_capfd(capfd, tmpdir):
|
||||
echo = which('echo')
|
||||
|
||||
# This tests *only* what is echoed when using a subprocess, as capfd
|
||||
# interferes with the logged data. See
|
||||
# test_log_subproc_and_echo_output_no_capfd for tests on the logfile.
|
||||
with tmpdir.as_cwd():
|
||||
with log_output('foo.txt') as logger:
|
||||
with logger.force_echo():
|
||||
echo('echo')
|
||||
print('logged')
|
||||
|
||||
assert capfd.readouterr()[0] == "echo\n"
|
||||
|
||||
|
||||
#
|
||||
# Tests below use a pseudoterminal to test llnl.util.tty.log
|
||||
#
|
||||
def simple_logger(**kwargs):
|
||||
"""Mock logger (child) process for testing log.keyboard_input."""
|
||||
def handler(signum, frame):
|
||||
running[0] = False
|
||||
signal.signal(signal.SIGUSR1, handler)
|
||||
|
||||
log_path = kwargs["log_path"]
|
||||
running = [True]
|
||||
with log_output(log_path):
|
||||
while running[0]:
|
||||
print("line")
|
||||
time.sleep(1e-3)
|
||||
|
||||
|
||||
def mock_shell_fg(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_enabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_fg_no_termios(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled_fg()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_bg(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_tstp_cont(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_bg_fg(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_enabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled_fg()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_fg_bg(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_enabled()
|
||||
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_fg_bg_no_termios(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled_fg()
|
||||
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def no_termios():
|
||||
saved = llnl.util.tty.log.termios
|
||||
llnl.util.tty.log.termios = None
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
llnl.util.tty.log.termios = saved
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which("ps"), reason="requires ps utility")
|
||||
@pytest.mark.skipif(not termios, reason="requires termios support")
|
||||
@pytest.mark.parametrize('test_fn,termios_on_or_off', [
|
||||
# tests with termios
|
||||
(mock_shell_fg, nullcontext),
|
||||
(mock_shell_bg, nullcontext),
|
||||
(mock_shell_bg_fg, nullcontext),
|
||||
(mock_shell_fg_bg, nullcontext),
|
||||
(mock_shell_tstp_cont, nullcontext),
|
||||
(mock_shell_tstp_tstp_cont, nullcontext),
|
||||
(mock_shell_tstp_tstp_cont_cont, nullcontext),
|
||||
# tests without termios
|
||||
(mock_shell_fg_no_termios, no_termios),
|
||||
(mock_shell_bg, no_termios),
|
||||
(mock_shell_bg_fg_no_termios, no_termios),
|
||||
(mock_shell_fg_bg_no_termios, no_termios),
|
||||
(mock_shell_tstp_cont, no_termios),
|
||||
(mock_shell_tstp_tstp_cont, no_termios),
|
||||
(mock_shell_tstp_tstp_cont_cont, no_termios),
|
||||
])
|
||||
def test_foreground_background(test_fn, termios_on_or_off, tmpdir):
|
||||
"""Functional tests for foregrounding and backgrounding a logged process.
|
||||
|
||||
This ensures that things like SIGTTOU are not raised and that
|
||||
terminal settings are corrected on foreground/background and on
|
||||
process stop and start.
|
||||
|
||||
"""
|
||||
shell = PseudoShell(test_fn, simple_logger)
|
||||
log_path = str(tmpdir.join("log.txt"))
|
||||
|
||||
# run the shell test
|
||||
with termios_on_or_off():
|
||||
shell.start(log_path=log_path, debug=True)
|
||||
exitcode = shell.join()
|
||||
|
||||
# processes completed successfully
|
||||
assert exitcode == 0
|
||||
|
||||
# assert log was created
|
||||
assert os.path.exists(log_path)
|
||||
|
||||
|
||||
def synchronized_logger(**kwargs):
|
||||
"""Mock logger (child) process for testing log.keyboard_input.
|
||||
|
||||
This logger synchronizes with the parent process to test that 'v' can
|
||||
toggle output. It is used in ``test_foreground_background_output`` below.
|
||||
|
||||
"""
|
||||
def handler(signum, frame):
|
||||
running[0] = False
|
||||
signal.signal(signal.SIGUSR1, handler)
|
||||
|
||||
log_path = kwargs["log_path"]
|
||||
write_lock = kwargs["write_lock"]
|
||||
v_lock = kwargs["v_lock"]
|
||||
|
||||
running = [True]
|
||||
sys.stderr.write(os.getcwd() + "\n")
|
||||
with log_output(log_path) as logger:
|
||||
with logger.force_echo():
|
||||
print("forced output")
|
||||
|
||||
while running[0]:
|
||||
with write_lock:
|
||||
if v_lock.acquire(False): # non-blocking acquire
|
||||
print("off")
|
||||
v_lock.release()
|
||||
else:
|
||||
print("on") # lock held; v is toggled on
|
||||
time.sleep(1e-2)
|
||||
|
||||
|
||||
def mock_shell_v_v(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background_output."""
|
||||
write_lock = kwargs["write_lock"]
|
||||
v_lock = kwargs["v_lock"]
|
||||
|
||||
ctl.fg()
|
||||
ctl.wait_enabled()
|
||||
time.sleep(.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
v_lock.acquire() # enable v lock
|
||||
ctl.write(b'v') # toggle v on stdin
|
||||
time.sleep(.1)
|
||||
write_lock.release() # resume writing
|
||||
|
||||
time.sleep(.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
ctl.write(b'v') # toggle v on stdin
|
||||
time.sleep(.1)
|
||||
v_lock.release() # disable v lock
|
||||
write_lock.release() # resume writing
|
||||
time.sleep(.1)
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
|
||||
"""PseudoShell master function for test_foreground_background_output."""
|
||||
write_lock = kwargs["write_lock"]
|
||||
v_lock = kwargs["v_lock"]
|
||||
|
||||
ctl.fg()
|
||||
ctl.wait_disabled_fg()
|
||||
time.sleep(.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
v_lock.acquire() # enable v lock
|
||||
ctl.write(b'v\n') # toggle v on stdin
|
||||
time.sleep(.1)
|
||||
write_lock.release() # resume writing
|
||||
|
||||
time.sleep(.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
ctl.write(b'v\n') # toggle v on stdin
|
||||
time.sleep(.1)
|
||||
v_lock.release() # disable v lock
|
||||
write_lock.release() # resume writing
|
||||
time.sleep(.1)
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which("ps"), reason="requires ps utility")
|
||||
@pytest.mark.skipif(not termios, reason="requires termios support")
|
||||
@pytest.mark.parametrize('test_fn,termios_on_or_off', [
|
||||
(mock_shell_v_v, nullcontext),
|
||||
(mock_shell_v_v_no_termios, no_termios),
|
||||
])
|
||||
def test_foreground_background_output(
|
||||
test_fn, capfd, termios_on_or_off, tmpdir):
|
||||
"""Tests hitting 'v' toggles output, and that force_echo works."""
|
||||
shell = PseudoShell(test_fn, synchronized_logger)
|
||||
log_path = str(tmpdir.join("log.txt"))
|
||||
|
||||
# Locks for synchronizing with child
|
||||
write_lock = multiprocessing.Lock() # must be held by child to write
|
||||
v_lock = multiprocessing.Lock() # held while master is in v mode
|
||||
|
||||
with termios_on_or_off():
|
||||
shell.start(
|
||||
write_lock=write_lock,
|
||||
v_lock=v_lock,
|
||||
debug=True,
|
||||
log_path=log_path
|
||||
)
|
||||
|
||||
exitcode = shell.join()
|
||||
out, err = capfd.readouterr()
|
||||
print(err) # will be shown if something goes wrong
|
||||
print(out)
|
||||
|
||||
# processes completed successfully
|
||||
assert exitcode == 0
|
||||
|
||||
# split output into lines
|
||||
output = out.strip().split("\n")
|
||||
|
||||
# also get lines of log file
|
||||
assert os.path.exists(log_path)
|
||||
with open(log_path) as log:
|
||||
log = log.read().strip().split("\n")
|
||||
|
||||
# Master and child process coordinate with locks such that the child
|
||||
# writes "off" when echo is off, and "on" when echo is on. The
|
||||
# output should contain mostly "on" lines, but may contain an "off"
|
||||
# or two. This is because the master toggles echo by sending "v" on
|
||||
# stdin to the child, but this is not synchronized with our locks.
|
||||
# It's good enough for a test, though. We allow at most 2 "off"'s in
|
||||
# the output to account for the race.
|
||||
assert (
|
||||
['forced output', 'on'] == uniq(output) or
|
||||
output.count("off") <= 2 # if master_fd is a bit slow
|
||||
)
|
||||
|
||||
# log should be off for a while, then on, then off
|
||||
assert (
|
||||
['forced output', 'off', 'on', 'off'] == uniq(log) and
|
||||
log.count("off") > 2 # ensure some "off" lines were omitted
|
||||
)
|
@@ -11,6 +11,7 @@
|
||||
import spack.spec
|
||||
import spack.modules.tcl
|
||||
from spack.modules.common import UpstreamModuleIndex
|
||||
from spack.spec import Spec
|
||||
|
||||
import spack.error
|
||||
|
||||
@@ -183,3 +184,33 @@ def test_get_module_upstream():
|
||||
assert m1_path == '/path/to/a'
|
||||
finally:
|
||||
spack.modules.common.upstream_module_index = old_index
|
||||
|
||||
|
||||
def test_load_installed_package_not_in_repo(install_mockery, mock_fetch,
|
||||
monkeypatch):
|
||||
# Get a basic concrete spec for the trivial install package.
|
||||
spec = Spec('trivial-install-test-package')
|
||||
spec.concretize()
|
||||
assert spec.concrete
|
||||
|
||||
# Get the package
|
||||
pkg = spec.package
|
||||
|
||||
def find_nothing(*args):
|
||||
raise spack.repo.UnknownPackageError(
|
||||
'Repo package access is disabled for test')
|
||||
|
||||
try:
|
||||
pkg.do_install()
|
||||
|
||||
spec._package = None
|
||||
monkeypatch.setattr(spack.repo, 'get', find_nothing)
|
||||
with pytest.raises(spack.repo.UnknownPackageError):
|
||||
spec.package
|
||||
|
||||
module_path = spack.modules.common.get_module('tcl', spec, True)
|
||||
assert module_path
|
||||
pkg.do_uninstall()
|
||||
except Exception:
|
||||
pkg.remove_prefix()
|
||||
raise
|
||||
|
@@ -215,9 +215,10 @@ def test_suffixes(self, module_configuration, factory):
|
||||
|
||||
writer, spec = factory('mpileaks+debug arch=x86-linux')
|
||||
assert 'foo' in writer.layout.use_name
|
||||
assert 'foo-foo' not in writer.layout.use_name
|
||||
|
||||
writer, spec = factory('mpileaks~debug arch=x86-linux')
|
||||
assert 'bar' in writer.layout.use_name
|
||||
assert 'bar-foo' in writer.layout.use_name
|
||||
|
||||
def test_setup_environment(self, modulefile_content, module_configuration):
|
||||
"""Tests the internal set-up of run-time environment."""
|
||||
|
@@ -98,6 +98,10 @@ def test_all_same_but_archive_hash(self):
|
||||
assert spec1.package.content_hash(content=content1) != \
|
||||
spec2.package.content_hash(content=content2)
|
||||
|
||||
def test_parse_dynamic_function_call(self):
|
||||
spec = Spec("hash-test4").concretized()
|
||||
spec.package.content_hash()
|
||||
|
||||
# Below tests target direct imports of spack packages from the
|
||||
# spack.pkg namespace
|
||||
def test_import_package(self):
|
||||
|
@@ -31,6 +31,14 @@
|
||||
from spack.relocate import modify_macho_object, macho_get_paths
|
||||
|
||||
|
||||
def has_gpg():
|
||||
try:
|
||||
gpg = spack.util.gpg.Gpg.gpg()
|
||||
except spack.util.gpg.SpackGPGError:
|
||||
gpg = None
|
||||
return bool(gpg)
|
||||
|
||||
|
||||
def fake_fetchify(url, pkg):
|
||||
"""Fake the URL for a package so it downloads from a file."""
|
||||
fetcher = FetchStrategyComposite()
|
||||
@@ -38,6 +46,7 @@ def fake_fetchify(url, pkg):
|
||||
pkg.fetcher = fetcher
|
||||
|
||||
|
||||
@pytest.mark.skipif(not has_gpg(), reason='This test requires gpg')
|
||||
@pytest.mark.usefixtures('install_mockery', 'mock_gnupghome')
|
||||
def test_buildcache(mock_archive, tmpdir):
|
||||
# tweak patchelf to only do a download
|
||||
@@ -107,11 +116,6 @@ def test_buildcache(mock_archive, tmpdir):
|
||||
buildcache.buildcache(parser, args)
|
||||
|
||||
files = os.listdir(spec.prefix)
|
||||
assert 'link_to_dummy.txt' in files
|
||||
assert 'dummy.txt' in files
|
||||
assert os.path.realpath(
|
||||
os.path.join(spec.prefix, 'link_to_dummy.txt')
|
||||
) == os.path.realpath(os.path.join(spec.prefix, 'dummy.txt'))
|
||||
|
||||
# create build cache with relative path and signing
|
||||
args = parser.parse_args(
|
||||
@@ -129,13 +133,6 @@ def test_buildcache(mock_archive, tmpdir):
|
||||
args = parser.parse_args(['install', '-f', str(pkghash)])
|
||||
buildcache.buildcache(parser, args)
|
||||
|
||||
files = os.listdir(spec.prefix)
|
||||
assert 'link_to_dummy.txt' in files
|
||||
assert 'dummy.txt' in files
|
||||
assert os.path.realpath(
|
||||
os.path.join(spec.prefix, 'link_to_dummy.txt')
|
||||
) == os.path.realpath(os.path.join(spec.prefix, 'dummy.txt'))
|
||||
|
||||
else:
|
||||
# create build cache without signing
|
||||
args = parser.parse_args(
|
||||
@@ -152,10 +149,6 @@ def test_buildcache(mock_archive, tmpdir):
|
||||
files = os.listdir(spec.prefix)
|
||||
assert 'link_to_dummy.txt' in files
|
||||
assert 'dummy.txt' in files
|
||||
assert os.path.realpath(
|
||||
os.path.join(spec.prefix, 'link_to_dummy.txt')
|
||||
) == os.path.realpath(os.path.join(spec.prefix, 'dummy.txt'))
|
||||
|
||||
# test overwrite install without verification
|
||||
args = parser.parse_args(['install', '-f', '-u', str(pkghash)])
|
||||
buildcache.buildcache(parser, args)
|
||||
@@ -242,7 +235,7 @@ def test_relocate_links(tmpdir):
|
||||
old_src = os.path.join(old_dir, filename)
|
||||
os.symlink(old_src, filename)
|
||||
filenames = [filename]
|
||||
new_dir = '/opt/rh/devtoolset/'
|
||||
new_dir = '/opt/rh/devtoolset'
|
||||
relocate_links(filenames, old_dir, new_dir)
|
||||
assert os.path.realpath(filename) == os.path.join(new_dir, filename)
|
||||
|
||||
|
@@ -16,7 +16,7 @@
|
||||
from spack.parse import Token
|
||||
from spack.spec import Spec
|
||||
from spack.spec import SpecParseError, RedundantSpecError
|
||||
from spack.spec import AmbiguousHashError, InvalidHashError
|
||||
from spack.spec import AmbiguousHashError, InvalidHashError, NoSuchHashError
|
||||
from spack.spec import DuplicateArchitectureError
|
||||
from spack.spec import DuplicateDependencyError, DuplicateCompilerSpecError
|
||||
from spack.spec import SpecFilenameError, NoSuchSpecFileError
|
||||
@@ -363,9 +363,9 @@ def test_nonexistent_hash(self, database):
|
||||
hashes = [s._hash for s in specs]
|
||||
assert no_such_hash not in [h[:len(no_such_hash)] for h in hashes]
|
||||
|
||||
# self._check_raises(NoSuchHashError, [
|
||||
# '/' + no_such_hash,
|
||||
# 'mpileaks /' + no_such_hash])
|
||||
self._check_raises(NoSuchHashError, [
|
||||
'/' + no_such_hash,
|
||||
'mpileaks /' + no_such_hash])
|
||||
|
||||
@pytest.mark.db
|
||||
def test_redundant_spec(self, database):
|
||||
|
@@ -32,7 +32,7 @@ def pkg_factory():
|
||||
def factory(url, urls):
|
||||
|
||||
def fn(v):
|
||||
main_url = url or urls.pop(0)
|
||||
main_url = url or urls[0]
|
||||
return spack.url.substitute_version(main_url, v)
|
||||
|
||||
return Pkg(
|
||||
|
@@ -65,7 +65,7 @@ def environment_modifications_for_spec(spec, view=None):
|
||||
This list is specific to the location of the spec or its projection in
|
||||
the view."""
|
||||
spec = spec.copy()
|
||||
if view:
|
||||
if view and not spec.external:
|
||||
spec.prefix = prefix.Prefix(view.view().get_projection_for_spec(spec))
|
||||
|
||||
# generic environment modifications determined by inspecting the spec
|
||||
|
@@ -597,12 +597,15 @@ def from_sourcing_file(filename, *arguments, **kwargs):
|
||||
'SHLVL', '_', 'PWD', 'OLDPWD', 'PS1', 'PS2', 'ENV',
|
||||
# Environment modules v4
|
||||
'LOADEDMODULES', '_LMFILES_', 'BASH_FUNC_module()', 'MODULEPATH',
|
||||
'MODULES_(.*)', r'(\w*)_mod(quar|share)'
|
||||
'MODULES_(.*)', r'(\w*)_mod(quar|share)',
|
||||
# Lmod configuration
|
||||
r'LMOD_(.*)', 'MODULERCFILE'
|
||||
])
|
||||
|
||||
# Compute the environments before and after sourcing
|
||||
before = sanitize(
|
||||
dict(os.environ), blacklist=blacklist, whitelist=whitelist
|
||||
environment_after_sourcing_files(os.devnull, **kwargs),
|
||||
blacklist=blacklist, whitelist=whitelist
|
||||
)
|
||||
file_and_args = (filename,) + arguments
|
||||
after = sanitize(
|
||||
|
@@ -41,8 +41,23 @@ def __init__(self, spec):
|
||||
self.spec = spec
|
||||
|
||||
def is_directive(self, node):
|
||||
"""Check to determine if the node is a valid directive
|
||||
|
||||
Directives are assumed to be represented in the AST as a named function
|
||||
call expression. This means that they will NOT be represented by a
|
||||
named function call within a function call expression (e.g., as
|
||||
callbacks are sometimes represented).
|
||||
|
||||
Args:
|
||||
node (AST): the AST node being checked
|
||||
|
||||
Returns:
|
||||
(bool): ``True`` if the node represents a known directive,
|
||||
``False`` otherwise
|
||||
"""
|
||||
return (isinstance(node, ast.Expr) and
|
||||
node.value and isinstance(node.value, ast.Call) and
|
||||
isinstance(node.value.func, ast.Name) and
|
||||
node.value.func.id in spack.directives.__all__)
|
||||
|
||||
def is_spack_attr(self, node):
|
||||
|
@@ -1,7 +1,7 @@
|
||||
# content of pytest.ini
|
||||
[pytest]
|
||||
addopts = --durations=20 -ra
|
||||
testpaths = .
|
||||
testpaths = lib/spack/spack/test
|
||||
python_files = *.py
|
||||
markers =
|
||||
db: tests that require creating a DB
|
@@ -226,7 +226,7 @@ _config_sections() {
|
||||
_extensions() {
|
||||
if [[ -z "${SPACK_EXTENSIONS:-}" ]]
|
||||
then
|
||||
SPACK_EXTENSIONS="aspell go-bootstrap go icedtea jdk kim-api lua matlab mofem-cephas octave openjdk perl python r ruby rust tcl yorick"
|
||||
SPACK_EXTENSIONS="$(spack extensions)"
|
||||
fi
|
||||
SPACK_COMPREPLY="$SPACK_EXTENSIONS"
|
||||
}
|
||||
|
@@ -66,7 +66,7 @@ case cd:
|
||||
[ $#_sp_args -gt 0 ] && set _sp_arg = ($_sp_args[1])
|
||||
shift _sp_args
|
||||
|
||||
if ( "$_sp_arg" == "-h" ) then
|
||||
if ( "$_sp_arg" == "-h" || "$_sp_args" == "--help" ) then
|
||||
\spack cd -h
|
||||
else
|
||||
cd `\spack location $_sp_arg $_sp_args`
|
||||
@@ -78,7 +78,7 @@ case env:
|
||||
set _sp_arg=""
|
||||
[ $#_sp_args -gt 0 ] && set _sp_arg = ($_sp_args[1])
|
||||
|
||||
if ( "$_sp_arg" == "-h" ) then
|
||||
if ( "$_sp_arg" == "-h" || "$_sp_arg" == "--help" ) then
|
||||
\spack env -h
|
||||
else
|
||||
switch ($_sp_arg)
|
||||
@@ -86,12 +86,18 @@ case env:
|
||||
set _sp_env_arg=""
|
||||
[ $#_sp_args -gt 1 ] && set _sp_env_arg = ($_sp_args[2])
|
||||
|
||||
if ( "$_sp_env_arg" == "" || "$_sp_args" =~ "*--sh*" || "$_sp_args" =~ "*--csh*" || "$_sp_args" =~ "*-h*" ) then
|
||||
# no args or args contain -h/--help, --sh, or --csh: just execute
|
||||
# Space needed here to differentiate between `-h`
|
||||
# argument and environments with "-h" in the name.
|
||||
if ( "$_sp_env_arg" == "" || \
|
||||
"$_sp_args" =~ "* --sh*" || \
|
||||
"$_sp_args" =~ "* --csh*" || \
|
||||
"$_sp_args" =~ "* -h*" || \
|
||||
"$_sp_args" =~ "* --help*" ) then
|
||||
# No args or args contain --sh, --csh, or -h/--help: just execute.
|
||||
\spack $_sp_flags env $_sp_args
|
||||
else
|
||||
shift _sp_args # consume 'activate' or 'deactivate'
|
||||
# actual call to activate: source the output
|
||||
# Actual call to activate: source the output.
|
||||
eval `\spack $_sp_flags env activate --csh $_sp_args`
|
||||
endif
|
||||
breaksw
|
||||
@@ -99,30 +105,40 @@ case env:
|
||||
set _sp_env_arg=""
|
||||
[ $#_sp_args -gt 1 ] && set _sp_env_arg = ($_sp_args[2])
|
||||
|
||||
if ( "$_sp_env_arg" != "" ) then
|
||||
# with args: execute the command
|
||||
# Space needed here to differentiate between `--sh`
|
||||
# argument and environments with "--sh" in the name.
|
||||
if ( "$_sp_args" =~ "* --sh*" || \
|
||||
"$_sp_args" =~ "* --csh*" ) then
|
||||
# Args contain --sh or --csh: just execute.
|
||||
\spack $_sp_flags env $_sp_args
|
||||
else if ( "$_sp_env_arg" != "" ) then
|
||||
# Any other arguments are an error or -h/--help: just run help.
|
||||
\spack $_sp_flags env deactivate -h
|
||||
else
|
||||
# no args: source the output
|
||||
# No args: source the output of the command.
|
||||
eval `\spack $_sp_flags env deactivate --csh`
|
||||
endif
|
||||
breaksw
|
||||
default:
|
||||
echo default
|
||||
\spack $_sp_flags env $_sp_args
|
||||
breaksw
|
||||
endsw
|
||||
endif
|
||||
breaksw
|
||||
|
||||
case load:
|
||||
case unload:
|
||||
# Space in `-h` portion is important for differentiating -h option
|
||||
# from variants that begin with "h" or packages with "-h" in name
|
||||
if ( "$_sp_spec" =~ "*--sh*" || "$_sp_spec" =~ "*--csh*" || \
|
||||
" $_sp_spec" =~ "* -h*" || "$_sp_spec" =~ "*--help*") then
|
||||
# IF a shell is given, print shell output
|
||||
# Get --sh, --csh, -h, or --help arguments.
|
||||
# Space needed here to differentiate between `-h`
|
||||
# argument and specs with "-h" in the name.
|
||||
if ( " $_sp_spec" =~ "* --sh*" || \
|
||||
" $_sp_spec" =~ "* --csh*" || \
|
||||
" $_sp_spec" =~ "* -h*" || \
|
||||
" $_sp_spec" =~ "* --help*") then
|
||||
# Args contain --sh, --csh, or -h/--help: just execute.
|
||||
\spack $_sp_flags $_sp_subcommand $_sp_spec
|
||||
else
|
||||
# otherwise eval with csh
|
||||
# Otherwise, eval with csh.
|
||||
eval `\spack $_sp_flags $_sp_subcommand --csh $_sp_spec || \
|
||||
echo "exit 1"`
|
||||
endif
|
||||
|
@@ -37,16 +37,12 @@ bin/spack -h
|
||||
bin/spack help -a
|
||||
|
||||
# Profile and print top 20 lines for a simple call to spack spec
|
||||
bin/spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170
|
||||
spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Run unit tests with code coverage
|
||||
#-----------------------------------------------------------
|
||||
extra_args=""
|
||||
if [[ -n "$@" ]]; then
|
||||
extra_args="-k $@"
|
||||
fi
|
||||
$coverage_run bin/spack test -x --verbose "$extra_args"
|
||||
$coverage_run $(which spack) test -x --verbose
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Run tests for setup-env.sh
|
||||
|
@@ -115,31 +115,44 @@ spack() {
|
||||
else
|
||||
case $_sp_arg in
|
||||
activate)
|
||||
_a="$@"
|
||||
# Get --sh, --csh, or -h/--help arguments.
|
||||
# Space needed here because regexes start with a space
|
||||
# and `-h` may be the only argument.
|
||||
_a=" $@"
|
||||
# Space needed here to differentiate between `-h`
|
||||
# argument and environments with "-h" in the name.
|
||||
# Also see: https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html#Shell-Parameter-Expansion
|
||||
if [ -z ${1+x} ] || \
|
||||
[ "${_a#*--sh}" != "$_a" ] || \
|
||||
[ "${_a#*--csh}" != "$_a" ] || \
|
||||
[ "${_a#*-h}" != "$_a" ];
|
||||
[ "${_a#* --sh}" != "$_a" ] || \
|
||||
[ "${_a#* --csh}" != "$_a" ] || \
|
||||
[ "${_a#* -h}" != "$_a" ] || \
|
||||
[ "${_a#* --help}" != "$_a" ];
|
||||
then
|
||||
# no args or args contain -h/--help, --sh, or --csh: just execute
|
||||
# No args or args contain --sh, --csh, or -h/--help: just execute.
|
||||
command spack env activate "$@"
|
||||
else
|
||||
# actual call to activate: source the output
|
||||
# Actual call to activate: source the output.
|
||||
eval $(command spack $_sp_flags env activate --sh "$@")
|
||||
fi
|
||||
;;
|
||||
deactivate)
|
||||
_a="$@"
|
||||
if [ "${_a#*--sh}" != "$_a" ] || \
|
||||
[ "${_a#*--csh}" != "$_a" ];
|
||||
# Get --sh, --csh, or -h/--help arguments.
|
||||
# Space needed here becauses regexes start with a space
|
||||
# and `-h` may be the only argument.
|
||||
_a=" $@"
|
||||
# Space needed here to differentiate between `--sh`
|
||||
# argument and environments with "--sh" in the name.
|
||||
# Also see: https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html#Shell-Parameter-Expansion
|
||||
if [ "${_a#* --sh}" != "$_a" ] || \
|
||||
[ "${_a#* --csh}" != "$_a" ];
|
||||
then
|
||||
# just execute the command if --sh or --csh are provided
|
||||
# Args contain --sh or --csh: just execute.
|
||||
command spack env deactivate "$@"
|
||||
elif [ -n "$*" ]; then
|
||||
# any other arguments are an error or help, so just run help
|
||||
# Any other arguments are an error or -h/--help: just run help.
|
||||
command spack env deactivate -h
|
||||
else
|
||||
# no args: source the output of the command
|
||||
# No args: source the output of the command.
|
||||
eval $(command spack $_sp_flags env deactivate --sh)
|
||||
fi
|
||||
;;
|
||||
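The ${_a#* --sh} tests above rely on the parameter-expansion rule linked in the comment: ${var#pattern} strips the shortest matching prefix, so the expansion only differs from $var when the pattern (here "anything, then a space, then --sh") actually matched. A small sketch of the idiom with a hypothetical argument string:

    # Sketch: substring test via prefix removal (POSIX parameter expansion).
    _a=" activate --sh myenv"
    if [ "${_a#* --sh}" != "$_a" ]; then
        echo "--sh flag present"     # prefix removal changed the string
    else
        echo "no --sh flag"          # nothing matched, string unchanged
    fi
    # With _a=" activate my--shell-env" the test stays false, because the
    # embedded "--sh" is not preceded by a space.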
@@ -151,17 +164,19 @@ spack() {
return
;;
"load"|"unload")
# get --sh, --csh, --help, or -h arguments
# space is important for -h case to differentiate between `-h`
# argument and specs with "-h" in package name or variant settings
# Get --sh, --csh, -h, or --help arguments.
# Space needed here becauses regexes start with a space
# and `-h` may be the only argument.
_a=" $@"
# Space needed here to differentiate between `-h`
# argument and specs with "-h" in the name.
# Also see: https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html#Shell-Parameter-Expansion
if [ "${_a#* --sh}" != "$_a" ] || \
[ "${_a#* --csh}" != "$_a" ] || \
[ "${_a#* -h}" != "$_a" ] || \
[ "${_a#* --help}" != "$_a" ];
then
# just execute the command if --sh or --csh are provided
# or if the -h or --help arguments are provided
# Args contain --sh, --csh, or -h/--help: just execute.
command spack $_sp_flags $_sp_subcommand "$@"
else
eval $(command spack $_sp_flags $_sp_subcommand --sh "$@" || \
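The closing line of this hunk is cut off by the diff view (it ends in a continuation backslash); the csh hunk earlier shows the same idiom completed with a fallback of echo "exit 1". The point of the || fallback is that when the generating command fails, the command substitution still hands eval a command that signals the failure, instead of silently evaluating empty output. A hedged sketch of the general pattern, with false as a stand-in generator and return 1 so the failure becomes the wrapper function's exit status:

    # Sketch only: propagate a generator failure through eval.
    load_env() {
        eval "$(false || echo 'return 1')"   # generator failed, so eval runs "return 1"
    }
    load_env || echo "load_env reported failure"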
@@ -212,7 +227,8 @@ _spack_determine_shell() {
# If procfs is present this seems a more reliable
# way to detect the current shell
_sp_exe=$(readlink /proc/$$/exe)
basename ${_sp_exe}
# Shell may contain number, like zsh5 instead of zsh
basename ${_sp_exe} | tr -d '0123456789'
elif [ -n "${BASH:-}" ]; then
echo bash
elif [ -n "${ZSH_NAME:-}" ]; then
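The added tr -d '0123456789' handles systems where the shell binary name carries a version suffix (the comment's zsh5 example), so the detected name matches what the rest of the script expects. A quick illustration with a hypothetical path:

    # Sketch: normalize a versioned shell binary name.
    _sp_exe=/usr/bin/zsh5                          # hypothetical readlink /proc/$$/exe result
    basename "${_sp_exe}" | tr -d '0123456789'     # prints "zsh"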
@@ -226,7 +226,7 @@ _config_sections() {
_extensions() {
if [[ -z "${SPACK_EXTENSIONS:-}" ]]
then
SPACK_EXTENSIONS="aspell go-bootstrap go icedtea jdk kim-api lua matlab mofem-cephas octave openjdk perl python r ruby rust tcl yorick"
SPACK_EXTENSIONS="$(spack extensions)"
fi
SPACK_COMPREPLY="$SPACK_EXTENSIONS"
}
@@ -945,7 +945,7 @@ _spack_info() {
_spack_install() {
if $list_options
then
SPACK_COMPREPLY="-h --help --only -u --until -j --jobs --overwrite --keep-prefix --keep-stage --dont-restage --use-cache --no-cache --cache-only --no-check-signature --show-log-on-error --source -n --no-checksum -v --verbose --fake --only-concrete -f --file --upstream -g --global --clean --dirty --test --run-tests --log-format --log-file --help-cdash --cdash-upload-url --cdash-build --cdash-site --cdash-track --cdash-buildstamp -y --yes-to-all"
SPACK_COMPREPLY="-h --help --only -u --until -j --jobs --overwrite --keep-prefix --keep-stage --dont-restage --use-cache --no-cache --cache-only --no-check-signature --show-log-on-error --source -n --no-checksum -v --verbose --fake --only-concrete -f --file --clean --dirty --test --run-tests --log-format --log-file --help-cdash --cdash-upload-url --cdash-build --cdash-site --cdash-track --cdash-buildstamp -y --yes-to-all"
else
_all_packages
fi

@@ -1272,7 +1272,7 @@ _spack_pydoc() {
_spack_python() {
if $list_options
then
SPACK_COMPREPLY="-h --help -c"
SPACK_COMPREPLY="-h --help -c -m"
else
SPACK_COMPREPLY=""
fi

@@ -1419,7 +1419,7 @@ _spack_test() {
_spack_uninstall() {
if $list_options
then
SPACK_COMPREPLY="-h --help -f --force -R --dependents -y --yes-to-all -a --all -u --upstream -g --global"
SPACK_COMPREPLY="-h --help -f --force -R --dependents -y --yes-to-all -a --all"
else
_installed_packages
fi
@@ -49,4 +49,6 @@ def build(self, spec, prefix):
pass

def install(self, spec, prefix):
pass
# sanity_check_prefix requires something in the install directory
# Test requires overriding the one provided by `AutotoolsPackage`
mkdirp(prefix.bin)

@@ -13,6 +13,3 @@ class B(Package):
url = "http://www.example.com/b-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
def install(self, spec, prefix):
pass

@@ -59,6 +59,3 @@ class Boost(Package):
description="Build the Boost Graph library")
variant('taggedlayout', default=False,
description="Augment library names with build options")
def install(self, spec, prefix):
pass

@@ -13,6 +13,3 @@ class C(Package):
url = "http://www.example.com/c-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
def install(self, spec, prefix):
pass

@@ -17,6 +17,3 @@ class ConflictingDependent(Package):
version('1.0', '0123456789abcdef0123456789abcdef')
depends_on('dependency-install@:1.0')
def install(self, spec, prefix):
pass

@@ -25,6 +25,3 @@ class DepDiamondPatchMid1(Package):
# single patch file in repo
depends_on('patch', patches='mid1.patch')
def install(self, spec, prefix):
pass

@@ -28,6 +28,3 @@ class DepDiamondPatchMid2(Package):
patch('http://example.com/urlpatch.patch',
sha256='mid21234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234'), # noqa: E501
])
def install(self, spec, prefix):
pass

@@ -27,6 +27,3 @@ class DepDiamondPatchTop(Package):
depends_on('patch', patches='top.patch')
depends_on('dep-diamond-patch-mid1')
depends_on('dep-diamond-patch-mid2')
def install(self, spec, prefix):
pass

@@ -13,6 +13,3 @@ class DevelopTest(Package):
version('develop', git='https://github.com/dummy/repo.git')
version('0.2.15', 'b1190f3d3471685f17cfd1ec1d252ac9')
def install(self, spec, prefix):
pass

@@ -13,6 +13,3 @@ class DevelopTest2(Package):
version('0.2.15.develop', git='https://github.com/dummy/repo.git')
version('0.2.15', 'b1190f3d3471685f17cfd1ec1d252ac9')
def install(self, spec, prefix):
pass

@@ -13,6 +13,3 @@ class DirectMpich(Package):
version('1.0', 'foobarbaz')
depends_on('mpich')
def install(self, spec, prefix):
pass

@@ -12,6 +12,3 @@ class DtDiamondBottom(Package):
url = "http://www.example.com/dt-diamond-bottom-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
def install(self, spec, prefix):
pass

@@ -14,6 +14,3 @@ class DtDiamondLeft(Package):
version('1.0', '0123456789abcdef0123456789abcdef')
depends_on('dt-diamond-bottom', type='build')
def install(self, spec, prefix):
pass

@@ -14,6 +14,3 @@ class DtDiamondRight(Package):
version('1.0', '0123456789abcdef0123456789abcdef')
depends_on('dt-diamond-bottom', type=('build', 'link', 'run'))
def install(self, spec, prefix):
pass

@@ -15,6 +15,3 @@ class DtDiamond(Package):
depends_on('dt-diamond-left')
depends_on('dt-diamond-right')
def install(self, spec, prefix):
pass

@@ -18,6 +18,3 @@ class Dtbuild1(Package):
depends_on('dtbuild2', type='build')
depends_on('dtlink2')
depends_on('dtrun2', type='run')
def install(self, spec, prefix):
pass
Some files were not shown because too many files have changed in this diff.