spack chain (#8772)

* initial work to make use of an 'upstream' Spack installation: this uses the upstream installation's DB to check whether a package is already installed (see the lookup sketch after this list)

* need to query upstream dbs when adding new record to local db

* prevent reindexing upstream DBs

* set prefix on specs read from DB based on path stored in install record

* check that Spack does not install packages that are recorded as installed in an upstream db

* externals do not add their path to install records - need to use 'external_path' to get path of upstream externals

* views need to check for upstream installations when linking metadata

* package and spec now calculate upstream installation properties on demand themselves, rather than depending on concretization to set these properties up front. The added tests for upstream installations don't work with this new strategy, so they need to be updated

* only refresh modules for local specs (not those installed upstream); optionally generate local module files for packages installed upstream

* when a user tries to locate a module file for a package installed upstream, tell them to use the upstream spack instance to locate it

* support recursive upstream databases (allow upstream databases to use their own upstream databases)

* move upstream config into a separate file with its own schema; each entry now also includes a name

* metadata_dir is no longer customizable on a per-instance basis for YamlDirectoryLayout

* treat metadata_dir as an instance variable but don't set it from kwargs; this follows several other hardcoded variables that must be consistent between upstream and downstream DBs. Also update DirectoryLayout.metadata_path to work entirely with Spec.prefix, since Spec.prefix is set from the DB when available (so metadata_path was duplicating that logic)
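For orientation, a minimal sketch of the lookup convention the new database code follows: the local DB is always consulted first, then each upstream DB in a fixed order, so a hash installed both locally and upstream resolves to the local record. The function below is a simplified, hypothetical stand-in for the query_by_spec_hash logic in the diff.

# Sketch only: simplified stand-in for Database.query_by_spec_hash, which
# returns (is_upstream, record) by checking local data before any upstream DB.
def lookup_record(local_data, upstream_dbs, hash_key):
    """Return (is_upstream, record) for a DAG hash, or (False, None)."""
    if hash_key in local_data:
        return False, local_data[hash_key]
    for db in upstream_dbs:
        if hash_key in db._data:
            return True, db._data[hash_key]
    return False, None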
Peter Scheibel 2019-03-27 13:06:46 -07:00 committed by Greg Becker
parent 298a55b28f
commit 99f35c3338
21 changed files with 791 additions and 171 deletions


@ -32,6 +32,11 @@ def setup_parser(subparser):
help='delete the module file tree before refresh',
action='store_true'
)
refresh_parser.add_argument(
'--upstream-modules',
help='generate modules for packages installed upstream',
action='store_true'
)
arguments.add_common_arguments(
refresh_parser, ['constraint', 'yes_to_all']
)
@ -125,10 +130,14 @@ def loads(module_type, specs, args, out=sys.stdout):
)
module_cls = spack.modules.module_types[module_type]
modules = [
(spec, module_cls(spec).layout.use_name)
for spec in specs if os.path.exists(module_cls(spec).layout.filename)
]
modules = list()
for spec in specs:
if os.path.exists(module_cls(spec).layout.filename):
modules.append((spec, module_cls(spec).layout.use_name))
elif spec.package.installed_upstream:
tty.debug("Using upstream module for {0}".format(spec))
module = spack.modules.common.upstream_module(spec, module_type)
modules.append((spec, module.use_name))
module_commands = {
'tcl': 'module load ',
@ -159,6 +168,12 @@ def find(module_type, specs, args):
spec = one_spec_or_raise(specs)
if spec.package.installed_upstream:
module = spack.modules.common.upstream_module(spec, module_type)
if module:
print(module.path)
return
# Check if the module file is present
def module_exists(spec):
writer = spack.modules.module_types[module_type](spec)
@ -232,6 +247,9 @@ def refresh(module_type, specs, args):
tty.msg('No package matches your query')
return
if not args.upstream_modules:
specs = list(s for s in specs if not s.package.installed_upstream)
if not args.yes_to_all:
msg = 'You are about to regenerate {types} module files for:\n'
tty.msg(msg.format(types=module_type))
@ -276,6 +294,7 @@ def refresh(module_type, specs, args):
# If we arrived here we have at least one writer
module_type_root = writers[0].layout.dirname()
spack.modules.common.generate_module_index(module_type_root, writers)
# Proceed regenerating module files
tty.msg('Regenerating {name} module files'.format(name=module_type))
if os.path.isdir(module_type_root) and args.delete_tree:


@ -80,7 +80,7 @@ def find_matching_specs(env, specs, allow_multiple_matches=False, force=False):
specs_from_cli = []
has_errors = False
for spec in specs:
matching = spack.store.db.query(spec, hashes=hashes)
matching = spack.store.db.query_local(spec, hashes=hashes)
# For each spec provided, make sure it refers to only one package.
# Fail and ask user to be unambiguous if it doesn't
if not allow_multiple_matches and len(matching) > 1:


@ -55,6 +55,7 @@
import spack.schema.packages
import spack.schema.modules
import spack.schema.config
import spack.schema.upstreams
from spack.error import SpackError
# Hacked yaml for configuration files preserves line numbers.
@ -69,6 +70,7 @@
'packages': spack.schema.packages.schema,
'modules': spack.schema.modules.schema,
'config': spack.schema.config.schema,
'upstreams': spack.schema.upstreams.schema
}
#: Builtin paths to configuration files in Spack


@ -135,12 +135,23 @@ def from_dict(cls, spec, dictionary):
return InstallRecord(spec, **d)
class ForbiddenLockError(SpackError):
"""Raised when an upstream DB attempts to acquire a lock"""
class ForbiddenLock(object):
def __getattribute__(self, name):
raise ForbiddenLockError(
"Cannot access attribute '{0}' of lock".format(name))
class Database(object):
"""Per-process lock objects for each install prefix."""
_prefix_locks = {}
def __init__(self, root, db_dir=None):
def __init__(self, root, db_dir=None, upstream_dbs=None,
is_upstream=False):
"""Create a Database for Spack installations under ``root``.
A Database is a cache of Specs data from ``$prefix/spec.yaml``
@ -183,6 +194,13 @@ def __init__(self, root, db_dir=None):
if not os.path.exists(self._db_dir):
mkdirp(self._db_dir)
self.is_upstream = is_upstream
if self.is_upstream:
self.lock = ForbiddenLock()
else:
self.lock = Lock(self._lock_path)
# initialize rest of state.
self.db_lock_timeout = (
spack.config.get('config:db_lock_timeout') or _db_lock_timeout)
@ -198,9 +216,16 @@ def __init__(self, root, db_dir=None):
default_timeout=self.db_lock_timeout)
self._data = {}
self.upstream_dbs = list(upstream_dbs) if upstream_dbs else []
# whether there was an error at the start of a read transaction
self._error = None
# For testing: if this is true, an exception is thrown when missing
# dependencies are detected (rather than just printing a warning
# message)
self._fail_when_missing_deps = False
def write_transaction(self):
"""Get a write lock context manager for use in a `with` block."""
return WriteTransaction(self.lock, self._read, self._write)
@ -311,23 +336,56 @@ def _read_spec_from_dict(self, hash_key, installs):
spec = spack.spec.Spec.from_node_dict(spec_dict)
return spec
def db_for_spec_hash(self, hash_key):
with self.read_transaction():
if hash_key in self._data:
return self
for db in self.upstream_dbs:
if hash_key in db._data:
return db
def query_by_spec_hash(self, hash_key, data=None):
if data and hash_key in data:
return False, data[hash_key]
if not data:
with self.read_transaction():
if hash_key in self._data:
return False, self._data[hash_key]
for db in self.upstream_dbs:
if hash_key in db._data:
return True, db._data[hash_key]
return False, None
def _assign_dependencies(self, hash_key, installs, data):
# Add dependencies from other records in the install DB to
# form a full spec.
spec = data[hash_key].spec
spec_dict = installs[hash_key]['spec']
if 'dependencies' in spec_dict[spec.name]:
yaml_deps = spec_dict[spec.name]['dependencies']
for dname, dhash, dtypes in spack.spec.Spec.read_yaml_dep_specs(
yaml_deps):
if dhash not in data:
tty.warn("Missing dependency not in database: ",
"%s needs %s-%s" % (
spec.cformat('$_$/'), dname, dhash[:7]))
# It is important that we always check upstream installations
# in the same order, and that we always check the local
# installation first: if a downstream Spack installs a package
# then dependents in that installation could be using it.
# If a hash is installed locally and upstream, there isn't
# enough information to determine which one a local package
# depends on, so the convention ensures that this isn't an
# issue.
upstream, record = self.query_by_spec_hash(dhash, data=data)
child = record.spec if record else None
if not child:
msg = ("Missing dependency not in database: "
"%s needs %s-%s" % (
spec.cformat('$_$/'), dname, dhash[:7]))
if self._fail_when_missing_deps:
raise MissingDependenciesError(msg)
tty.warn(msg)
continue
child = data[dhash].spec
spec._add_dependency(child, dtypes)
def _read_from_file(self, stream, format='json'):
@ -407,7 +465,6 @@ def invalid_record(hash_key, error):
# TODO: would a more immutable spec implementation simplify
# this?
data[hash_key] = InstallRecord.from_dict(spec, rec)
except Exception as e:
invalid_record(hash_key, e)
@ -415,6 +472,8 @@ def invalid_record(hash_key, error):
for hash_key in data:
try:
self._assign_dependencies(hash_key, installs, data)
except MissingDependenciesError:
raise
except Exception as e:
invalid_record(hash_key, e)
@ -434,6 +493,10 @@ def reindex(self, directory_layout):
Locks the DB if it isn't locked already.
"""
if self.is_upstream:
raise UpstreamDatabaseLockingError(
"Cannot reindex an upstream database")
# Special transaction to avoid recursive reindex calls and to
# ignore errors if we need to rebuild a corrupt database.
def _read_suppress_error():
@ -456,89 +519,93 @@ def _read_suppress_error():
)
self._error = None
# Read first the `spec.yaml` files in the prefixes. They should be
# considered authoritative with respect to DB reindexing, as
# entries in the DB may be corrupted in a way that still makes
# them readable. If we considered DB entries authoritative
# instead, we would perpetuate errors over a reindex.
old_data = self._data
try:
# Initialize data in the reconstructed DB
self._data = {}
# Start inspecting the installed prefixes
processed_specs = set()
for spec in directory_layout.all_specs():
# Try to recover explicit value from old DB, but
# default it to True if DB was corrupt. This is
# just to be conservative in case a command like
# "autoremove" is run by the user after a reindex.
tty.debug(
'RECONSTRUCTING FROM SPEC.YAML: {0}'.format(spec))
explicit = True
inst_time = os.stat(spec.prefix).st_ctime
if old_data is not None:
old_info = old_data.get(spec.dag_hash())
if old_info is not None:
explicit = old_info.explicit
inst_time = old_info.installation_time
extra_args = {
'explicit': explicit,
'installation_time': inst_time
}
self._add(spec, directory_layout, **extra_args)
processed_specs.add(spec)
for key, entry in old_data.items():
# We already took care of this spec using
# `spec.yaml` from its prefix.
if entry.spec in processed_specs:
msg = 'SKIPPING RECONSTRUCTION FROM OLD DB: {0}'
msg += ' [already reconstructed from spec.yaml]'
tty.debug(msg.format(entry.spec))
continue
# If we arrived here it very likely means that
# we have external specs that are not dependencies
# of other specs. This may be the case for externally
# installed compilers or externally installed
# applications.
tty.debug(
'RECONSTRUCTING FROM OLD DB: {0}'.format(entry.spec))
try:
layout = spack.store.layout
if entry.spec.external:
layout = None
install_check = True
else:
install_check = layout.check_installed(entry.spec)
if install_check:
kwargs = {
'spec': entry.spec,
'directory_layout': layout,
'explicit': entry.explicit,
'installation_time': entry.installation_time # noqa: E501
}
self._add(**kwargs)
processed_specs.add(entry.spec)
except Exception as e:
# Something went wrong, so the spec was not restored
# from old data
tty.debug(e.message)
pass
self._check_ref_counts()
self._construct_from_directory_layout(
directory_layout, old_data)
except BaseException:
# If anything explodes, restore old data, skip write.
self._data = old_data
raise
def _construct_from_directory_layout(self, directory_layout, old_data):
# Read first the `spec.yaml` files in the prefixes. They should be
# considered authoritative with respect to DB reindexing, as
# entries in the DB may be corrupted in a way that still makes
# them readable. If we considered DB entries authoritative
# instead, we would perpetuate errors over a reindex.
with directory_layout.disable_upstream_check():
# Initialize data in the reconstructed DB
self._data = {}
# Start inspecting the installed prefixes
processed_specs = set()
for spec in directory_layout.all_specs():
# Try to recover explicit value from old DB, but
# default it to True if DB was corrupt. This is
# just to be conservative in case a command like
# "autoremove" is run by the user after a reindex.
tty.debug(
'RECONSTRUCTING FROM SPEC.YAML: {0}'.format(spec))
explicit = True
inst_time = os.stat(spec.prefix).st_ctime
if old_data is not None:
old_info = old_data.get(spec.dag_hash())
if old_info is not None:
explicit = old_info.explicit
inst_time = old_info.installation_time
extra_args = {
'explicit': explicit,
'installation_time': inst_time
}
self._add(spec, directory_layout, **extra_args)
processed_specs.add(spec)
for key, entry in old_data.items():
# We already took care of this spec using
# `spec.yaml` from its prefix.
if entry.spec in processed_specs:
msg = 'SKIPPING RECONSTRUCTION FROM OLD DB: {0}'
msg += ' [already reconstructed from spec.yaml]'
tty.debug(msg.format(entry.spec))
continue
# If we arrived here it very likely means that
# we have external specs that are not dependencies
# of other specs. This may be the case for externally
# installed compilers or externally installed
# applications.
tty.debug(
'RECONSTRUCTING FROM OLD DB: {0}'.format(entry.spec))
try:
layout = spack.store.layout
if entry.spec.external:
layout = None
install_check = True
else:
install_check = layout.check_installed(entry.spec)
if install_check:
kwargs = {
'spec': entry.spec,
'directory_layout': layout,
'explicit': entry.explicit,
'installation_time': entry.installation_time # noqa: E501
}
self._add(**kwargs)
processed_specs.add(entry.spec)
except Exception as e:
# Something went wrong, so the spec was not restored
# from old data
tty.debug(e.message)
pass
self._check_ref_counts()
def _check_ref_counts(self):
"""Ensure consistency of reference counts in the DB.
@ -606,7 +673,8 @@ def _read(self):
self._read_from_file(self._index_path, format='json')
elif os.path.isfile(self._old_yaml_index_path):
if os.access(self._db_dir, os.R_OK | os.W_OK):
if (not self.is_upstream) and os.access(
self._db_dir, os.R_OK | os.W_OK):
# if we can write, then read AND write a JSON file.
self._read_from_file(self._old_yaml_index_path, format='yaml')
with WriteTransaction(self.lock):
@ -616,6 +684,10 @@ def _read(self):
self._read_from_file(self._old_yaml_index_path, format='yaml')
else:
if self.is_upstream:
raise UpstreamDatabaseLockingError(
"No database index file is present, and upstream"
" databases cannot generate an index file")
# The file doesn't exist, try to traverse the directory.
# reindex() takes its own write lock, so no lock here.
with WriteTransaction(self.lock):
@ -657,6 +729,11 @@ def _add(
raise NonConcreteSpecAddError(
"Specs added to DB must be concrete.")
key = spec.dag_hash()
upstream, record = self.query_by_spec_hash(key)
if upstream:
return
# Retrieve optional arguments
installation_time = installation_time or _now()
@ -669,7 +746,6 @@ def _add(
}
self._add(dep, directory_layout, **extra_args)
key = spec.dag_hash()
if key not in self._data:
installed = bool(spec.external)
path = None
@ -682,6 +758,8 @@ def _add(
tty.warn(
'Dependency missing due to corrupt install directory:',
path, str(e))
elif spec.external_path:
path = spec.external_path
# Create a new install record with no deps initially.
new_spec = spec.copy(deps=False)
@ -696,8 +774,10 @@ def _add(
# Connect dependencies from the DB to the new copy.
for name, dep in iteritems(spec.dependencies_dict(_tracked_deps)):
dkey = dep.spec.dag_hash()
new_spec._add_dependency(self._data[dkey].spec, dep.deptypes)
self._data[dkey].ref_count += 1
upstream, record = self.query_by_spec_hash(dkey)
new_spec._add_dependency(record.spec, dep.deptypes)
if not upstream:
record.ref_count += 1
# Mark concrete once everything is built, and preserve
# the original hash of concrete specs.
@ -725,7 +805,8 @@ def add(self, spec, directory_layout, explicit=False):
def _get_matching_spec_key(self, spec, **kwargs):
"""Get the exact spec OR get a single spec that matches."""
key = spec.dag_hash()
if key not in self._data:
upstream, record = self.query_by_spec_hash(key)
if not record:
match = self.query_one(spec, **kwargs)
if match:
return match.dag_hash()
@ -735,7 +816,8 @@ def _get_matching_spec_key(self, spec, **kwargs):
@_autospec
def get_record(self, spec, **kwargs):
key = self._get_matching_spec_key(spec, **kwargs)
return self._data[key]
upstream, record = self.query_by_spec_hash(key)
return record
def _decrement_ref_count(self, spec):
key = spec.dag_hash()
@ -804,14 +886,18 @@ def installed_relatives(self, spec, direction='children', transitive=True):
for relative in to_add:
hash_key = relative.dag_hash()
if hash_key not in self._data:
upstream, record = self.query_by_spec_hash(hash_key)
if not record:
reltype = ('Dependent' if direction == 'parents'
else 'Dependency')
tty.warn("Inconsistent state! %s %s of %s not in DB"
% (reltype, hash_key, spec.dag_hash()))
msg = ("Inconsistent state! %s %s of %s not in DB"
% (reltype, hash_key, spec.dag_hash()))
if self._fail_when_missing_deps:
raise MissingDependenciesError(msg)
tty.warn(msg)
continue
if not self._data[hash_key].installed:
if not record.installed:
continue
relatives.add(relative)
@ -844,7 +930,7 @@ def activated_extensions_for(self, extendee_spec, extensions_layout=None):
continue
# TODO: conditional way to do this instead of catching exceptions
def query(
def _query(
self,
query_spec=any,
known=any,
@ -898,48 +984,65 @@ def query(
# TODO: like installed and known that can be queried? Or are
# TODO: these really special cases that only belong here?
# TODO: handling of hashes restriction is not particularly elegant.
# Just look up concrete specs with hashes; no fancy search.
if isinstance(query_spec, spack.spec.Spec) and query_spec.concrete:
# TODO: handling of hashes restriction is not particularly elegant.
hash_key = query_spec.dag_hash()
if (hash_key in self._data and
(not hashes or hash_key in hashes)):
return [self._data[hash_key].spec]
else:
return []
# Abstract specs require more work -- currently we test
# against everything.
results = []
start_date = start_date or datetime.datetime.min
end_date = end_date or datetime.datetime.max
for key, rec in self._data.items():
if hashes is not None and rec.spec.dag_hash() not in hashes:
continue
if installed is not any and rec.installed != installed:
continue
if explicit is not any and rec.explicit != explicit:
continue
if known is not any and spack.repo.path.exists(
rec.spec.name) != known:
continue
inst_date = datetime.datetime.fromtimestamp(
rec.installation_time
)
if not (start_date < inst_date < end_date):
continue
if query_spec is any or rec.spec.satisfies(query_spec):
results.append(rec.spec)
return results
def query_local(self, *args, **kwargs):
with self.read_transaction():
# Just look up concrete specs with hashes; no fancy search.
if isinstance(query_spec, spack.spec.Spec) and query_spec.concrete:
return sorted(self._query(*args, **kwargs))
hash_key = query_spec.dag_hash()
if (hash_key in self._data and
(not hashes or hash_key in hashes)):
return [self._data[hash_key].spec]
else:
return []
def query(self, *args, **kwargs):
upstream_results = []
for upstream_db in self.upstream_dbs:
# queries for upstream DBs need to *not* lock - we may not
# have permissions to do this and the upstream DBs won't know about
# us anyway (so e.g. they should never uninstall specs)
upstream_results.extend(upstream_db._query(*args, **kwargs) or [])
# Abstract specs require more work -- currently we test
# against everything.
results = []
start_date = start_date or datetime.datetime.min
end_date = end_date or datetime.datetime.max
local_results = set(self.query_local(*args, **kwargs))
for key, rec in self._data.items():
if hashes is not None and rec.spec.dag_hash() not in hashes:
continue
results = list(local_results) + list(
x for x in upstream_results if x not in local_results)
if installed is not any and rec.installed != installed:
continue
if explicit is not any and rec.explicit != explicit:
continue
if known is not any and spack.repo.path.exists(
rec.spec.name) != known:
continue
inst_date = datetime.datetime.fromtimestamp(
rec.installation_time
)
if not (start_date < inst_date < end_date):
continue
if query_spec is any or rec.spec.satisfies(query_spec):
results.append(rec.spec)
return sorted(results)
return sorted(results)
def query_one(self, query_spec, known=any, installed=True):
"""Query for exactly one spec that matches the query spec.
@ -954,9 +1057,13 @@ def query_one(self, query_spec, known=any, installed=True):
return concrete_specs[0] if concrete_specs else None
def missing(self, spec):
with self.read_transaction():
key = spec.dag_hash()
return key in self._data and not self._data[key].installed
key = spec.dag_hash()
upstream, record = self.query_by_spec_hash(key)
return record and not record.installed
class UpstreamDatabaseLockingError(SpackError):
"""Raised when an operation would need to lock an upstream database"""
class CorruptDatabaseError(SpackError):
@ -967,6 +1074,10 @@ class NonConcreteSpecAddError(SpackError):
"""Raised when attemptint to add non-concrete spec to DB."""
class MissingDependenciesError(SpackError):
"""Raised when DB cannot find records for dependencies"""
class InvalidDatabaseVersionError(SpackError):
def __init__(self, expected, found):


@ -8,6 +8,7 @@
import glob
import tempfile
import re
from contextlib import contextmanager
import ruamel.yaml as yaml
@ -33,6 +34,7 @@ class DirectoryLayout(object):
def __init__(self, root):
self.root = root
self.check_upstream = True
@property
def hidden_file_paths(self):
@ -74,6 +76,13 @@ def path_for_spec(self, spec):
"""Return absolute path from the root to a directory for the spec."""
_check_concrete(spec)
if spec.external:
return spec.external_path
if self.check_upstream and spec.package.installed_upstream:
raise SpackError(
"Internal error: attempted to call path_for_spec on"
" upstream-installed package.")
path = self.relative_path_for_spec(spec)
assert(not path.startswith(self.root))
return os.path.join(self.root, path)
@ -164,7 +173,6 @@ class YamlDirectoryLayout(DirectoryLayout):
def __init__(self, root, **kwargs):
super(YamlDirectoryLayout, self).__init__(root)
self.metadata_dir = kwargs.get('metadata_dir', '.spack')
self.hash_len = kwargs.get('hash_len')
self.path_scheme = kwargs.get('path_scheme') or (
"${ARCHITECTURE}/"
@ -177,6 +185,9 @@ def __init__(self, root, **kwargs):
self.path_scheme = self.path_scheme.replace(
"${HASH}", "${HASH:%d}" % self.hash_len)
# If any of these paths change, downstream databases may not be able to
# locate files in older upstream databases
self.metadata_dir = '.spack'
self.spec_file_name = 'spec.yaml'
self.extension_file_name = 'extensions.yaml'
self.build_log_name = 'build.out' # build log.
@ -190,9 +201,6 @@ def hidden_file_paths(self):
def relative_path_for_spec(self, spec):
_check_concrete(spec)
if spec.external:
return spec.external_path
path = spec.format(self.path_scheme)
return path
@ -222,20 +230,23 @@ def spec_file_path(self, spec):
_check_concrete(spec)
return os.path.join(self.metadata_path(spec), self.spec_file_name)
@contextmanager
def disable_upstream_check(self):
self.check_upstream = False
yield
self.check_upstream = True
def metadata_path(self, spec):
return os.path.join(self.path_for_spec(spec), self.metadata_dir)
return os.path.join(spec.prefix, self.metadata_dir)
def build_log_path(self, spec):
return os.path.join(self.path_for_spec(spec), self.metadata_dir,
self.build_log_name)
return os.path.join(self.metadata_path(spec), self.build_log_name)
def build_env_path(self, spec):
return os.path.join(self.path_for_spec(spec), self.metadata_dir,
self.build_env_name)
return os.path.join(self.metadata_path(spec), self.build_env_name)
def build_packages_path(self, spec):
return os.path.join(self.path_for_spec(spec), self.metadata_dir,
self.packages_dir)
return os.path.join(self.metadata_path(spec), self.packages_dir)
def create_install_directory(self, spec):
_check_concrete(spec)


@ -577,9 +577,28 @@ def shell_set(var, value):
# print roots for all module systems
module_roots = spack.config.get('config:module_roots')
module_to_roots = {
'tcl': list(),
'dotkit': list(),
'lmod': list()
}
for name, path in module_roots.items():
path = spack.util.path.canonicalize_path(path)
shell_set('_sp_%s_root' % name, path)
module_to_roots[name].append(path)
other_spack_instances = spack.config.get(
'upstreams') or {}
for install_properties in other_spack_instances.values():
upstream_module_roots = install_properties.get('modules', {})
for module_type, root in upstream_module_roots.items():
module_to_roots[module_type].append(root)
for name, paths in module_to_roots.items():
# Environment setup prepends paths, so the order is reversed here to
# preserve the intended priority: the modules of the local Spack
# instance have the highest precedence.
roots_val = ':'.join(reversed(paths))
shell_set('_sp_%s_roots' % name, roots_val)
# print environment module system if available. This can be expensive
# on clusters, so skip it if not needed.
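A minimal illustration of the reversal above, assuming the local root was collected first: the shell setup prepends each entry of the colon-separated value in turn, so reversing the list leaves the local instance's module root with the highest precedence. Paths below are hypothetical.

# Illustrative only; paths are hypothetical.
roots = ['/local/modules', '/upstream1/modules', '/upstream2/modules']
modulepath = []
for root in reversed(roots):    # upstream2, upstream1, local
    modulepath.insert(0, root)  # mimic _spack_pathadd, which prepends
assert modulepath[0] == '/local/modules'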


@ -33,6 +33,7 @@
import inspect
import os.path
import re
import collections
import six
import llnl.util.filesystem
@ -45,6 +46,7 @@
import spack.util.path
import spack.util.environment
import spack.error
import spack.util.spack_yaml as syaml
#: config section for this file
configuration = spack.config.get('modules')
@ -215,6 +217,64 @@ def root_path(name):
return spack.util.path.canonicalize_path(path)
def generate_module_index(root, modules):
entries = syaml.syaml_dict()
for m in modules:
entry = {
'path': m.layout.filename,
'use_name': m.layout.use_name
}
entries[m.spec.dag_hash()] = entry
index = {'module_index': entries}
index_path = os.path.join(root, 'module-index.yaml')
llnl.util.filesystem.mkdirp(root)
with open(index_path, 'w') as index_file:
syaml.dump(index, index_file, default_flow_style=False)
ModuleIndexEntry = collections.namedtuple(
'ModuleIndexEntry', ['path', 'use_name'])
def read_module_index(root):
index_path = os.path.join(root, 'module-index.yaml')
if not os.path.exists(index_path):
return {}
with open(index_path, 'r') as index_file:
yaml_content = syaml.load(index_file)
index = {}
yaml_index = yaml_content['module_index']
for dag_hash, module_properties in yaml_index.items():
index[dag_hash] = ModuleIndexEntry(
module_properties['path'],
module_properties['use_name'])
return index
def read_module_indices():
module_type_to_indices = {}
other_spack_instances = spack.config.get(
'upstreams') or {}
for install_properties in other_spack_instances.values():
module_type_to_root = install_properties.get('modules', {})
for module_type, root in module_type_to_root.items():
indices = module_type_to_indices.setdefault(module_type, [])
indices.append(read_module_index(root))
return module_type_to_indices
module_type_to_indices = read_module_indices()
def upstream_module(spec, module_type):
indices = module_type_to_indices[module_type]
for index in indices:
if spec.dag_hash() in index:
return index[spec.dag_hash()]
class BaseConfiguration(object):
"""Manipulates the information needed to generate a module file to make
querying easier. It needs to be sub-classed for specific module types.


@ -519,6 +519,15 @@ def __init__(self, spec):
super(PackageBase, self).__init__()
@property
def installed_upstream(self):
if not hasattr(self, '_installed_upstream'):
upstream, record = spack.store.db.query_by_spec_hash(
self.spec.dag_hash())
self._installed_upstream = upstream
return self._installed_upstream
def possible_dependencies(
self, transitive=True, expand_virtuals=True, visited=None):
"""Return set of possible dependencies of this package.
@ -1396,6 +1405,14 @@ def do_install(self, **kwargs):
if self.spec.external:
return self._process_external_package(explicit)
if self.installed_upstream:
tty.msg("{0.name} is installed in an upstream Spack instance"
" at {0.prefix}".format(self))
# Note this skips all post-install hooks. In the case of modules
# this is considered correct because we want to retrieve the
# module from the upstream Spack instance.
return
partial = self.check_for_unfinished_installation(keep_prefix, restage)
# Ensure package is not already installed


@ -16,6 +16,7 @@
import spack.schema.modules
import spack.schema.packages
import spack.schema.repos
import spack.schema.upstreams
#: Properties for inclusion in other schemas
@ -25,7 +26,8 @@
spack.schema.mirrors.properties,
spack.schema.modules.properties,
spack.schema.packages.properties,
spack.schema.repos.properties
spack.schema.repos.properties,
spack.schema.upstreams.properties
)


@ -0,0 +1,40 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: Properties for inclusion in other schemas
properties = {
'upstreams': {
'type': 'object',
'default': {},
'patternProperties': {
r'\w[\w-]*': {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': {
'install_tree': {'type': 'string'},
'modules': {
'type': 'object',
'properties': {
'tcl': {'type': 'string'},
'lmod': {'type': 'string'},
'dotkit': {'type': 'string'}
}
}
}
}
}
}
}
#: Full schema with metadata
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack core configuration file schema',
'type': 'object',
'additionalProperties': False,
'properties': properties,
}
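For reference, a hypothetical configuration that validates against this schema, shown as the dictionary spack.config.get('upstreams') would return; instance names and paths are illustrative.

# Illustrative only: two named upstream instances, each with an install tree
# and, optionally, the module roots of that instance.
upstreams = {
    'shared-spack': {
        'install_tree': '/shared/spack/opt/spack',
        'modules': {
            'tcl': '/shared/spack/share/spack/modules',
            'lmod': '/shared/spack/share/spack/lmod',
        },
    },
    'older-spack': {
        'install_tree': '/opt/older-spack/opt/spack',
    },
}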


@ -1267,7 +1267,12 @@ def cshort_spec(self):
@property
def prefix(self):
if self._prefix is None:
self.prefix = spack.store.layout.path_for_spec(self)
upstream, record = spack.store.db.query_by_spec_hash(
self.dag_hash())
if record and record.path:
self.prefix = record.path
else:
self.prefix = spack.store.layout.path_for_spec(self)
return self._prefix
@prefix.setter
@ -3302,7 +3307,9 @@ def tree(self, **kwargs):
if status_fn:
status = status_fn(node)
if status is None:
if node.package.installed_upstream:
out += colorize("@g{[^]} ", color=color)
elif status is None:
out += colorize("@K{ - } ", color=color) # not installed
elif status:
out += colorize("@g{[+]} ", color=color) # installed


@ -58,7 +58,8 @@ class Store(object):
"""
def __init__(self, root, path_scheme=None, hash_length=None):
self.root = root
self.db = spack.database.Database(root)
self.db = spack.database.Database(
root, upstream_dbs=retrieve_upstream_dbs())
self.layout = spack.directory_layout.YamlDirectoryLayout(
root, hash_len=hash_length, path_scheme=path_scheme)
@ -84,3 +85,27 @@ def _store():
root = llnl.util.lang.LazyReference(lambda: store.root)
db = llnl.util.lang.LazyReference(lambda: store.db)
layout = llnl.util.lang.LazyReference(lambda: store.layout)
def retrieve_upstream_dbs():
other_spack_instances = spack.config.get('upstreams', {})
install_roots = []
for install_properties in other_spack_instances.values():
install_roots.append(install_properties['install_tree'])
return _construct_upstream_dbs_from_install_roots(install_roots)
def _construct_upstream_dbs_from_install_roots(
install_roots, _test=False):
accumulated_upstream_dbs = []
for install_root in reversed(install_roots):
upstream_dbs = list(accumulated_upstream_dbs)
next_db = spack.database.Database(
install_root, is_upstream=True, upstream_dbs=upstream_dbs)
next_db._fail_when_missing_deps = _test
next_db._read()
accumulated_upstream_dbs.insert(0, next_db)
return accumulated_upstream_dbs
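A hedged usage sketch of _construct_upstream_dbs_from_install_roots, assuming both roots already contain database index files and are listed nearest-first (as they would come out of the upstreams config): databases are built farthest-first so that each nearer database can resolve hashes recorded further up the chain. Paths are illustrative.

# Sketch only; paths are hypothetical and must already hold database indexes.
import spack.store

dbs = spack.store._construct_upstream_dbs_from_install_roots(
    ['/site/spack/opt/spack', '/central/spack/opt/spack'])
# dbs[0] is the nearest upstream; its upstream_dbs list contains dbs[1].
assert dbs[0].upstream_dbs == [dbs[1]]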


@ -11,8 +11,8 @@ def test_print_shell_vars_sh(capsys):
out, _ = capsys.readouterr()
assert "_sp_sys_type=" in out
assert "_sp_tcl_root=" in out
assert "_sp_lmod_root=" in out
assert "_sp_tcl_roots=" in out
assert "_sp_lmod_roots=" in out
assert "_sp_module_prefix" not in out
@ -21,8 +21,8 @@ def test_print_shell_vars_csh(capsys):
out, _ = capsys.readouterr()
assert "set _sp_sys_type = " in out
assert "set _sp_tcl_root = " in out
assert "set _sp_lmod_root = " in out
assert "set _sp_tcl_roots = " in out
assert "set _sp_lmod_roots = " in out
assert "set _sp_module_prefix = " not in out
@ -31,8 +31,8 @@ def test_print_shell_vars_sh_modules(capsys):
out, _ = capsys.readouterr()
assert "_sp_sys_type=" in out
assert "_sp_tcl_root=" in out
assert "_sp_lmod_root=" in out
assert "_sp_tcl_roots=" in out
assert "_sp_lmod_roots=" in out
assert "_sp_module_prefix=" in out
@ -41,6 +41,6 @@ def test_print_shell_vars_csh_modules(capsys):
out, _ = capsys.readouterr()
assert "set _sp_sys_type = " in out
assert "set _sp_tcl_root = " in out
assert "set _sp_lmod_root = " in out
assert "set _sp_tcl_roots = " in out
assert "set _sp_lmod_roots = " in out
assert "set _sp_module_prefix = " in out


@ -435,6 +435,29 @@ def fake_fn(self):
PackageBase.fetcher = orig_fn
class MockLayout(object):
def __init__(self, root):
self.root = root
def path_for_spec(self, spec):
return '/'.join([self.root, spec.name])
def check_installed(self, spec):
return True
@pytest.fixture()
def gen_mock_layout(tmpdir):
# Generate a MockLayout in a temporary directory. In general the prefixes
# specified by MockLayout should never be written to, but this ensures
# that even if they are, it causes no harm
def create_layout(root):
subroot = tmpdir.mkdir(root)
return MockLayout(str(subroot))
yield create_layout
@pytest.fixture()
def module_configuration(monkeypatch, request):
"""Reads the module configuration file from the mock ones prepared
@ -758,6 +781,7 @@ def __init__(self, name, dependencies, dependency_types, conditions=None,
self.name = name
self.spec = None
self.dependencies = ordereddict_backport.OrderedDict()
self._installed_upstream = False
assert len(dependencies) == len(dependency_types)
for dep, dtype in zip(dependencies, dependency_types):


@ -17,13 +17,188 @@
import spack.repo
import spack.store
from spack.test.conftest import MockPackageMultiRepo
import spack.database
import spack.spec
from spack.test.conftest import MockPackage, MockPackageMultiRepo
from spack.util.executable import Executable
pytestmark = pytest.mark.db
@pytest.fixture()
def test_store(tmpdir):
real_store = spack.store.store
spack.store.store = spack.store.Store(str(tmpdir.join('test_store')))
yield
spack.store.store = real_store
@pytest.fixture()
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
upstream_db = spack.database.Database(mock_db_root)
# Generate initial DB file to avoid reindex
with open(upstream_db._index_path, 'w') as db_file:
upstream_db._write_to_file(db_file)
upstream_layout = gen_mock_layout('/a/')
downstream_db_root = str(
tmpdir_factory.mktemp('mock_downstream_db_root'))
downstream_db = spack.database.Database(
downstream_db_root, upstream_dbs=[upstream_db])
with open(downstream_db._index_path, 'w') as db_file:
downstream_db._write_to_file(db_file)
downstream_layout = gen_mock_layout('/b/')
yield upstream_db, upstream_layout, downstream_db, downstream_layout
@pytest.mark.usefixtures('config')
def test_installed_upstream(upstream_and_downstream_db):
upstream_db, upstream_layout, downstream_db, downstream_layout = (
upstream_and_downstream_db)
default = ('build', 'link')
x = MockPackage('x', [], [])
z = MockPackage('z', [], [])
y = MockPackage('y', [z], [default])
w = MockPackage('w', [x, y], [default, default])
mock_repo = MockPackageMultiRepo([w, x, y, z])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('w')
spec.concretize()
for dep in spec.traverse(root=False):
upstream_db.add(dep, upstream_layout)
new_spec = spack.spec.Spec('w')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
for dep in new_spec.traverse(root=False):
upstream, record = downstream_db.query_by_spec_hash(
dep.dag_hash())
assert upstream
assert record.path == upstream_layout.path_for_spec(dep)
upstream, record = downstream_db.query_by_spec_hash(
new_spec.dag_hash())
assert not upstream
assert record.installed
upstream_db._check_ref_counts()
downstream_db._check_ref_counts()
@pytest.mark.usefixtures('config')
def test_removed_upstream_dep(upstream_and_downstream_db):
upstream_db, upstream_layout, downstream_db, downstream_layout = (
upstream_and_downstream_db)
default = ('build', 'link')
z = MockPackage('z', [], [])
y = MockPackage('y', [z], [default])
mock_repo = MockPackageMultiRepo([y, z])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('y')
spec.concretize()
upstream_db.add(spec['z'], upstream_layout)
new_spec = spack.spec.Spec('y')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
upstream_db.remove(new_spec['z'])
new_downstream = spack.database.Database(
downstream_db.root, upstream_dbs=[upstream_db])
new_downstream._fail_when_missing_deps = True
with pytest.raises(spack.database.MissingDependenciesError):
new_downstream._read()
@pytest.mark.usefixtures('config')
def test_add_to_upstream_after_downstream(upstream_and_downstream_db):
"""An upstream DB can add a package after it is installed in the downstream
DB. When a package is recorded as installed in both, the results should
refer to the downstream DB.
"""
upstream_db, upstream_layout, downstream_db, downstream_layout = (
upstream_and_downstream_db)
x = MockPackage('x', [], [])
mock_repo = MockPackageMultiRepo([x])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
downstream_db.add(spec, downstream_layout)
upstream_db.add(spec, upstream_layout)
upstream, record = downstream_db.query_by_spec_hash(spec.dag_hash())
# Even though the package is recorded as installed in the upstream DB,
# we prefer the locally-installed instance
assert not upstream
qresults = downstream_db.query('x')
assert len(qresults) == 1
queried_spec, = qresults
try:
orig_db = spack.store.db
spack.store.db = downstream_db
assert queried_spec.prefix == downstream_layout.path_for_spec(spec)
finally:
spack.store.db = orig_db
@pytest.mark.usefixtures('config')
def test_recursive_upstream_dbs(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b', 'c']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/', '/rc/']]
default = ('build', 'link')
z = MockPackage('z', [], [])
y = MockPackage('y', [z], [default])
x = MockPackage('x', [y], [default])
mock_repo = MockPackageMultiRepo([x, y, z])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
db_c = spack.database.Database(roots[2])
db_c.add(spec['z'], layouts[2])
db_b = spack.database.Database(roots[1], upstream_dbs=[db_c])
db_b.add(spec['y'], layouts[1])
db_a = spack.database.Database(roots[0], upstream_dbs=[db_b, db_c])
db_a.add(spec['x'], layouts[0])
dbs = spack.store._construct_upstream_dbs_from_install_roots(
roots, _test=True)
assert dbs[0].db_for_spec_hash(spec.dag_hash()) == dbs[0]
assert dbs[0].db_for_spec_hash(spec['y'].dag_hash()) == dbs[1]
assert dbs[0].db_for_spec_hash(spec['z'].dag_hash()) == dbs[2]
dbs[0]._check_ref_counts()
dbs[1]._check_ref_counts()
dbs[2]._check_ref_counts()
assert (dbs[0].installed_relatives(spec) ==
set(spec.traverse(root=False)))
assert (dbs[0].installed_relatives(spec['z'], direction='parents') ==
set([spec, spec['y']]))
assert not dbs[2].installed_relatives(spec['z'], direction='parents')
@pytest.fixture()
def usr_folder_exists(monkeypatch):
"""The ``/usr`` folder is assumed to be existing in some tests. This


@ -22,7 +22,11 @@
@pytest.fixture()
def layout_and_dir(tmpdir):
"""Returns a directory layout and the corresponding directory."""
yield YamlDirectoryLayout(str(tmpdir)), str(tmpdir)
layout = YamlDirectoryLayout(str(tmpdir))
old_layout = spack.store.layout
spack.store.layout = layout
yield layout, str(tmpdir)
spack.store.layout = old_layout
def test_yaml_directory_layout_parameters(


@ -125,6 +125,77 @@ def test_installed_dependency_request_conflicts(
dependent.concretize()
def test_installed_upstream_external(
tmpdir_factory, install_mockery, mock_fetch, gen_mock_layout):
"""Check that when a dependency package is recorded as installed in
an upstream database, it is not reinstalled.
"""
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
prepared_db = spack.database.Database(mock_db_root)
upstream_layout = gen_mock_layout('/a/')
dependency = spack.spec.Spec('externaltool')
dependency.concretize()
prepared_db.add(dependency, upstream_layout)
try:
original_db = spack.store.db
downstream_db_root = str(
tmpdir_factory.mktemp('mock_downstream_db_root'))
spack.store.db = spack.database.Database(
downstream_db_root, upstream_dbs=[prepared_db])
dependent = spack.spec.Spec('externaltest')
dependent.concretize()
new_dependency = dependent['externaltool']
assert new_dependency.external
assert new_dependency.prefix == '/path/to/external_tool'
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
finally:
spack.store.db = original_db
def test_installed_upstream(tmpdir_factory, install_mockery, mock_fetch,
gen_mock_layout):
"""Check that when a dependency package is recorded as installed in
an upstream database, it is not reinstalled.
"""
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
prepared_db = spack.database.Database(mock_db_root)
upstream_layout = gen_mock_layout('/a/')
dependency = spack.spec.Spec('dependency-install')
dependency.concretize()
prepared_db.add(dependency, upstream_layout)
try:
original_db = spack.store.db
downstream_db_root = str(
tmpdir_factory.mktemp('mock_downstream_db_root'))
spack.store.db = spack.database.Database(
downstream_db_root, upstream_dbs=[prepared_db])
dependent = spack.spec.Spec('dependent-install')
dependent.concretize()
new_dependency = dependent['dependency-install']
assert new_dependency.package.installed_upstream
assert (new_dependency.prefix ==
upstream_layout.path_for_spec(dependency))
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
finally:
spack.store.db = original_db
@pytest.mark.disable_clean_stage_check
def test_partial_install_keep_prefix(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()


@ -192,6 +192,23 @@ def test_conflicts(self, modulefile_content, module_configuration):
with pytest.raises(SystemExit):
modulefile_content('mpileaks')
def test_module_index(
self, module_configuration, factory, tmpdir_factory):
module_configuration('suffix')
w1, s1 = factory('mpileaks')
w2, s2 = factory('callpath')
test_root = str(tmpdir_factory.mktemp('module-root'))
spack.modules.common.generate_module_index(test_root, [w1, w2])
index = spack.modules.common.read_module_index(test_root)
assert index[s1.dag_hash()].use_name == w1.layout.use_name
assert index[s2.dag_hash()].path == w2.layout.filename
def test_suffixes(self, module_configuration, factory):
"""Tests adding suffixes to module file name."""
module_configuration('suffix')


@ -25,8 +25,15 @@ if ($?SPACK_ROOT) then
eval `spack --print-shell-vars csh`
# Set up modules and dotkit search paths in the user environment
_spack_pathadd DK_NODE "$_sp_dotkit_root/$_sp_sys_type"
_spack_pathadd MODULEPATH "$_sp_tcl_root/$_sp_sys_type"
set tcl_roots = `echo $_sp_tcl_roots:q | sed 's/:/ /g'`
foreach tcl_root ($tcl_roots:q)
_spack_pathadd MODULEPATH "$tcl_root/$_sp_sys_type"
end
set dotkit_roots = `echo $_sp_dotkit_roots:q | sed 's/:/ /g'`
foreach dotkit_root ($dotkit_roots)
_spack_pathadd DK_NODE "$dotkit_root/$_sp_sys_type"
end
else
echo "ERROR: Sourcing spack setup-env.csh requires setting SPACK_ROOT to "
echo " the root of your spack installation."


@ -290,8 +290,17 @@ fi;
#
# set module system roots
#
_spack_pathadd DK_NODE "${_sp_dotkit_root%/}/$_sp_sys_type"
_spack_pathadd MODULEPATH "${_sp_tcl_root%/}/$_sp_sys_type"
_sp_multi_pathadd() {
local IFS=':'
if [[ -n "${ZSH_VERSION:-}" ]]; then
setopt sh_word_split
fi
for pth in "$2"; do
_spack_pathadd "$1" "${pth}/${_sp_sys_type}"
done
}
_sp_multi_pathadd MODULEPATH "$_sp_tcl_roots"
_sp_multi_pathadd DK_NODE "$_sp_dotkit_roots"
# Add programmable tab completion for Bash
#


@ -16,4 +16,4 @@ class Externaltest(Package):
depends_on('externaltool')
def install(self, spec, prefix):
pass
touch(join_path(prefix, 'an_installation_file'))