Merge pull request #1015 from LLNL/features/faster-virtuals

Faster virtuals and concretization

commit a095fd517f
.gitignore (vendored)
@@ -1,5 +1,7 @@
 /var/spack/stage
 /var/spack/cache
+/var/spack/repos/*/index.yaml
+/var/spack/repos/*/lock
 *.pyc
 /opt
 *~
lib/spack/llnl/util/lock.py
@@ -28,6 +28,9 @@
 import time
 import socket
 
+__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
+           'LockError']
+
 # Default timeout in seconds, after which locks will raise exceptions.
 _default_timeout = 60
 
@@ -36,13 +39,20 @@
 
 
 class Lock(object):
+    """This is an implementation of a filesystem lock using Python's lockf.
+
+    In Python, `lockf` actually calls `fcntl`, so this should work with any
+    filesystem implementation that supports locking through the fcntl calls.
+    This includes distributed filesystems like Lustre (when flock is enabled)
+    and recent NFS versions.
+
+    """
     def __init__(self, file_path):
         self._file_path = file_path
         self._fd = None
         self._reads = 0
         self._writes = 0
 
     def _lock(self, op, timeout):
         """This takes a lock using POSIX locks (``fcntl.lockf``).
 
@@ -63,7 +73,9 @@ def _lock(self, op, timeout):
 
                 fcntl.lockf(self._fd, op | fcntl.LOCK_NB)
                 if op == fcntl.LOCK_EX:
-                    os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
+                    os.write(
+                        self._fd,
+                        "pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
                 return
 
             except IOError as error:
@@ -75,7 +87,6 @@ def _lock(self, op, timeout):
 
         raise LockError("Timed out waiting for lock.")
 
-
     def _unlock(self):
         """Releases a lock using POSIX locks (``fcntl.lockf``)
 
@@ -87,7 +98,6 @@ def _unlock(self):
         os.close(self._fd)
         self._fd = None
 
-
     def acquire_read(self, timeout=_default_timeout):
         """Acquires a recursive, shared lock for reading.
 
@@ -107,7 +117,6 @@ def acquire_read(self, timeout=_default_timeout):
             self._reads += 1
             return False
 
-
     def acquire_write(self, timeout=_default_timeout):
         """Acquires a recursive, exclusive lock for writing.
 
@@ -127,7 +136,6 @@ def acquire_write(self, timeout=_default_timeout):
             self._writes += 1
             return False
 
-
     def release_read(self):
         """Releases a read lock.
 
@@ -148,7 +156,6 @@ def release_read(self):
             self._reads -= 1
             return False
 
-
     def release_write(self):
         """Releases a write lock.
 
@@ -170,6 +177,68 @@ def release_write(self):
             return False
 
 
+class LockTransaction(object):
+    """Simple nested transaction context manager that uses a file lock.
+
+    This class can trigger actions when the lock is acquired for the
+    first time and released for the last.
+
+    If the acquire_fn returns a value, it is used as the return value for
+    __enter__, allowing it to be passed as the `as` argument of a `with`
+    statement.
+
+    If acquire_fn returns a context manager, *its* `__enter__` function will be
+    called in `__enter__` after acquire_fn, and its `__exit__` function will be
+    called before `release_fn` in `__exit__`, allowing you to nest a context
+    manager to be used along with the lock.
+
+    Timeout for lock is customizable.
+
+    """
+
+    def __init__(self, lock, acquire_fn=None, release_fn=None,
+                 timeout=_default_timeout):
+        self._lock = lock
+        self._timeout = timeout
+        self._acquire_fn = acquire_fn
+        self._release_fn = release_fn
+        self._as = None
+
+    def __enter__(self):
+        if self._enter() and self._acquire_fn:
+            self._as = self._acquire_fn()
+            if hasattr(self._as, '__enter__'):
+                return self._as.__enter__()
+            else:
+                return self._as
+
+    def __exit__(self, type, value, traceback):
+        suppress = False
+        if self._exit():
+            if self._as and hasattr(self._as, '__exit__'):
+                if self._as.__exit__(type, value, traceback):
+                    suppress = True
+            if self._release_fn:
+                if self._release_fn(type, value, traceback):
+                    suppress = True
+        return suppress
+
+
+class ReadTransaction(LockTransaction):
+    def _enter(self):
+        return self._lock.acquire_read(self._timeout)
+
+    def _exit(self):
+        return self._lock.release_read()
+
+
+class WriteTransaction(LockTransaction):
+    def _enter(self):
+        return self._lock.acquire_write(self._timeout)
+
+    def _exit(self):
+        return self._lock.release_write()
+
+
 class LockError(Exception):
     """Raised when an attempt to acquire a lock times out."""
     pass
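The new transaction classes are deliberately generic: they wrap any Lock plus optional callbacks, and recursion is handled by the read/write counters above, so the callback only fires on the outermost acquisition. A minimal, hedged usage sketch (the lock-file path and callback are illustrative, not from the commit):

    from llnl.util.lock import Lock, ReadTransaction

    lock = Lock('/tmp/example.lock')      # hypothetical lock file

    def reload_state():
        # runs only when the outermost read lock is first acquired
        print('state (re)loaded')

    with ReadTransaction(lock, acquire_fn=reload_state):
        with ReadTransaction(lock, acquire_fn=reload_state):
            pass  # nested: acquire_read() returns False, so no second reload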
lib/spack/spack/__init__.py
@@ -1,3 +1,4 @@
+# flake8: noqa
 ##############################################################################
 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
 # Produced at the Lawrence Livermore National Laboratory.
@@ -50,8 +51,15 @@
 share_path = join_path(spack_root, "share", "spack")
 cache_path = join_path(var_path, "cache")
 
+# User configuration location
+user_config_path = os.path.expanduser('~/.spack')
+
 import spack.fetch_strategy
-cache = spack.fetch_strategy.FsCache(cache_path)
+fetch_cache = spack.fetch_strategy.FsCache(cache_path)
+
+from spack.file_cache import FileCache
+user_cache_path = join_path(user_config_path, 'cache')
+user_cache = FileCache(user_cache_path)
 
 prefix = spack_root
 opt_path = join_path(prefix, "opt")
@@ -140,7 +148,7 @@
 _tmp_candidates = (_default_tmp, '/nfs/tmp2', '/tmp', '/var/tmp')
 for path in _tmp_candidates:
     # don't add a second username if it's already unique by user.
-    if not _tmp_user in path:
+    if _tmp_user not in path:
         tmp_dirs.append(join_path(path, '%u', 'spack-stage'))
     else:
         tmp_dirs.append(join_path(path, 'spack-stage'))
@@ -172,11 +180,12 @@
 # Spack internal code should call 'import spack' and access other
 # variables (spack.repo, paths, etc.) directly.
 #
-# TODO: maybe this should be separated out and should go in build_environment.py?
-# TODO: it's not clear where all the stuff that needs to be included in packages
-# should live. This file is overloaded for spack core vs. for packages.
+# TODO: maybe this should be separated out to build_environment.py?
+# TODO: it's not clear where all the stuff that needs to be included in
+# packages should live. This file is overloaded for spack core vs.
+# for packages.
 #
-__all__ = ['Package', 'StagedPackage', 'CMakePackage', \
+__all__ = ['Package', 'StagedPackage', 'CMakePackage',
            'Version', 'when', 'ver', 'alldeps', 'nolink']
 from spack.package import Package, ExtensionConflictError
 from spack.package import StagedPackage, CMakePackage
@@ -197,8 +206,8 @@
 __all__ += spack.util.executable.__all__
 
 from spack.package import \
-    install_dependency_symlinks, flatten_dependencies, DependencyConflictError, \
-    InstallError, ExternalPackageError
+    install_dependency_symlinks, flatten_dependencies, \
+    DependencyConflictError, InstallError, ExternalPackageError
 __all__ += [
-    'install_dependency_symlinks', 'flatten_dependencies', 'DependencyConflictError',
-    'InstallError', 'ExternalPackageError']
+    'install_dependency_symlinks', 'flatten_dependencies',
+    'DependencyConflictError', 'InstallError', 'ExternalPackageError']
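Renaming spack.cache to spack.fetch_cache makes room for the second cache introduced here. A hedged summary of the two, using only names defined above (paths assume a default install):

    import spack

    # Downloaded source archives (what `spack purge --downloads` clears):
    print(spack.cache_path)         # <spack prefix>/var/spack/cache

    # Per-user metadata such as the virtual-provider indexes added by this
    # PR (what `spack purge --user-cache` clears):
    print(spack.user_cache_path)    # ~/.spack/cache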
lib/spack/spack/architecture.py
@@ -383,6 +383,13 @@ def __str__(self):
     def __contains__(self, string):
         return string in str(self)
 
+    # TODO: make this unnecessary: don't include an empty arch on *every* spec.
+    def __nonzero__(self):
+        return (self.platform is not None or
+                self.platform_os is not None or
+                self.target is not None)
+    __bool__ = __nonzero__
+
     def _cmp_key(self):
         if isinstance(self.platform, Platform):
             platform = self.platform.name
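The new __nonzero__/__bool__ pair makes an empty architecture falsy, so callers can write `if spec.architecture:` instead of testing each field. A hedged illustration with a stand-in class (only the truthiness rule comes from the diff; the constructor is an assumption):

    class Arch(object):
        # stand-in with the three fields the diff tests
        def __init__(self, platform=None, platform_os=None, target=None):
            self.platform, self.platform_os, self.target = \
                platform, platform_os, target

        def __nonzero__(self):
            return (self.platform is not None or
                    self.platform_os is not None or
                    self.target is not None)
        __bool__ = __nonzero__

    assert not Arch()              # empty arch is falsy
    assert Arch(platform='linux')  # any one field set makes it truthy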
lib/spack/spack/cmd/purge.py
@@ -33,7 +33,11 @@ def setup_parser(subparser):
         '-s', '--stage', action='store_true', default=True,
         help="Remove all temporary build stages (default).")
     subparser.add_argument(
-        '-c', '--cache', action='store_true', help="Remove cached downloads.")
+        '-d', '--downloads', action='store_true',
+        help="Remove cached downloads.")
+    subparser.add_argument(
+        '-u', '--user-cache', action='store_true',
+        help="Remove caches in user home directory. Includes virtual indices.")
     subparser.add_argument(
         '-a', '--all', action='store_true',
         help="Remove all of the above.")
@@ -41,12 +45,14 @@ def setup_parser(subparser):
 
 def purge(parser, args):
     # Special case: no flags.
-    if not any((args.stage, args.cache, args.all)):
+    if not any((args.stage, args.downloads, args.user_cache, args.all)):
        stage.purge()
        return
 
     # handle other flags with fall through.
     if args.stage or args.all:
         stage.purge()
-    if args.cache or args.all:
-        spack.cache.destroy()
+    if args.downloads or args.all:
+        spack.fetch_cache.destroy()
+    if args.user_cache or args.all:
+        spack.user_cache.destroy()
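Taken together, the flags now map one-to-one onto the kinds of cached state: bare `spack purge` still clears only build stages, `spack purge --downloads` calls spack.fetch_cache.destroy(), `spack purge --user-cache` calls spack.user_cache.destroy() (removing the cached virtual indices), and `spack purge --all` does all of the above.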
lib/spack/spack/cmd/test.py
@@ -23,11 +23,9 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import os
-from pprint import pprint
 
 from llnl.util.filesystem import join_path, mkdirp
 from llnl.util.tty.colify import colify
-from llnl.util.lang import list_modules
 
 import spack
 import spack.test
@@ -35,11 +33,13 @@
 
 description = "Run unit tests"
 
+
 def setup_parser(subparser):
     subparser.add_argument(
         'names', nargs='*', help="Names of tests to run.")
     subparser.add_argument(
-        '-l', '--list', action='store_true', dest='list', help="Show available tests")
+        '-l', '--list', action='store_true', dest='list',
+        help="Show available tests")
     subparser.add_argument(
         '--createXmlOutput', action='store_true', dest='createXmlOutput',
         help="Create JUnit XML from test results")
@@ -69,6 +69,7 @@ def fetch(self):
     def __str__(self):
         return "[mock fetcher]"
 
+
 def test(parser, args):
     if args.list:
         print "Available tests:"
@@ -85,5 +86,5 @@ def test(parser, args):
 
     if not os.path.exists(outputDir):
         mkdirp(outputDir)
-    spack.cache = MockCache()
+    spack.fetch_cache = MockCache()
     spack.test.run(args.names, outputDir, args.verbose)
lib/spack/spack/cmd/uninstall.py
@@ -184,7 +184,8 @@ def uninstall(parser, args):
     uninstall_list = list(set(uninstall_list))
 
     if has_error:
-        tty.die('You can use spack uninstall --dependents to uninstall these dependencies as well')  # NOQA: ignore=E501
+        tty.die('You can use spack uninstall --dependents '
+                'to uninstall these dependencies as well')
 
     if not args.yes_to_all:
         tty.msg("The following packages will be uninstalled : ")
lib/spack/spack/config.py
@@ -525,7 +525,7 @@ def clear(self):
 ConfigScope('site', os.path.join(spack.etc_path, 'spack'))
 
 """User configuration can override both spack defaults and site config."""
-ConfigScope('user', os.path.expanduser('~/.spack'))
+ConfigScope('user', spack.user_config_path)
 
 
 def highest_precedence_scope():
lib/spack/spack/database.py
@@ -165,11 +165,11 @@ def __init__(self, root, db_dir=None):
 
     def write_transaction(self, timeout=_db_lock_timeout):
         """Get a write lock context manager for use in a `with` block."""
-        return WriteTransaction(self, self._read, self._write, timeout)
+        return WriteTransaction(self.lock, self._read, self._write, timeout)
 
     def read_transaction(self, timeout=_db_lock_timeout):
         """Get a read lock context manager for use in a `with` block."""
-        return ReadTransaction(self, self._read, None, timeout)
+        return ReadTransaction(self.lock, self._read, timeout=timeout)
 
     def _write_to_yaml(self, stream):
         """Write out the database to a YAML file.
@@ -352,12 +352,22 @@ def _check_ref_counts(self):
                 "Invalid ref_count: %s: %d (expected %d), in DB %s" %
                 (key, found, expected, self._index_path))
 
-    def _write(self):
+    def _write(self, type, value, traceback):
         """Write the in-memory database index to its file path.
 
-        Does no locking.
+        This is a helper function called by the WriteTransaction context
+        manager. If there is an exception while the write lock is active,
+        nothing will be written to the database file, but the in-memory
+        database *may* be left in an inconsistent state. It will be
+        consistent after the start of the next transaction, when it reads
+        from disk again.
+
+        This routine does no locking.
 
         """
+        # Do not write if exceptions were raised
+        if type is not None:
+            return
+
         temp_file = self._index_path + (
             '.%s.%s.temp' % (socket.getfqdn(), os.getpid()))
 
@@ -589,49 +599,6 @@ def missing(self, spec):
         return key in self._data and not self._data[key].installed
 
 
-class _Transaction(object):
-    """Simple nested transaction context manager that uses a file lock.
-
-    This class can trigger actions when the lock is acquired for the
-    first time and released for the last.
-
-    Timeout for lock is customizable.
-    """
-
-    def __init__(self, db,
-                 acquire_fn=None,
-                 release_fn=None,
-                 timeout=_db_lock_timeout):
-        self._db = db
-        self._timeout = timeout
-        self._acquire_fn = acquire_fn
-        self._release_fn = release_fn
-
-    def __enter__(self):
-        if self._enter() and self._acquire_fn:
-            self._acquire_fn()
-
-    def __exit__(self, type, value, traceback):
-        if self._exit() and self._release_fn:
-            self._release_fn()
-
-
-class ReadTransaction(_Transaction):
-    def _enter(self):
-        return self._db.lock.acquire_read(self._timeout)
-
-    def _exit(self):
-        return self._db.lock.release_read()
-
-
-class WriteTransaction(_Transaction):
-    def _enter(self):
-        return self._db.lock.acquire_write(self._timeout)
-
-    def _exit(self):
-        return self._db.lock.release_write()
-
-
 class CorruptDatabaseError(SpackError):
     def __init__(self, path, msg=''):
         super(CorruptDatabaseError, self).__init__(
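With Database now handing self.lock (rather than self) to the shared transaction classes in llnl.util.lock, a write transaction follows this shape — a hedged sketch; spack.installed_db is the global Database instance referenced elsewhere in this commit:

    import spack

    db = spack.installed_db

    with db.write_transaction():
        # entering acquired the write lock and called db._read()
        pass  # mutate the in-memory index here

    # On a clean exit, db._write(None, None, None) wrote the index to disk.
    # If the body raises, _write() sees a non-None exception type and skips
    # the disk write, so a failed transaction cannot corrupt the YAML index.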
lib/spack/spack/file_cache.py (new file, 183 lines)
@@ -0,0 +1,183 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import shutil

from llnl.util.filesystem import *
from llnl.util.lock import *

from spack.error import SpackError


class FileCache(object):
    """This class manages cached data in the filesystem.

    - Cache files are fetched and stored by unique keys.  Keys can be
      relative paths, so that there can be some hierarchy in the cache.

    - The FileCache handles locking cache files for reading and writing, so
      client code need not manage locks for cache entries.

    """
    def __init__(self, root):
        """Create a file cache object.

        This will create the cache directory if it does not exist yet.

        """
        self.root = root.rstrip(os.path.sep)
        if not os.path.exists(self.root):
            mkdirp(self.root)

        self._locks = {}

    def destroy(self):
        """Remove all files under the cache root."""
        for f in os.listdir(self.root):
            path = join_path(self.root, f)
            if os.path.isdir(path):
                shutil.rmtree(path, True)
            else:
                os.remove(path)

    def cache_path(self, key):
        """Path to the file in the cache for a particular key."""
        return join_path(self.root, key)

    def _lock_path(self, key):
        """Path to the lock file in the cache for a particular key."""
        keyfile = os.path.basename(key)
        keydir = os.path.dirname(key)

        return join_path(self.root, keydir, '.' + keyfile + '.lock')

    def _get_lock(self, key):
        """Create a lock for a key, if necessary, and return a lock object."""
        if key not in self._locks:
            lock_file = self._lock_path(key)
            if not os.path.exists(lock_file):
                touch(lock_file)
            self._locks[key] = Lock(lock_file)
        return self._locks[key]

    def init_entry(self, key):
        """Ensure we can access a cache file. Create a lock for it if needed.

        Return whether the cache file exists yet or not.
        """
        cache_path = self.cache_path(key)

        exists = os.path.exists(cache_path)
        if exists:
            if not os.path.isfile(cache_path):
                raise CacheError("Cache file is not a file: %s" % cache_path)

            if not os.access(cache_path, os.R_OK | os.W_OK):
                raise CacheError("Cannot access cache file: %s" % cache_path)
        else:
            # if the file is hierarchical, make parent directories
            parent = os.path.dirname(cache_path)
            if parent.rstrip(os.path.sep) != self.root:
                mkdirp(parent)

            if not os.access(parent, os.R_OK | os.W_OK):
                raise CacheError("Cannot access cache directory: %s" % parent)

        # ensure lock is created for this key
        self._get_lock(key)
        return exists

    def read_transaction(self, key):
        """Get a read transaction on a file cache item.

        Returns a ReadTransaction context manager and opens the cache file
        for reading.  You can use it like this:

           with spack.user_cache.read_transaction(key) as cache_file:
               cache_file.read()

        """
        return ReadTransaction(
            self._get_lock(key), lambda: open(self.cache_path(key)))

    def write_transaction(self, key):
        """Get a write transaction on a file cache item.

        Returns a WriteTransaction context manager that opens a temporary
        file for writing.  Once the context manager finishes, if nothing
        went wrong, moves the file into place on top of the old file
        atomically.

        """
        class WriteContextManager(object):
            def __enter__(cm):
                cm.orig_filename = self.cache_path(key)
                cm.orig_file = None
                if os.path.exists(cm.orig_filename):
                    cm.orig_file = open(cm.orig_filename, 'r')

                cm.tmp_filename = self.cache_path(key) + '.tmp'
                cm.tmp_file = open(cm.tmp_filename, 'w')

                return cm.orig_file, cm.tmp_file

            def __exit__(cm, type, value, traceback):
                if cm.orig_file:
                    cm.orig_file.close()
                cm.tmp_file.close()

                if value:
                    # remove tmp on exception & raise it
                    shutil.rmtree(cm.tmp_filename, True)
                    raise value
                else:
                    os.rename(cm.tmp_filename, cm.orig_filename)

        return WriteTransaction(self._get_lock(key), WriteContextManager)

    def mtime(self, key):
        """Return modification time of cache file, or 0 if it does not exist.

        Time is in units returned by os.stat in the mtime field, which is
        platform-dependent.

        """
        if not self.init_entry(key):
            return 0
        else:
            sinfo = os.stat(self.cache_path(key))
            return sinfo.st_mtime

    def remove(self, key):
        lock = self._get_lock(key)
        try:
            lock.acquire_write()
            os.unlink(self.cache_path(key))
        finally:
            lock.release_write()
            os.unlink(self._lock_path(key))


class CacheError(SpackError):
    pass
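A hedged usage sketch for the new class (the cache root and key are illustrative; the (old, new) tuple and the atomic rename are exactly what WriteContextManager above provides):

    from spack.file_cache import FileCache

    cache = FileCache('/tmp/demo-cache')    # hypothetical cache root
    key = 'providers/demo-index.yaml'       # keys may be relative paths
    cache.init_entry(key)                   # makes parent dirs + lock file

    with cache.write_transaction(key) as (old, new):
        # `old` is the previous cache file (or None); `new` is a temp file
        # that atomically replaces it when the block exits cleanly.
        new.write('provider_index: {providers: {}}\n')

    with cache.read_transaction(key) as f:
        print(f.read())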
lib/spack/spack/modules.py
@@ -520,7 +520,8 @@ def header(self):
 
     def prerequisite(self, spec):
         tty.warn('prerequisites: not supported by dotkit module files')
-        tty.warn('\tYou may want to check ~/.spack/modules.yaml')
+        tty.warn('\tYou may want to check %s/modules.yaml'
+                 % spack.user_config_path)
         return ''
 
 
lib/spack/spack/package.py
@@ -630,50 +630,12 @@ def activated(self):
         exts = spack.install_layout.extension_map(self.extendee_spec)
         return (self.name in exts) and (exts[self.name] == self.spec)
 
-    def preorder_traversal(self, visited=None, **kwargs):
-        """This does a preorder traversal of the package's dependence DAG."""
-        virtual = kwargs.get("virtual", False)
-
-        if visited is None:
-            visited = set()
-
-        if self.name in visited:
-            return
-        visited.add(self.name)
-
-        if not virtual:
-            yield self
-
-        for name in sorted(self.dependencies.keys()):
-            dep_spec = self.get_dependency(name)
-            spec = dep_spec.spec
-
-            # Currently, we do not descend into virtual dependencies, as this
-            # makes doing a sensible traversal much harder.  We just assume
-            # that ANY of the virtual deps will work, which might not be true
-            # (due to conflicts or unsatisfiable specs).  For now this is ok,
-            # but we might want to reinvestigate if we start using a lot of
-            # complicated virtual dependencies
-            # TODO: reinvestigate this.
-            if spec.virtual:
-                if virtual:
-                    yield spec
-                continue
-
-            for pkg in spack.repo.get(name).preorder_traversal(visited,
-                                                               **kwargs):
-                yield pkg
-
     def provides(self, vpkg_name):
         """
         True if this package provides a virtual package with the specified name
         """
         return any(s.name == vpkg_name for s in self.provided)
 
-    def virtual_dependencies(self, visited=None):
-        for spec in sorted(set(self.preorder_traversal(virtual=True))):
-            yield spec
-
     @property
     def installed(self):
         return os.path.isdir(self.prefix)
@@ -1236,6 +1198,14 @@ def install(self, spec, prefix):
 
     def do_uninstall(self, force=False):
         if not self.installed:
+            # prefix may not exist, but DB may be inconsistent. Try to fix by
+            # removing, but omit hooks.
+            specs = spack.installed_db.query(self.spec, installed=True)
+            if specs:
+                spack.installed_db.remove(specs[0])
+                tty.msg("Removed stale DB entry for %s" % self.spec.short_spec)
+                return
+            else:
                 raise InstallError(str(self.spec) + " is not installed.")
 
         if not force:
@@ -1446,6 +1416,7 @@ def use_cray_compiler_names():
     os.environ['FC'] = 'ftn'
     os.environ['F77'] = 'ftn'
 
+
 def flatten_dependencies(spec, flat_dir):
     """Make each dependency of spec present in dir via symlink."""
     for dep in spec.traverse(root=False):
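Package.preorder_traversal (and virtual_dependencies built on it) disappears because provider lookups no longer walk each package's dependency DAG; they are answered by a ProviderIndex built over package specs. A hedged sketch of the equivalent query — this mirrors the code RepoPath.providers_for used before it grew a cached, merged index (see the repository.py changes below):

    import spack
    from spack.provider_index import ProviderIndex

    # Build an index once over every known package name...
    index = ProviderIndex(spack.repo.all_package_names())

    # ...then answer virtual-package queries from the index:
    print(index.providers_for('mpi'))   # e.g. specs for mpich, openmpi, ...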
lib/spack/spack/provider_index.py (renamed from spack/virtual.py; repository.py's import below changes to match)
@@ -25,8 +25,14 @@
 """
 The ``virtual`` module contains utility classes for virtual dependencies.
 """
-import spack.spec
-import itertools
+from itertools import product as iproduct
+from pprint import pformat
+
+import yaml
+from yaml.error import MarkedYAMLError
+
+import spack
 
 
 class ProviderIndex(object):
     """This is a dict of dicts used for finding providers of particular
@@ -44,13 +50,29 @@ class ProviderIndex(object):
 
     Calling providers_for(spec) will find specs that provide a
     matching implementation of MPI.
-    """
-    def __init__(self, specs, **kwargs):
-        # TODO: come up with another name for this. This "restricts" values
-        # to the verbatim input specs (i.e., it doesn't pre-apply package's
-        # constraints, and keeps things as broad as possible, so it's really
-        # the wrong name)
-        self.restrict = kwargs.setdefault('restrict', False)
+
+    """
+    def __init__(self, specs=None, restrict=False):
+        """Create a new ProviderIndex.
+
+        Optional arguments:
+
+        specs
+            List (or sequence) of specs.  If provided, will call
+            `update` on this ProviderIndex with each spec in the list.
+
+        restrict
+            "restricts" values to the verbatim input specs; do not
+            pre-apply package's constraints.
+
+            TODO: rename this.  It is intended to keep things as broad
+            as possible without overly restricting results, so it is
+            not the best name.
+        """
+        if specs is None:
+            specs = []
+
+        self.restrict = restrict
         self.providers = {}
 
         for spec in specs:
@@ -62,9 +84,8 @@ def __init__(self, specs, **kwargs):
             self.update(spec)
 
-
     def update(self, spec):
-        if type(spec) != spack.spec.Spec:
+        if not isinstance(spec, spack.spec.Spec):
             spec = spack.spec.Spec(spec)
 
         if not spec.name:
@@ -75,12 +96,13 @@ def update(self, spec):
 
         pkg = spec.package
         for provided_spec, provider_spec in pkg.provided.iteritems():
-            provider_spec.compiler_flags = spec.compiler_flags.copy()  # We want satisfaction other than flags
+            # We want satisfaction other than flags
+            provider_spec.compiler_flags = spec.compiler_flags.copy()
             if provider_spec.satisfies(spec, deps=False):
                 provided_name = provided_spec.name
 
                 provider_map = self.providers.setdefault(provided_name, {})
-                if not provided_spec in provider_map:
+                if provided_spec not in provider_map:
                     provider_map[provided_spec] = set()
 
                 if self.restrict:
@@ -102,7 +124,6 @@ def update(self, spec):
                     constrained.constrain(provider_spec)
                     provider_map[provided_spec].add(constrained)
 
-
     def providers_for(self, *vpkg_specs):
         """Gives specs of all packages that provide virtual packages
            with the supplied specs."""
@@ -114,26 +135,25 @@ def providers_for(self, *vpkg_specs):
 
             # Add all the providers that satisfy the vpkg spec.
             if vspec.name in self.providers:
-                for provider_spec, spec_set in self.providers[vspec.name].items():
-                    if provider_spec.satisfies(vspec, deps=False):
+                for p_spec, spec_set in self.providers[vspec.name].items():
+                    if p_spec.satisfies(vspec, deps=False):
                         providers.update(spec_set)
 
         # Return providers in order
         return sorted(providers)
 
     # TODO: this is pretty darned nasty, and inefficient, but there
     # are not that many vdeps in most specs.
     def _cross_provider_maps(self, lmap, rmap):
         result = {}
-        for lspec, rspec in itertools.product(lmap, rmap):
+        for lspec, rspec in iproduct(lmap, rmap):
             try:
                 constrained = lspec.constrained(rspec)
             except spack.spec.UnsatisfiableSpecError:
                 continue
 
             # lp and rp are left and right provider specs.
-            for lp_spec, rp_spec in itertools.product(lmap[lspec], rmap[rspec]):
+            for lp_spec, rp_spec in iproduct(lmap[lspec], rmap[rspec]):
                 if lp_spec.name == rp_spec.name:
                     try:
                         const = lp_spec.constrained(rp_spec, deps=False)
@@ -142,12 +162,10 @@ def _cross_provider_maps(self, lmap, rmap):
                         continue
         return result
 
-
     def __contains__(self, name):
         """Whether a particular vpkg name is in the index."""
         return name in self.providers
 
-
     def satisfies(self, other):
         """Check that providers of virtual specs are compatible."""
         common = set(self.providers) & set(other.providers)
@@ -164,3 +182,111 @@ def satisfies(self, other):
             result[name] = crossed
 
         return all(c in result for c in common)
+
+    def to_yaml(self, stream=None):
+        provider_list = self._transform(
+            lambda vpkg, pset: [
+                vpkg.to_node_dict(), [p.to_node_dict() for p in pset]], list)
+
+        yaml.dump({'provider_index': {'providers': provider_list}},
+                  stream=stream)
+
+    @staticmethod
+    def from_yaml(stream):
+        try:
+            yfile = yaml.load(stream)
+        except MarkedYAMLError, e:
+            raise spack.spec.SpackYAMLError(
+                "error parsing YAML ProviderIndex cache:", str(e))
+
+        if not isinstance(yfile, dict):
+            raise spack.spec.SpackYAMLError(
+                "YAML ProviderIndex was not a dict.")
+
+        if 'provider_index' not in yfile:
+            raise spack.spec.SpackYAMLError(
+                "YAML ProviderIndex does not start with 'provider_index'")
+
+        index = ProviderIndex()
+        providers = yfile['provider_index']['providers']
+        index.providers = _transform(
+            providers,
+            lambda vpkg, plist: (
+                spack.spec.Spec.from_node_dict(vpkg),
+                set(spack.spec.Spec.from_node_dict(p) for p in plist)))
+        return index
+
+    def merge(self, other):
+        """Merge `other` ProviderIndex into this one."""
+        other = other.copy()   # defensive copy.
+
+        for pkg in other.providers:
+            if pkg not in self.providers:
+                self.providers[pkg] = other.providers[pkg]
+                continue
+
+            spdict, opdict = self.providers[pkg], other.providers[pkg]
+            for provided_spec in opdict:
+                if provided_spec not in spdict:
+                    spdict[provided_spec] = opdict[provided_spec]
+                    continue
+
+                spdict[provided_spec] += opdict[provided_spec]
+
+    def remove_provider(self, pkg_name):
+        """Remove a provider from the ProviderIndex."""
+        empty_pkg_dict = []
+        for pkg, pkg_dict in self.providers.items():
+            empty_pset = []
+            for provided, pset in pkg_dict.items():
+                same_name = set(p for p in pset if p.fullname == pkg_name)
+                pset.difference_update(same_name)
+
+                if not pset:
+                    empty_pset.append(provided)
+
+            for provided in empty_pset:
+                del pkg_dict[provided]
+
+            if not pkg_dict:
+                empty_pkg_dict.append(pkg)
+
+        for pkg in empty_pkg_dict:
+            del self.providers[pkg]
+
+    def copy(self):
+        """Deep copy of this ProviderIndex."""
+        clone = ProviderIndex()
+        clone.providers = self._transform(
+            lambda vpkg, pset: (vpkg, set(p.copy() for p in pset)))
+        return clone
+
+    def __eq__(self, other):
+        return self.providers == other.providers
+
+    def _transform(self, transform_fun, out_mapping_type=dict):
+        return _transform(self.providers, transform_fun, out_mapping_type)
+
+    def __str__(self):
+        return pformat(
+            _transform(self.providers,
+                       lambda k, v: (k, list(v))))
+
+
+def _transform(providers, transform_fun, out_mapping_type=dict):
+    """Syntactic sugar for transforming a providers dict.
+
+    transform_fun takes a (vpkg, pset) mapping and runs it on each
+    pair in nested dicts.
+
+    """
+    def mapiter(mappings):
+        if isinstance(mappings, dict):
+            return mappings.iteritems()
+        else:
+            return iter(mappings)
+
+    return dict(
+        (name, out_mapping_type([
+            transform_fun(vpkg, pset) for vpkg, pset in mapiter(mappings)]))
+        for name, mappings in providers.items())

lib/spack/spack/repository.py
@@ -23,6 +23,9 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import os
+import stat
+import shutil
+import errno
 import exceptions
 import sys
 import inspect
@@ -30,15 +33,18 @@
 import re
 import traceback
 from bisect import bisect_left
+from types import ModuleType
+
 import yaml
 
 import llnl.util.tty as tty
 from llnl.util.filesystem import *
 
+import spack
 import spack.error
 import spack.config
 import spack.spec
-from spack.virtual import ProviderIndex
+from spack.provider_index import ProviderIndex
 from spack.util.naming import *
 
 #
@@ -51,6 +57,7 @@
 # These names describe how repos should be laid out in the filesystem.
 #
 repo_config_name = 'repo.yaml'     # Top-level filename for repo config.
+repo_index_name = 'index.yaml'     # Top-level filename for repository index.
 packages_dir_name = 'packages'     # Top-level repo directory containing pkgs.
 package_file_name = 'package.py'   # Filename for packages in a repository.
@@ -68,12 +75,21 @@ def converter(self, spec_like, *args, **kwargs):
     return converter
 
 
-def _make_namespace_module(ns):
-    module = imp.new_module(ns)
-    module.__file__ = "(spack namespace)"
-    module.__path__ = []
-    module.__package__ = ns
-    return module
+class SpackNamespace(ModuleType):
+    """ Allow lazy loading of modules."""
+    def __init__(self, namespace):
+        super(ModuleType, self).__init__(self, namespace)
+        self.__file__ = "(spack namespace)"
+        self.__path__ = []
+        self.__name__ = namespace
+        self.__package__ = namespace
+        self.__modules = {}
+
+    def __getattr__(self, name):
+        """Getattr lazily loads modules if they're not already loaded."""
+        submodule = self.__package__ + '.' + name
+        setattr(self, name, __import__(submodule))
+        return getattr(self, name)
 
 
 def substitute_spack_prefix(path):
@@ -104,7 +120,7 @@ def __init__(self, *repo_dirs, **kwargs):
         self.by_namespace = NamespaceTrie()
         self.by_path = {}
 
-        self._all_package_names = []
+        self._all_package_names = None
         self._provider_index = None
 
         # If repo_dirs is empty, just use the configuration
@@ -125,7 +141,6 @@ def __init__(self, *repo_dirs, **kwargs):
                     "To remove the bad repository, run this command:",
                     "    spack repo rm %s" % root)
 
-
     def swap(self, other):
         """Convenience function to make swapping repositories easier.
 
@@ -143,7 +158,6 @@ def swap(self, other):
             setattr(self, attr, getattr(other, attr))
             setattr(other, attr, tmp)
 
-
    def _add(self, repo):
        """Add a repository to the namespace and path indexes.
 
@@ -157,36 +171,28 @@ def _add(self, repo):
         if repo.namespace in self.by_namespace:
             raise DuplicateRepoError(
                 "Package repos '%s' and '%s' both provide namespace %s"
-                % (repo.root, self.by_namespace[repo.namespace].root, repo.namespace))
+                % (repo.root, self.by_namespace[repo.namespace].root,
+                   repo.namespace))
 
         # Add repo to the pkg indexes
         self.by_namespace[repo.full_namespace] = repo
         self.by_path[repo.root] = repo
 
-        # add names to the cached name list
-        new_pkgs = set(repo.all_package_names())
-        new_pkgs.update(set(self._all_package_names))
-        self._all_package_names = sorted(new_pkgs, key=lambda n: n.lower())
-
-
     def put_first(self, repo):
         """Add repo first in the search path."""
         self._add(repo)
         self.repos.insert(0, repo)
 
-
     def put_last(self, repo):
         """Add repo last in the search path."""
         self._add(repo)
         self.repos.append(repo)
 
-
     def remove(self, repo):
         """Remove a repo from the search path."""
         if repo in self.repos:
             self.repos.remove(repo)
 
-
     def get_repo(self, namespace, default=NOT_PROVIDED):
         """Get a repository by namespace.
            Arguments
@@ -206,38 +212,45 @@ def get_repo(self, namespace, default=NOT_PROVIDED):
             return default
         return self.by_namespace[fullspace]
 
-
     def first_repo(self):
         """Get the first repo in precedence order."""
         return self.repos[0] if self.repos else None
 
-
     def all_package_names(self):
         """Return all unique package names in all repositories."""
+        if self._all_package_names is None:
+            all_pkgs = set()
+            for repo in self.repos:
+                for name in repo.all_package_names():
+                    all_pkgs.add(name)
+            self._all_package_names = sorted(all_pkgs, key=lambda n: n.lower())
         return self._all_package_names
 
-
     def all_packages(self):
         for name in self.all_package_names():
             yield self.get(name)
 
+    @property
+    def provider_index(self):
+        """Merged ProviderIndex from all Repos in the RepoPath."""
+        if self._provider_index is None:
+            self._provider_index = ProviderIndex()
+            for repo in reversed(self.repos):
+                self._provider_index.merge(repo.provider_index)
+
+        return self._provider_index
+
     @_autospec
     def providers_for(self, vpkg_spec):
-        if self._provider_index is None:
-            self._provider_index = ProviderIndex(self.all_package_names())
-
-        providers = self._provider_index.providers_for(vpkg_spec)
+        providers = self.provider_index.providers_for(vpkg_spec)
         if not providers:
             raise UnknownPackageError(vpkg_spec.name)
         return providers
 
-
     @_autospec
     def extensions_for(self, extendee_spec):
         return [p for p in self.all_packages() if p.extends(extendee_spec)]
 
-
     def find_module(self, fullname, path=None):
         """Implements precedence for overlaid namespaces.
 
@@ -264,7 +277,6 @@ def find_module(self, fullname, path=None):
 
         return None
 
-
     def load_module(self, fullname):
         """Handles loading container namespaces when necessary.
 
@@ -273,18 +285,14 @@ def load_module(self, fullname):
         if fullname in sys.modules:
             return sys.modules[fullname]
 
-        # partition fullname into prefix and module name.
-        namespace, dot, module_name = fullname.rpartition('.')
-
         if not self.by_namespace.is_prefix(fullname):
             raise ImportError("No such Spack repo: %s" % fullname)
 
-        module = _make_namespace_module(namespace)
+        module = SpackNamespace(fullname)
         module.__loader__ = self
         sys.modules[fullname] = module
         return module
 
-
     @_autospec
     def repo_for_pkg(self, spec):
         """Given a spec, get the repository for its package."""
@@ -306,7 +314,6 @@ def repo_for_pkg(self, spec):
         # that can operate on packages that don't exist yet.
         return self.first_repo()
 
-
     @_autospec
     def get(self, spec, new=False):
         """Find a repo that contains the supplied spec's package.
 
@@ -315,12 +322,10 @@ def get(self, spec, new=False):
         """
         return self.repo_for_pkg(spec).get(spec)
 
-
     def get_pkg_class(self, pkg_name):
         """Find a class for the spec's package and return the class object."""
         return self.repo_for_pkg(pkg_name).get_pkg_class(pkg_name)
 
-
     @_autospec
     def dump_provenance(self, spec, path):
         """Dump provenance information for a spec to a particular path.
 
@@ -330,24 +335,19 @@ def dump_provenance(self, spec, path):
         """
         return self.repo_for_pkg(spec).dump_provenance(spec, path)
 
-
     def dirname_for_package_name(self, pkg_name):
         return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name)
 
-
     def filename_for_package_name(self, pkg_name):
         return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name)
 
-
     def exists(self, pkg_name):
         return any(repo.exists(pkg_name) for repo in self.repos)
 
-
     def __contains__(self, pkg_name):
         return self.exists(pkg_name)
 
 
 class Repo(object):
     """Class representing a package repository in the filesystem.
 
@@ -381,12 +381,14 @@ def __init__(self, root, namespace=repo_namespace):
 
         # check and raise BadRepoError on fail.
         def check(condition, msg):
-            if not condition: raise BadRepoError(msg)
+            if not condition:
+                raise BadRepoError(msg)
 
         # Validate repository layout.
         self.config_file = join_path(self.root, repo_config_name)
         check(os.path.isfile(self.config_file),
               "No %s found in '%s'" % (repo_config_name, root))
+
         self.packages_path = join_path(self.root, packages_dir_name)
         check(os.path.isdir(self.packages_path),
               "No directory '%s' found in '%s'" % (repo_config_name, root))
@@ -398,12 +400,14 @@ def check(condition, msg):
 
         self.namespace = config['namespace']
         check(re.match(r'[a-zA-Z][a-zA-Z0-9_.]+', self.namespace),
-              ("Invalid namespace '%s' in repo '%s'. " % (self.namespace, self.root)) +
+              ("Invalid namespace '%s' in repo '%s'. "
+               % (self.namespace, self.root)) +
               "Namespaces must be valid python identifiers separated by '.'")
 
         # Set up 'full_namespace' to include the super-namespace
         if self.super_namespace:
-            self.full_namespace = "%s.%s" % (self.super_namespace, self.namespace)
+            self.full_namespace = "%s.%s" % (
+                self.super_namespace, self.namespace)
         else:
             self.full_namespace = self.namespace
 
@@ -414,12 +418,21 @@ def check(condition, msg):
         self._modules = {}
         self._classes = {}
         self._instances = {}
 
+        # list of packages that are newer than the index.
+        self._needs_update = []
+
+        # Index of virtual dependencies
         self._provider_index = None
 
+        # Cached list of package names.
         self._all_package_names = None
 
         # make sure the namespace for packages in this repo exists.
         self._create_namespace()
 
+        # Unique filename for cache of virtual dependency providers
+        self._cache_file = 'providers/%s-index.yaml' % self.namespace
+
     def _create_namespace(self):
|
def _create_namespace(self):
|
||||||
"""Create this repo's namespace module and insert it into sys.modules.
|
"""Create this repo's namespace module and insert it into sys.modules.
|
||||||
@ -431,8 +444,9 @@ def _create_namespace(self):
|
|||||||
parent = None
|
parent = None
|
||||||
for l in range(1, len(self._names) + 1):
|
for l in range(1, len(self._names) + 1):
|
||||||
ns = '.'.join(self._names[:l])
|
ns = '.'.join(self._names[:l])
|
||||||
if not ns in sys.modules:
|
|
||||||
module = _make_namespace_module(ns)
|
if ns not in sys.modules:
|
||||||
|
module = SpackNamespace(ns)
|
||||||
module.__loader__ = self
|
module.__loader__ = self
|
||||||
sys.modules[ns] = module
|
sys.modules[ns] = module
|
||||||
|
|
||||||
@ -443,13 +457,13 @@ def _create_namespace(self):
|
|||||||
# import spack.pkg.builtin.mpich as mpich
|
# import spack.pkg.builtin.mpich as mpich
|
||||||
if parent:
|
if parent:
|
||||||
modname = self._names[l - 1]
|
modname = self._names[l - 1]
|
||||||
if not hasattr(parent, modname):
|
|
||||||
setattr(parent, modname, module)
|
setattr(parent, modname, module)
|
||||||
else:
|
else:
|
||||||
# no need to set up a module, but keep track of the parent.
|
# no need to set up a module
|
||||||
module = sys.modules[ns]
|
module = sys.modules[ns]
|
||||||
parent = module
|
|
||||||
|
|
||||||
|
# but keep track of the parent in this loop
|
||||||
|
parent = module
|
||||||
|
|
||||||
def real_name(self, import_name):
|
def real_name(self, import_name):
|
||||||
"""Allow users to import Spack packages using Python identifiers.
|
"""Allow users to import Spack packages using Python identifiers.
|
||||||
@ -476,13 +490,11 @@ def real_name(self, import_name):
|
|||||||
return name
|
return name
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def is_prefix(self, fullname):
|
def is_prefix(self, fullname):
|
||||||
"""True if fullname is a prefix of this Repo's namespace."""
|
"""True if fullname is a prefix of this Repo's namespace."""
|
||||||
parts = fullname.split('.')
|
parts = fullname.split('.')
|
||||||
return self._names[:len(parts)] == parts
|
return self._names[:len(parts)] == parts
|
||||||
|
|
||||||
|
|
||||||
def find_module(self, fullname, path=None):
|
def find_module(self, fullname, path=None):
|
||||||
"""Python find_module import hook.
|
"""Python find_module import hook.
|
||||||
|
|
||||||
@ -498,7 +510,6 @@ def find_module(self, fullname, path=None):
|
|||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def load_module(self, fullname):
|
def load_module(self, fullname):
|
||||||
"""Python importer load hook.
|
"""Python importer load hook.
|
||||||
|
|
||||||
@ -510,7 +521,7 @@ def load_module(self, fullname):
|
|||||||
namespace, dot, module_name = fullname.rpartition('.')
|
namespace, dot, module_name = fullname.rpartition('.')
|
||||||
|
|
||||||
if self.is_prefix(fullname):
|
if self.is_prefix(fullname):
|
||||||
module = _make_namespace_module(fullname)
|
module = SpackNamespace(fullname)
|
||||||
|
|
||||||
elif namespace == self.full_namespace:
|
elif namespace == self.full_namespace:
|
||||||
real_name = self.real_name(module_name)
|
real_name = self.real_name(module_name)
|
||||||
@ -523,8 +534,12 @@ def load_module(self, fullname):
|
|||||||
|
|
||||||
module.__loader__ = self
|
module.__loader__ = self
|
||||||
sys.modules[fullname] = module
|
sys.modules[fullname] = module
|
||||||
return module
|
if namespace != fullname:
|
||||||
|
parent = sys.modules[namespace]
|
||||||
|
if not hasattr(parent, module_name):
|
||||||
|
setattr(parent, module_name, module)
|
||||||
|
|
||||||
|
return module
|
||||||
|
|
||||||
def _read_config(self):
|
def _read_config(self):
|
||||||
"""Check for a YAML config file in this db's root directory."""
|
"""Check for a YAML config file in this db's root directory."""
|
||||||
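
The `load_module` hooks above are PEP 302-style loaders: they materialize empty container modules for repository namespaces on demand, register them in `sys.modules`, and (new in this change) attach each child module as an attribute of its parent so that `import spack.pkg.builtin.mpich as mpich` resolves attribute access as well as the import itself. A minimal standalone sketch of the same pattern (names like `DemoNamespace` are illustrative, not Spack's):

    import sys
    import types

    class DemoNamespace(types.ModuleType):
        """Empty container module standing in for a package namespace."""
        def __init__(self, name):
            super(DemoNamespace, self).__init__(name)
            self.__path__ = []  # mark it as a package

    def load_namespace(fullname):
        # Any loader must reuse an already-registered module.
        if fullname in sys.modules:
            return sys.modules[fullname]

        module = DemoNamespace(fullname)
        sys.modules[fullname] = module

        # Attach the child to its parent, as the diff above does, so
        # that sys.modules['a'].b is the module registered for 'a.b'.
        parent_name, _, child = fullname.rpartition('.')
        if parent_name:
            parent = load_namespace(parent_name)
            if not hasattr(parent, child):
                setattr(parent, child, module)
        return module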
@@ -534,23 +549,23 @@ def _read_config(self):

                 if (not yaml_data or 'repo' not in yaml_data or
                         not isinstance(yaml_data['repo'], dict)):
-                    tty.die("Invalid %s in repository %s"
-                            % (repo_config_name, self.root))
+                    tty.die("Invalid %s in repository %s" % (
+                        repo_config_name, self.root))

                 return yaml_data['repo']

-        except exceptions.IOError, e:
+        except exceptions.IOError:
             tty.die("Error reading %s when opening %s"
                     % (self.config_file, self.root))

-
     @_autospec
     def get(self, spec, new=False):
         if spec.virtual:
             raise UnknownPackageError(spec.name)

         if spec.namespace and spec.namespace != self.namespace:
-            raise UnknownPackageError("Repository %s does not contain package %s"
+            raise UnknownPackageError(
+                "Repository %s does not contain package %s"
                 % (self.namespace, spec.fullname))

         key = hash(spec)
@@ -559,14 +574,13 @@ def get(self, spec, new=False):
         try:
             copy = spec.copy()  # defensive copy.  Package owns its spec.
             self._instances[key] = package_class(copy)
-        except Exception, e:
+        except Exception:
             if spack.debug:
                 sys.excepthook(*sys.exc_info())
             raise FailedConstructorError(spec.fullname, *sys.exc_info())

         return self._instances[key]

-
     @_autospec
     def dump_provenance(self, spec, path):
         """Dump provenance information for a spec to a particular path.
@@ -579,7 +593,8 @@ def dump_provenance(self, spec, path):
             raise UnknownPackageError(spec.name)

         if spec.namespace and spec.namespace != self.namespace:
-            raise UnknownPackageError("Repository %s does not contain package %s."
+            raise UnknownPackageError(
+                "Repository %s does not contain package %s."
                 % (self.namespace, spec.fullname))

         # Install any patch files needed by packages.
@@ -595,34 +610,61 @@ def dump_provenance(self, spec, path):
         # Install the package.py file itself.
         install(self.filename_for_package_name(spec), path)

-
     def purge(self):
         """Clear entire package instance cache."""
         self._instances.clear()

+    def _update_provider_index(self):
+        # Check modification dates of all packages
+        self._fast_package_check()
+
+        def read():
+            with open(self.index_file) as f:
+                self._provider_index = ProviderIndex.from_yaml(f)
+
+        # Read the old ProviderIndex, or make a new one.
+        key = self._cache_file
+        index_existed = spack.user_cache.init_entry(key)
+        if index_existed and not self._needs_update:
+            with spack.user_cache.read_transaction(key) as f:
+                self._provider_index = ProviderIndex.from_yaml(f)
+        else:
+            with spack.user_cache.write_transaction(key) as (old, new):
+                if old:
+                    self._provider_index = ProviderIndex.from_yaml(old)
+                else:
+                    self._provider_index = ProviderIndex()
+
+                for pkg_name in self._needs_update:
+                    namespaced_name = '%s.%s' % (self.namespace, pkg_name)
+                    self._provider_index.remove_provider(namespaced_name)
+                    self._provider_index.update(namespaced_name)
+
+                self._provider_index.to_yaml(new)
+
+    @property
+    def provider_index(self):
+        """A provider index with names *specific* to this repo."""
+        if self._provider_index is None:
+            self._update_provider_index()
+        return self._provider_index
+
     @_autospec
     def providers_for(self, vpkg_spec):
-        if self._provider_index is None:
-            self._provider_index = ProviderIndex(self.all_package_names())
-
-        providers = self._provider_index.providers_for(vpkg_spec)
+        providers = self.provider_index.providers_for(vpkg_spec)
         if not providers:
             raise UnknownPackageError(vpkg_spec.name)
         return providers

-
     @_autospec
     def extensions_for(self, extendee_spec):
         return [p for p in self.all_packages() if p.extends(extendee_spec)]

-
     def _check_namespace(self, spec):
         """Check that the spec's namespace is the same as this repository's."""
         if spec.namespace and spec.namespace != self.namespace:
             raise UnknownNamespaceError(spec.namespace)

-
     @_autospec
     def dirname_for_package_name(self, spec):
         """Get the directory name for a particular package.  This is the
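
This `_update_provider_index` is the core of the speedup: rather than importing every `package.py` to rebuild the virtual-provider index, the repo keeps a YAML index in the user cache and rewrites only the entries for packages whose files are newer than the index. The read path takes a shared lock; the write path takes an exclusive lock and atomically replaces the file. In outline (a simplified sketch with a plain dict standing in for `ProviderIndex`; the `cache` object is assumed to expose the `init_entry`/`read_transaction`/`write_transaction` API this commit's file cache uses):

    def load_or_update_index(cache, key, needs_update, parse, rebuild_entry):
        """Return an up-to-date index, rebuilding only stale entries."""
        index_existed = cache.init_entry(key)

        if index_existed and not needs_update:
            # Fast path: shared (read) lock, safe for many processes.
            with cache.read_transaction(key) as f:
                return parse(f)

        # Slow path: exclusive (write) lock.  'old' is the previous
        # index file (or None); 'new' atomically replaces it on success.
        with cache.write_transaction(key) as (old, new):
            index = parse(old) if old else {}
            for name in needs_update:
                index[name] = rebuild_entry(name)
            new.write(repr(index))
            return index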
@@ -630,7 +672,6 @@ def dirname_for_package_name(self, spec):
         self._check_namespace(spec)
         return join_path(self.packages_path, spec.name)

-
     @_autospec
     def filename_for_package_name(self, spec):
         """Get the filename for the module we should load for a particular
@@ -645,48 +686,95 @@ def filename_for_package_name(self, spec):
         pkg_dir = self.dirname_for_package_name(spec.name)
         return join_path(pkg_dir, package_file_name)

-    def all_package_names(self):
-        """Returns a sorted list of all package names in the Repo."""
+    def _fast_package_check(self):
+        """List packages in the repo and check whether index is up to date.
+
+        Both of these operations require checking all `package.py`
+        files so we do them at the same time.  We list the repo
+        directory and look at package.py files, and we compare the
+        index modification date with the most recently modified package
+        file, storing the result.
+
+        The implementation here should try to minimize filesystem
+        calls.  At the moment, it is O(number of packages) and makes
+        about one stat call per package.  This is reasonably fast, and
+        avoids actually importing packages in Spack, which is slow.
+
+        """
         if self._all_package_names is None:
             self._all_package_names = []

+            # Get index modification time.
+            index_mtime = spack.user_cache.mtime(self._cache_file)
+
             for pkg_name in os.listdir(self.packages_path):
                 # Skip non-directories in the package root.
                 pkg_dir = join_path(self.packages_path, pkg_name)
-                if not os.path.isdir(pkg_dir):
-                    continue
-
-                # Skip directories without a package.py in them.
-                pkg_file = join_path(self.packages_path, pkg_name, package_file_name)
-                if not os.path.isfile(pkg_file):
-                    continue

                 # Warn about invalid names that look like packages.
                 if not valid_module_name(pkg_name):
-                    tty.warn("Skipping package at %s. '%s' is not a valid Spack module name."
-                             % (pkg_dir, pkg_name))
+                    msg = ("Skipping package at %s. "
+                           "'%s' is not a valid Spack module name.")
+                    tty.warn(msg % (pkg_dir, pkg_name))
+                    continue
+
+                # construct the file name from the directory
+                pkg_file = join_path(
+                    self.packages_path, pkg_name, package_file_name)
+
+                # Use stat here to avoid lots of calls to the filesystem.
+                try:
+                    sinfo = os.stat(pkg_file)
+                except OSError as e:
+                    if e.errno == errno.ENOENT:
+                        # No package.py file here.
+                        continue
+                    elif e.errno == errno.EACCES:
+                        tty.warn("Can't read package file %s." % pkg_file)
+                        continue
+                    raise e
+
+                # if it's not a file, skip it.
+                if stat.S_ISDIR(sinfo.st_mode):
                     continue

                 # All checks passed.  Add it to the list.
                 self._all_package_names.append(pkg_name)
+
+                # record the package if it is newer than the index.
+                if sinfo.st_mtime > index_mtime:
+                    self._needs_update.append(pkg_name)
+
             self._all_package_names.sort()

+        return self._all_package_names
+
+    def all_package_names(self):
+        """Returns a sorted list of all package names in the Repo."""
+        self._fast_package_check()
         return self._all_package_names

-
     def all_packages(self):
+        """Iterator over all packages in the repository.
+
+        Use this with care, because loading packages is slow.
+
+        """
         for name in self.all_package_names():
             yield self.get(name)

-
     def exists(self, pkg_name):
         """Whether a package with the supplied name exists."""
+        if self._all_package_names:
             # This does a binary search in the sorted list.
             idx = bisect_left(self.all_package_names(), pkg_name)
             return (idx < len(self._all_package_names) and
                     self._all_package_names[idx] == pkg_name)

+        # If we haven't generated the full package list, don't.
+        # Just check whether the file exists.
+        filename = self.filename_for_package_name(pkg_name)
+        return os.path.exists(filename)
+
     def _get_pkg_module(self, pkg_name):
         """Create a module for a particular package.
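
`_fast_package_check` above deliberately makes one `os.stat` call per package and no imports: the index is considered stale for exactly those packages whose `package.py` is newer than the cached index file. Stripped of the repo bookkeeping, the staleness scan amounts to this (a standalone sketch assuming the `<packages_path>/<name>/package.py` layout):

    import errno
    import os
    import stat

    def stale_packages(packages_path, index_mtime):
        """Yield package names whose package.py is newer than the index."""
        for pkg_name in os.listdir(packages_path):
            pkg_file = os.path.join(packages_path, pkg_name, 'package.py')
            try:
                sinfo = os.stat(pkg_file)
            except OSError as e:
                if e.errno in (errno.ENOENT, errno.EACCES):
                    continue  # no package.py here, or unreadable: skip
                raise
            if stat.S_ISDIR(sinfo.st_mode):
                continue      # a directory named package.py: not a package
            if sinfo.st_mtime > index_mtime:
                yield pkg_name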
@@ -719,7 +807,6 @@ def _get_pkg_module(self, pkg_name):

         return self._modules[pkg_name]

-
     def get_pkg_class(self, pkg_name):
         """Get the class for the package out of its module.

@@ -727,6 +814,11 @@ def get_pkg_class(self, pkg_name):
         package.  Then extracts the package class from the module
         according to Spack's naming convention.
         """
+        namespace, _, pkg_name = pkg_name.rpartition('.')
+        if namespace and (namespace != self.namespace):
+            raise InvalidNamespaceError('Invalid namespace for %s repo: %s'
+                                        % (self.namespace, namespace))
+
         class_name = mod_to_class(pkg_name)
         module = self._get_pkg_module(pkg_name)

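
The new prologue in `get_pkg_class` lets callers pass a namespace-qualified name such as `builtin.mock.mpich`; `str.rpartition('.')` splits on the last dot and degrades cleanly to empty strings when the name is unqualified:

    >>> 'builtin.mock.mpich'.rpartition('.')
    ('builtin.mock', '.', 'mpich')
    >>> 'mpich'.rpartition('.')
    ('', '', 'mpich')

so the namespace check is skipped for bare package names and enforced for qualified ones.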
@@ -736,15 +828,12 @@ def get_pkg_class(self, pkg_name):

         return cls

-
     def __str__(self):
         return "[Repo '%s' at '%s']" % (self.namespace, self.root)

-
     def __repr__(self):
         return self.__str__()

-
     def __contains__(self, pkg_name):
         return self.exists(pkg_name)

@@ -753,30 +842,37 @@ def create_repo(root, namespace=None):
     """Create a new repository in root with the specified namespace.

        If the namespace is not provided, use basename of root.
-       Return the canonicalized path and the namespace of the created repository.
+       Return the canonicalized path and namespace of the created repository.
     """
     root = canonicalize_path(root)
     if not namespace:
         namespace = os.path.basename(root)

     if not re.match(r'\w[\.\w-]*', namespace):
-        raise InvalidNamespaceError("'%s' is not a valid namespace." % namespace)
+        raise InvalidNamespaceError(
+            "'%s' is not a valid namespace." % namespace)

     existed = False
     if os.path.exists(root):
         if os.path.isfile(root):
-            raise BadRepoError('File %s already exists and is not a directory' % root)
+            raise BadRepoError('File %s already exists and is not a directory'
+                               % root)
         elif os.path.isdir(root):
             if not os.access(root, os.R_OK | os.W_OK):
-                raise BadRepoError('Cannot create new repo in %s: cannot access directory.' % root)
+                raise BadRepoError(
+                    'Cannot create new repo in %s: cannot access directory.'
+                    % root)
             if os.listdir(root):
-                raise BadRepoError('Cannot create new repo in %s: directory is not empty.' % root)
+                raise BadRepoError(
+                    'Cannot create new repo in %s: directory is not empty.'
+                    % root)
         existed = True

     full_path = os.path.realpath(root)
     parent = os.path.dirname(full_path)
     if not os.access(parent, os.R_OK | os.W_OK):
-        raise BadRepoError("Cannot create repository in %s: can't access parent!" % root)
+        raise BadRepoError(
+            "Cannot create repository in %s: can't access parent!" % root)

     try:
         config_path = os.path.join(root, repo_config_name)
@@ -102,23 +102,26 @@
 from StringIO import StringIO
 from operator import attrgetter

+import yaml
+from yaml.error import MarkedYAMLError
+
 import llnl.util.tty as tty
+from llnl.util.filesystem import join_path
+from llnl.util.lang import *
+from llnl.util.tty.color import *
+
 import spack
 import spack.architecture
 import spack.compilers as compilers
 import spack.error
 import spack.parse
-import yaml
-from llnl.util.filesystem import join_path
-from llnl.util.lang import *
-from llnl.util.tty.color import *
 from spack.build_environment import get_path_from_module, load_module
 from spack.util.naming import mod_to_class
 from spack.util.prefix import Prefix
 from spack.util.string import *
 from spack.version import *
-from spack.virtual import ProviderIndex
-from yaml.error import MarkedYAMLError
+from spack.provider_index import ProviderIndex

 # Valid pattern for an identifier in Spack
 identifier_re = r'\w[\w-]*'
@@ -438,8 +441,7 @@ def copy(self):
         return clone

     def _cmp_key(self):
-        return ''.join(str(key) + ' '.join(str(v) for v in value)
-                       for key, value in sorted(self.items()))
+        return tuple((k, tuple(v)) for k, v in sorted(self.iteritems()))

     def __str__(self):
         sorted_keys = filter(
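
The `_cmp_key` change fixes a real ambiguity: joining keys and values into one string can make distinct flag maps compare equal, while a nested tuple is hashable and compares field by field. A quick illustration of the difference (hypothetical `old_key`/`new_key` helpers mirroring the two implementations):

    old_key = lambda d: ''.join(str(k) + ' '.join(str(v) for v in vs)
                                for k, vs in sorted(d.items()))
    new_key = lambda d: tuple((k, tuple(v)) for k, v in sorted(d.items()))

    # Two different flag maps collide under the string key...
    assert old_key({'cflags': ['a b']}) == old_key({'cflags': ['a', 'b']})
    # ...but stay distinct under the tuple key.
    assert new_key({'cflags': ['a b']}) != new_key({'cflags': ['a', 'b']})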
@@ -715,7 +717,7 @@ def package_class(self):
         """Internal package call gets only the class object for a package.
            Use this to just get package metadata.
         """
-        return spack.repo.get_pkg_class(self.name)
+        return spack.repo.get_pkg_class(self.fullname)

     @property
     def virtual(self):
@@ -904,36 +906,35 @@ def dag_hash(self, length=None):
         return b32_hash

     def to_node_dict(self):
+        d = {}
+
         params = dict((name, v.value) for name, v in self.variants.items())
         params.update(dict((name, value)
                       for name, value in self.compiler_flags.items()))

+        if params:
+            d['parameters'] = params
+
+        if self.dependencies():
             deps = self.dependencies_dict(deptype=('link', 'run'))
-        d = {
-            'parameters': params,
-            'arch': self.architecture,
-            'dependencies': dict(
+            d['dependencies'] = dict(
                 (name, {
                     'hash': dspec.spec.dag_hash(),
                     'type': [str(s) for s in dspec.deptypes]})
                 for name, dspec in deps.items())
-        }

-        # Older concrete specs do not have a namespace.  Omit for
-        # consistent hashing.
-        if not self.concrete or self.namespace:
+        if self.namespace:
             d['namespace'] = self.namespace

         if self.architecture:
             # TODO: Fix the target.to_dict to account for the tuple
             # Want it to be a dict of dicts
             d['arch'] = self.architecture.to_dict()
-        else:
-            d['arch'] = None

         if self.compiler:
             d.update(self.compiler.to_dict())
-        else:
-            d['compiler'] = None
+
+        if self.versions:
             d.update(self.versions.to_dict())

         return {self.name: d}
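
The net effect of the `to_node_dict` rewrite is that empty fields are omitted rather than serialized as `None` placeholders, which keeps hashing consistent and the YAML small. Illustratively, a node for a spec with one link/run dependency now comes out shaped like this (all values made up):

    {'mpileaks': {
        'version': '1.0',
        'arch': {'platform': 'linux'},
        'dependencies': {
            'mpich': {'hash': '<32-char base32 dag hash>',
                      'type': ['link', 'run']}},
    }}
    # 'parameters', 'namespace', and 'compiler' appear only when the
    # spec actually carries them -- no more 'arch': None placeholders.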
@@ -954,17 +955,18 @@ def from_node_dict(node):

         spec = Spec(name)
         spec.namespace = node.get('namespace', None)
+        spec._hash = node.get('hash', None)

+        if 'version' in node or 'versions' in node:
             spec.versions = VersionList.from_dict(node)

-        if 'hash' in node:
-            spec._hash = node['hash']
-
+        if 'arch' in node:
             spec.architecture = spack.architecture.arch_from_dict(node['arch'])

-        if node['compiler'] is None:
-            spec.compiler = None
-        else:
+        if 'compiler' in node:
             spec.compiler = CompilerSpec.from_dict(node)
+        else:
+            spec.compiler = None

         if 'parameters' in node:
             for name, value in node['parameters'].items():
@@ -972,14 +974,12 @@ def from_node_dict(node):
                     spec.compiler_flags[name] = value
                 else:
                     spec.variants[name] = VariantSpec(name, value)

         elif 'variants' in node:
             for name, value in node['variants'].items():
                 spec.variants[name] = VariantSpec(name, value)
             for name in FlagMap.valid_compiler_flags():
                 spec.compiler_flags[name] = []
-        else:
-            raise SpackRecordError(
-                "Did not find a valid format for variants in YAML file")

         # Don't read dependencies here; from_node_dict() is used by
         # from_yaml() to read the root *and* each dependency spec.
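
`from_node_dict` is the tolerant mirror image: every optional key is probed with `in` or `.get` before use, so YAML written before this change (explicit `None` fields) and after it (omitted fields) both load. The idiom, reduced to a toy:

    def read_optional(node):
        out = {'hash': node.get('hash', None)}  # absent -> None
        if 'arch' in node:                      # absent -> skip entirely
            out['arch'] = node['arch']
        return out

    assert read_optional({'hash': 'abc'}) == {'hash': 'abc'}
    assert read_optional({}) == {'hash': None}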
@@ -1037,6 +1037,10 @@ def from_yaml(stream):
         for node in nodes:
             # get dependency dict from the node.
             name = next(iter(node))
+
+            if 'dependencies' not in node[name]:
+                continue
+
             yaml_deps = node[name]['dependencies']
             for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps):
                 # Fill in dependencies by looking them up by name in deps dict
@@ -1567,7 +1571,7 @@ def validate_names(self):
            UnsupportedCompilerError.
         """
         for spec in self.traverse():
-            # Don't get a package for a virtual name.
+            # raise an UnknownPackageError if the spec's package isn't real.
             if (not spec.virtual) and spec.name:
                 spack.repo.get(spec.fullname)

@@ -2824,12 +2828,6 @@ def __init__(self, msg, yaml_error):
         super(SpackYAMLError, self).__init__(msg, str(yaml_error))


-class SpackRecordError(spack.error.SpackError):
-
-    def __init__(self, msg):
-        super(SpackRecordError, self).__init__(msg)
-
-
 class AmbiguousHashError(SpecError):

     def __init__(self, msg, *specs):
@@ -315,7 +315,8 @@ def fetch(self, mirror_only=False):
             # Add URL strategies for all the mirrors with the digest
             for url in urls:
                 fetchers.insert(0, fs.URLFetchStrategy(url, digest))
-            fetchers.insert(0, spack.cache.fetcher(self.mirror_path, digest))
+            fetchers.insert(0, spack.fetch_cache.fetcher(self.mirror_path,
+                                                         digest))

             # Look for the archive in list_url
             package_name = os.path.dirname(self.mirror_path)
@@ -365,7 +366,7 @@ def check(self):
             self.fetcher.check()

     def cache_local(self):
-        spack.cache.store(self.fetcher, self.mirror_path)
+        spack.fetch_cache.store(self.fetcher, self.mirror_path)

     def expand_archive(self):
         """Changes to the stage directory and attempt to expand the downloaded
@@ -23,6 +23,7 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import sys
+import os

 import llnl.util.tty as tty
 import nose
@@ -32,16 +33,53 @@
 from spack.test.tally_plugin import Tally
 """Names of tests to be included in Spack's test suite"""
+
+# All the tests Spack knows about.
+# Keep these one per line so that it's easy to see changes in diffs.
 test_names = [
-    'architecture', 'versions', 'url_parse', 'url_substitution', 'packages',
-    'stage', 'spec_syntax', 'spec_semantics', 'spec_dag', 'concretize',
-    'multimethod', 'install', 'package_sanity', 'config', 'directory_layout',
-    'pattern', 'python_version', 'git_fetch', 'svn_fetch', 'hg_fetch',
-    'mirror', 'modules', 'url_extrapolate', 'cc', 'link_tree', 'spec_yaml',
-    'optional_deps', 'make_executable', 'build_system_guess', 'lock',
-    'database', 'namespace_trie', 'yaml', 'sbang', 'environment',
-    'concretize_preferences', 'cmd.find', 'cmd.uninstall', 'cmd.test_install',
-    'cmd.test_compiler_cmd', 'cmd.module'
+    'architecture',
+    'build_system_guess',
+    'cc',
+    'cmd.find',
+    'cmd.module',
+    'cmd.test_install',
+    'cmd.uninstall',
+    'concretize',
+    'concretize_preferences',
+    'config',
+    'database',
+    'directory_layout',
+    'environment',
+    'file_cache',
+    'git_fetch',
+    'hg_fetch',
+    'install',
+    'link_tree',
+    'lock',
+    'make_executable',
+    'mirror',
+    'modules',
+    'multimethod',
+    'namespace_trie',
+    'optional_deps',
+    'package_sanity',
+    'packages',
+    'pattern',
+    'python_version',
+    'sbang',
+    'spec_dag',
+    'spec_semantics',
+    'spec_syntax',
+    'spec_yaml',
+    'stage',
+    'svn_fetch',
+    'url_extrapolate',
+    'url_parse',
+    'url_substitution',
+    'versions',
+    'provider_index',
+    'yaml',
+    # This test needs to be last until global compiler cache is fixed.
+    'cmd.test_compiler_cmd',
 ]


@@ -53,6 +91,10 @@ def list_tests():
 def run(names, outputDir, verbose=False):
     """Run tests with the supplied names.  Names should be a list.  If
        it's empty, run ALL of Spack's tests."""
+    # Print output to stdout if verbose is 1.
+    if verbose:
+        os.environ['NOSE_NOCAPTURE'] = '1'
+
     if not names:
         names = test_names
     else:
@@ -86,6 +86,29 @@ def test_platform(self):

         self.assertEqual(str(output_platform_class), str(my_platform_class))

+    def test_boolness(self):
+        # Make sure architecture reports that it's False when nothing's set.
+        arch = spack.architecture.Arch()
+        self.assertFalse(arch)
+
+        # Dummy architecture parts
+        plat = spack.architecture.platform()
+        plat_os = plat.operating_system('default_os')
+        plat_target = plat.target('default_target')
+
+        # Make sure architecture reports that it's True when anything is set.
+        arch = spack.architecture.Arch()
+        arch.platform = plat
+        self.assertTrue(arch)
+
+        arch = spack.architecture.Arch()
+        arch.platform_os = plat_os
+        self.assertTrue(arch)
+
+        arch = spack.architecture.Arch()
+        arch.target = plat_target
+        self.assertTrue(arch)
+
     def test_user_front_end_input(self):
         """Test when user inputs just frontend that both the frontend target
            and frontend operating system match
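
`test_boolness` pins down a behavior the rest of the commit relies on: an `Arch` with no parts must be falsy, and setting any one part makes it truthy. On Python 2, which this code targets, that implies `Arch` defines `__nonzero__` along these lines (a sketch of the assumed implementation, not the actual class):

    class Arch(object):
        def __init__(self, platform=None, platform_os=None, target=None):
            self.platform = platform
            self.platform_os = platform_os
            self.target = target

        def __nonzero__(self):
            # Truthy as soon as any component is set.
            return (self.platform is not None or
                    self.platform_os is not None or
                    self.target is not None)
        __bool__ = __nonzero__  # Python 3 spelling

    assert not Arch()
    assert Arch(platform='linux')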
|
@ -29,6 +29,7 @@
|
|||||||
from spack.concretize import find_spec
|
from spack.concretize import find_spec
|
||||||
from spack.test.mock_packages_test import *
|
from spack.test.mock_packages_test import *
|
||||||
|
|
||||||
|
|
||||||
class ConcretizeTest(MockPackagesTest):
|
class ConcretizeTest(MockPackagesTest):
|
||||||
|
|
||||||
def check_spec(self, abstract, concrete):
|
def check_spec(self, abstract, concrete):
|
||||||
@ -59,7 +60,6 @@ def check_spec(self, abstract, concrete):
|
|||||||
if abstract.architecture and abstract.architecture.concrete:
|
if abstract.architecture and abstract.architecture.concrete:
|
||||||
self.assertEqual(abstract.architecture, concrete.architecture)
|
self.assertEqual(abstract.architecture, concrete.architecture)
|
||||||
|
|
||||||
|
|
||||||
def check_concretize(self, abstract_spec):
|
def check_concretize(self, abstract_spec):
|
||||||
abstract = Spec(abstract_spec)
|
abstract = Spec(abstract_spec)
|
||||||
concrete = abstract.concretized()
|
concrete = abstract.concretized()
|
||||||
@ -70,29 +70,24 @@ def check_concretize(self, abstract_spec):
|
|||||||
|
|
||||||
return concrete
|
return concrete
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_no_deps(self):
|
def test_concretize_no_deps(self):
|
||||||
self.check_concretize('libelf')
|
self.check_concretize('libelf')
|
||||||
self.check_concretize('libelf@0.8.13')
|
self.check_concretize('libelf@0.8.13')
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_dag(self):
|
def test_concretize_dag(self):
|
||||||
self.check_concretize('callpath')
|
self.check_concretize('callpath')
|
||||||
self.check_concretize('mpileaks')
|
self.check_concretize('mpileaks')
|
||||||
self.check_concretize('libelf')
|
self.check_concretize('libelf')
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_variant(self):
|
def test_concretize_variant(self):
|
||||||
self.check_concretize('mpich+debug')
|
self.check_concretize('mpich+debug')
|
||||||
self.check_concretize('mpich~debug')
|
self.check_concretize('mpich~debug')
|
||||||
self.check_concretize('mpich debug=2')
|
self.check_concretize('mpich debug=2')
|
||||||
self.check_concretize('mpich')
|
self.check_concretize('mpich')
|
||||||
|
|
||||||
|
|
||||||
def test_conretize_compiler_flags(self):
|
def test_conretize_compiler_flags(self):
|
||||||
self.check_concretize('mpich cppflags="-O3"')
|
self.check_concretize('mpich cppflags="-O3"')
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_preferred_version(self):
|
def test_concretize_preferred_version(self):
|
||||||
spec = self.check_concretize('python')
|
spec = self.check_concretize('python')
|
||||||
self.assertEqual(spec.versions, ver('2.7.11'))
|
self.assertEqual(spec.versions, ver('2.7.11'))
|
||||||
@ -100,7 +95,6 @@ def test_concretize_preferred_version(self):
|
|||||||
spec = self.check_concretize('python@3.5.1')
|
spec = self.check_concretize('python@3.5.1')
|
||||||
self.assertEqual(spec.versions, ver('3.5.1'))
|
self.assertEqual(spec.versions, ver('3.5.1'))
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_with_virtual(self):
|
def test_concretize_with_virtual(self):
|
||||||
self.check_concretize('mpileaks ^mpi')
|
self.check_concretize('mpileaks ^mpi')
|
||||||
self.check_concretize('mpileaks ^mpi@:1.1')
|
self.check_concretize('mpileaks ^mpi@:1.1')
|
||||||
@ -111,7 +105,6 @@ def test_concretize_with_virtual(self):
|
|||||||
self.check_concretize('mpileaks ^mpi@:1')
|
self.check_concretize('mpileaks ^mpi@:1')
|
||||||
self.check_concretize('mpileaks ^mpi@1.2:2')
|
self.check_concretize('mpileaks ^mpi@1.2:2')
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_with_restricted_virtual(self):
|
def test_concretize_with_restricted_virtual(self):
|
||||||
self.check_concretize('mpileaks ^mpich2')
|
self.check_concretize('mpileaks ^mpich2')
|
||||||
|
|
||||||
@ -142,58 +135,55 @@ def test_concretize_with_restricted_virtual(self):
|
|||||||
concrete = self.check_concretize('mpileaks ^mpich2@1.3.1:1.4')
|
concrete = self.check_concretize('mpileaks ^mpich2@1.3.1:1.4')
|
||||||
self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.3.1:1.4'))
|
self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.3.1:1.4'))
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_with_provides_when(self):
|
def test_concretize_with_provides_when(self):
|
||||||
"""Make sure insufficient versions of MPI are not in providers list when
|
"""Make sure insufficient versions of MPI are not in providers list when
|
||||||
we ask for some advanced version.
|
we ask for some advanced version.
|
||||||
"""
|
"""
|
||||||
self.assertTrue(not any(spec.satisfies('mpich2@:1.0')
|
self.assertTrue(
|
||||||
|
not any(spec.satisfies('mpich2@:1.0')
|
||||||
for spec in spack.repo.providers_for('mpi@2.1')))
|
for spec in spack.repo.providers_for('mpi@2.1')))
|
||||||
|
|
||||||
self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
|
self.assertTrue(
|
||||||
|
not any(spec.satisfies('mpich2@:1.1')
|
||||||
for spec in spack.repo.providers_for('mpi@2.2')))
|
for spec in spack.repo.providers_for('mpi@2.2')))
|
||||||
|
|
||||||
self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
|
self.assertTrue(
|
||||||
for spec in spack.repo.providers_for('mpi@2.2')))
|
not any(spec.satisfies('mpich@:1')
|
||||||
|
|
||||||
self.assertTrue(not any(spec.satisfies('mpich@:1')
|
|
||||||
for spec in spack.repo.providers_for('mpi@2')))
|
for spec in spack.repo.providers_for('mpi@2')))
|
||||||
|
|
||||||
self.assertTrue(not any(spec.satisfies('mpich@:1')
|
self.assertTrue(
|
||||||
|
not any(spec.satisfies('mpich@:1')
|
||||||
for spec in spack.repo.providers_for('mpi@3')))
|
for spec in spack.repo.providers_for('mpi@3')))
|
||||||
|
|
||||||
self.assertTrue(not any(spec.satisfies('mpich2')
|
self.assertTrue(
|
||||||
|
not any(spec.satisfies('mpich2')
|
||||||
for spec in spack.repo.providers_for('mpi@3')))
|
for spec in spack.repo.providers_for('mpi@3')))
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_two_virtuals(self):
|
def test_concretize_two_virtuals(self):
|
||||||
"""Test a package with multiple virtual dependencies."""
|
"""Test a package with multiple virtual dependencies."""
|
||||||
s = Spec('hypre').concretize()
|
Spec('hypre').concretize()
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_two_virtuals_with_one_bound(self):
|
def test_concretize_two_virtuals_with_one_bound(self):
|
||||||
"""Test a package with multiple virtual dependencies and one preset."""
|
"""Test a package with multiple virtual dependencies and one preset."""
|
||||||
s = Spec('hypre ^openblas').concretize()
|
Spec('hypre ^openblas').concretize()
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_two_virtuals_with_two_bound(self):
|
def test_concretize_two_virtuals_with_two_bound(self):
|
||||||
"""Test a package with multiple virtual dependencies and two of them preset."""
|
"""Test a package with multiple virtual deps and two of them preset."""
|
||||||
s = Spec('hypre ^openblas ^netlib-lapack').concretize()
|
Spec('hypre ^openblas ^netlib-lapack').concretize()
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_two_virtuals_with_dual_provider(self):
|
def test_concretize_two_virtuals_with_dual_provider(self):
|
||||||
"""Test a package with multiple virtual dependencies and force a provider
|
"""Test a package with multiple virtual dependencies and force a provider
|
||||||
that provides both."""
|
that provides both."""
|
||||||
s = Spec('hypre ^openblas-with-lapack').concretize()
|
Spec('hypre ^openblas-with-lapack').concretize()
|
||||||
|
|
||||||
|
|
||||||
def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
|
def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
|
||||||
"""Test a package with multiple virtual dependencies and force a provider
|
"""Test a package with multiple virtual dependencies and force a
|
||||||
that provides both, and another conflicting package that provides one."""
|
provider that provides both, and another conflicting package that
|
||||||
|
provides one.
|
||||||
|
"""
|
||||||
s = Spec('hypre ^openblas-with-lapack ^netlib-lapack')
|
s = Spec('hypre ^openblas-with-lapack ^netlib-lapack')
|
||||||
self.assertRaises(spack.spec.MultipleProviderError, s.concretize)
|
self.assertRaises(spack.spec.MultipleProviderError, s.concretize)
|
||||||
|
|
||||||
|
|
||||||
def test_virtual_is_fully_expanded_for_callpath(self):
|
def test_virtual_is_fully_expanded_for_callpath(self):
|
||||||
# force dependence on fake "zmpi" by asking for MPI 10.0
|
# force dependence on fake "zmpi" by asking for MPI 10.0
|
||||||
spec = Spec('callpath ^mpi@10.0')
|
spec = Spec('callpath ^mpi@10.0')
|
||||||
@ -210,7 +200,6 @@ def test_virtual_is_fully_expanded_for_callpath(self):
|
|||||||
|
|
||||||
self.assertTrue('fake' in spec._dependencies['zmpi'].spec)
|
self.assertTrue('fake' in spec._dependencies['zmpi'].spec)
|
||||||
|
|
||||||
|
|
||||||
def test_virtual_is_fully_expanded_for_mpileaks(self):
|
def test_virtual_is_fully_expanded_for_mpileaks(self):
|
||||||
spec = Spec('mpileaks ^mpi@10.0')
|
spec = Spec('mpileaks ^mpi@10.0')
|
||||||
self.assertTrue('mpi' in spec._dependencies)
|
self.assertTrue('mpi' in spec._dependencies)
|
||||||
@ -220,23 +209,24 @@ def test_virtual_is_fully_expanded_for_mpileaks(self):
|
|||||||
|
|
||||||
self.assertTrue('zmpi' in spec._dependencies)
|
self.assertTrue('zmpi' in spec._dependencies)
|
||||||
self.assertTrue('callpath' in spec._dependencies)
|
self.assertTrue('callpath' in spec._dependencies)
|
||||||
self.assertTrue('zmpi' in spec._dependencies['callpath'].
|
self.assertTrue(
|
||||||
spec._dependencies)
|
'zmpi' in spec._dependencies['callpath']
|
||||||
self.assertTrue('fake' in spec._dependencies['callpath'].
|
.spec._dependencies)
|
||||||
spec._dependencies['zmpi'].
|
self.assertTrue(
|
||||||
spec._dependencies)
|
'fake' in spec._dependencies['callpath']
|
||||||
|
.spec._dependencies['zmpi']
|
||||||
|
.spec._dependencies)
|
||||||
|
|
||||||
self.assertTrue(all(not 'mpi' in d._dependencies for d in spec.traverse()))
|
self.assertTrue(
|
||||||
|
all('mpi' not in d._dependencies for d in spec.traverse()))
|
||||||
self.assertTrue('zmpi' in spec)
|
self.assertTrue('zmpi' in spec)
|
||||||
self.assertTrue('mpi' in spec)
|
self.assertTrue('mpi' in spec)
|
||||||
|
|
||||||
|
|
||||||
def test_my_dep_depends_on_provider_of_my_virtual_dep(self):
|
def test_my_dep_depends_on_provider_of_my_virtual_dep(self):
|
||||||
spec = Spec('indirect_mpich')
|
spec = Spec('indirect_mpich')
|
||||||
spec.normalize()
|
spec.normalize()
|
||||||
spec.concretize()
|
spec.concretize()
|
||||||
|
|
||||||
|
|
||||||
def test_compiler_inheritance(self):
|
def test_compiler_inheritance(self):
|
||||||
spec = Spec('mpileaks')
|
spec = Spec('mpileaks')
|
||||||
spec.normalize()
|
spec.normalize()
|
||||||
@ -248,26 +238,26 @@ def test_compiler_inheritance(self):
|
|||||||
self.assertTrue(spec['libdwarf'].compiler.satisfies('clang'))
|
self.assertTrue(spec['libdwarf'].compiler.satisfies('clang'))
|
||||||
self.assertTrue(spec['libelf'].compiler.satisfies('clang'))
|
self.assertTrue(spec['libelf'].compiler.satisfies('clang'))
|
||||||
|
|
||||||
|
|
||||||
def test_external_package(self):
|
def test_external_package(self):
|
||||||
spec = Spec('externaltool%gcc')
|
spec = Spec('externaltool%gcc')
|
||||||
spec.concretize()
|
spec.concretize()
|
||||||
|
|
||||||
self.assertEqual(spec['externaltool'].external, '/path/to/external_tool')
|
self.assertEqual(
|
||||||
|
spec['externaltool'].external, '/path/to/external_tool')
|
||||||
self.assertFalse('externalprereq' in spec)
|
self.assertFalse('externalprereq' in spec)
|
||||||
self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
|
self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
|
||||||
|
|
||||||
|
|
||||||
def test_external_package_module(self):
|
def test_external_package_module(self):
|
||||||
# No tcl modules on darwin/linux machines
|
# No tcl modules on darwin/linux machines
|
||||||
# TODO: improved way to check for this.
|
# TODO: improved way to check for this.
|
||||||
if (spack.architecture.platform().name == 'darwin' or
|
platform = spack.architecture.platform().name
|
||||||
spack.architecture.platform().name == 'linux'):
|
if (platform == 'darwin' or platform == 'linux'):
|
||||||
return
|
return
|
||||||
|
|
||||||
spec = Spec('externalmodule')
|
spec = Spec('externalmodule')
|
||||||
spec.concretize()
|
spec.concretize()
|
||||||
self.assertEqual(spec['externalmodule'].external_module, 'external-module')
|
self.assertEqual(
|
||||||
|
spec['externalmodule'].external_module, 'external-module')
|
||||||
self.assertFalse('externalprereq' in spec)
|
self.assertFalse('externalprereq' in spec)
|
||||||
self.assertTrue(spec['externalmodule'].compiler.satisfies('gcc'))
|
self.assertTrue(spec['externalmodule'].compiler.satisfies('gcc'))
|
||||||
|
|
||||||
@ -280,16 +270,16 @@ def test_nobuild_package(self):
|
|||||||
got_error = True
|
got_error = True
|
||||||
self.assertTrue(got_error)
|
self.assertTrue(got_error)
|
||||||
|
|
||||||
|
|
||||||
def test_external_and_virtual(self):
|
def test_external_and_virtual(self):
|
||||||
spec = Spec('externaltest')
|
spec = Spec('externaltest')
|
||||||
spec.concretize()
|
spec.concretize()
|
||||||
self.assertEqual(spec['externaltool'].external, '/path/to/external_tool')
|
self.assertEqual(
|
||||||
self.assertEqual(spec['stuff'].external, '/path/to/external_virtual_gcc')
|
spec['externaltool'].external, '/path/to/external_tool')
|
||||||
|
self.assertEqual(
|
||||||
|
spec['stuff'].external, '/path/to/external_virtual_gcc')
|
||||||
self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
|
self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
|
||||||
self.assertTrue(spec['stuff'].compiler.satisfies('gcc'))
|
self.assertTrue(spec['stuff'].compiler.satisfies('gcc'))
|
||||||
|
|
||||||
|
|
||||||
def test_find_spec_parents(self):
|
def test_find_spec_parents(self):
|
||||||
"""Tests the spec finding logic used by concretization. """
|
"""Tests the spec finding logic used by concretization. """
|
||||||
s = Spec('a +foo',
|
s = Spec('a +foo',
|
||||||
@ -300,7 +290,6 @@ def test_find_spec_parents(self):
|
|||||||
|
|
||||||
self.assertEqual('a', find_spec(s['b'], lambda s: '+foo' in s).name)
|
self.assertEqual('a', find_spec(s['b'], lambda s: '+foo' in s).name)
|
||||||
|
|
||||||
|
|
||||||
def test_find_spec_children(self):
|
def test_find_spec_children(self):
|
||||||
s = Spec('a',
|
s = Spec('a',
|
||||||
Spec('b +foo',
|
Spec('b +foo',
|
||||||
@ -315,7 +304,6 @@ def test_find_spec_children(self):
|
|||||||
Spec('e +foo'))
|
Spec('e +foo'))
|
||||||
self.assertEqual('c', find_spec(s['b'], lambda s: '+foo' in s).name)
|
self.assertEqual('c', find_spec(s['b'], lambda s: '+foo' in s).name)
|
||||||
|
|
||||||
|
|
||||||
def test_find_spec_sibling(self):
|
def test_find_spec_sibling(self):
|
||||||
s = Spec('a',
|
s = Spec('a',
|
||||||
Spec('b +foo',
|
Spec('b +foo',
|
||||||
@ -333,7 +321,6 @@ def test_find_spec_sibling(self):
|
|||||||
Spec('f +foo')))
|
Spec('f +foo')))
|
||||||
self.assertEqual('f', find_spec(s['b'], lambda s: '+foo' in s).name)
|
self.assertEqual('f', find_spec(s['b'], lambda s: '+foo' in s).name)
|
||||||
|
|
||||||
|
|
||||||
def test_find_spec_self(self):
|
def test_find_spec_self(self):
|
||||||
s = Spec('a',
|
s = Spec('a',
|
||||||
Spec('b +foo',
|
Spec('b +foo',
|
||||||
@ -342,7 +329,6 @@ def test_find_spec_self(self):
|
|||||||
Spec('e'))
|
Spec('e'))
|
||||||
self.assertEqual('b', find_spec(s['b'], lambda s: '+foo' in s).name)
|
self.assertEqual('b', find_spec(s['b'], lambda s: '+foo' in s).name)
|
||||||
|
|
||||||
|
|
||||||
def test_find_spec_none(self):
|
def test_find_spec_none(self):
|
||||||
s = Spec('a',
|
s = Spec('a',
|
||||||
Spec('b',
|
Spec('b',
|
||||||
@ -351,7 +337,6 @@ def test_find_spec_none(self):
|
|||||||
Spec('e'))
|
Spec('e'))
|
||||||
self.assertEqual(None, find_spec(s['b'], lambda s: '+foo' in s))
|
self.assertEqual(None, find_spec(s['b'], lambda s: '+foo' in s))
|
||||||
|
|
||||||
|
|
||||||
def test_compiler_child(self):
|
def test_compiler_child(self):
|
||||||
s = Spec('mpileaks%clang ^dyninst%gcc')
|
s = Spec('mpileaks%clang ^dyninst%gcc')
|
||||||
s.concretize()
|
s.concretize()
|
||||||
|
@ -31,7 +31,6 @@
|
|||||||
|
|
||||||
import spack
|
import spack
|
||||||
from llnl.util.filesystem import join_path
|
from llnl.util.filesystem import join_path
|
||||||
from llnl.util.lock import *
|
|
||||||
from llnl.util.tty.colify import colify
|
from llnl.util.tty.colify import colify
|
||||||
from spack.test.mock_database import MockDatabase
|
from spack.test.mock_database import MockDatabase
|
||||||
|
|
||||||
@ -104,10 +103,12 @@ def test_010_all_install_sanity(self):
        self.assertEqual(len(libelf_specs), 1)

        # Query by dependency
-        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1)
-        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
-        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1)
+        self.assertEqual(
+            len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1)
+        self.assertEqual(
+            len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
+        self.assertEqual(
+            len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1)

    def test_015_write_and_read(self):
        # write and read DB
@ -122,7 +123,6 @@ def test_015_write_and_read(self):
            self.assertEqual(new_rec.path, rec.path)
            self.assertEqual(new_rec.installed, rec.installed)

-
    def _check_db_sanity(self):
        """Utility function to check db against install layout."""
        expected = sorted(spack.install_layout.all_specs())
@ -132,12 +132,10 @@ def _check_db_sanity(self):
        for e, a in zip(expected, actual):
            self.assertEqual(e, a)

-
    def test_020_db_sanity(self):
        """Make sure query() returns what's actually in the db."""
        self._check_db_sanity()

-
    def test_030_db_sanity_from_another_process(self):
        def read_and_modify():
            self._check_db_sanity()  # check that other process can read DB
@ -152,14 +150,12 @@ def read_and_modify():
        with self.installed_db.read_transaction():
            self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0)

-
    def test_040_ref_counts(self):
        """Ensure that we got ref counts right when we read the DB."""
        self.installed_db._check_ref_counts()

-
    def test_050_basic_query(self):
-        """Ensure that querying the database is consistent with what is installed."""
+        """Ensure querying database is consistent with what is installed."""
        # query everything
        self.assertEqual(len(spack.installed_db.query()), 13)

@ -186,7 +182,6 @@ def test_050_basic_query(self):
        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich2')), 1)
        self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 1)

-
    def _check_remove_and_add_package(self, spec):
        """Remove a spec from the DB, then add it and make sure everything's
        still ok once it is added. This checks that it was
@ -215,15 +210,12 @@ def _check_remove_and_add_package(self, spec):
        self._check_db_sanity()
        self.installed_db._check_ref_counts()

-
    def test_060_remove_and_add_root_package(self):
        self._check_remove_and_add_package('mpileaks ^mpich')

-
    def test_070_remove_and_add_dependency_package(self):
        self._check_remove_and_add_package('dyninst')

-
    def test_080_root_ref_counts(self):
        rec = self.installed_db.get_record('mpileaks ^mpich')

@ -231,45 +223,89 @@ def test_080_root_ref_counts(self):
        self.installed_db.remove('mpileaks ^mpich')

        # record no longer in DB
-        self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
+        self.assertEqual(
+            self.installed_db.query('mpileaks ^mpich', installed=any), [])

        # record's deps have updated ref_counts
-        self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0)
+        self.assertEqual(
+            self.installed_db.get_record('callpath ^mpich').ref_count, 0)
        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1)

-        # put the spec back
+        # Put the spec back
        self.installed_db.add(rec.spec, rec.path)

        # record is present again
-        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1)
+        self.assertEqual(
+            len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1)

        # dependencies have ref counts updated
-        self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1)
+        self.assertEqual(
+            self.installed_db.get_record('callpath ^mpich').ref_count, 1)
        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)

    def test_090_non_root_ref_counts(self):
-        mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich')
-        callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich')
+        self.installed_db.get_record('mpileaks ^mpich')
+        self.installed_db.get_record('callpath ^mpich')

        # "force remove" a non-root spec from the DB
        self.installed_db.remove('callpath ^mpich')

        # record still in DB but marked uninstalled
-        self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), [])
-        self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1)
+        self.assertEqual(
+            self.installed_db.query('callpath ^mpich', installed=True), [])
+        self.assertEqual(
+            len(self.installed_db.query('callpath ^mpich', installed=any)), 1)

        # record and its deps have same ref_counts
-        self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1)
+        self.assertEqual(self.installed_db.get_record(
+            'callpath ^mpich', installed=any).ref_count, 1)
        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)

        # remove only dependent of uninstalled callpath record
        self.installed_db.remove('mpileaks ^mpich')

        # record and parent are completely gone.
-        self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
-        self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), [])
+        self.assertEqual(
+            self.installed_db.query('mpileaks ^mpich', installed=any), [])
+        self.assertEqual(
+            self.installed_db.query('callpath ^mpich', installed=any), [])

        # mpich ref count updated properly.
        mpich_rec = self.installed_db.get_record('mpich')
        self.assertEqual(mpich_rec.ref_count, 0)

+    def test_100_no_write_with_exception_on_remove(self):
+        def fail_while_writing():
+            with self.installed_db.write_transaction():
+                self._mock_remove('mpileaks ^zmpi')
+                raise Exception()
+
+        with self.installed_db.read_transaction():
+            self.assertEqual(
+                len(self.installed_db.query('mpileaks ^zmpi', installed=any)),
+                1)
+
+        self.assertRaises(Exception, fail_while_writing)
+
+        # reload DB and make sure zmpi is still there.
+        with self.installed_db.read_transaction():
+            self.assertEqual(
+                len(self.installed_db.query('mpileaks ^zmpi', installed=any)),
+                1)
+
+    def test_110_no_write_with_exception_on_install(self):
+        def fail_while_writing():
+            with self.installed_db.write_transaction():
+                self._mock_install('cmake')
+                raise Exception()
+
+        with self.installed_db.read_transaction():
+            self.assertEqual(
+                self.installed_db.query('cmake', installed=any), [])
+
+        self.assertRaises(Exception, fail_while_writing)
+
+        # reload DB and make sure cmake was not written.
+        with self.installed_db.read_transaction():
+            self.assertEqual(
+                self.installed_db.query('cmake', installed=any), [])
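test_100 and test_110 pin down the transaction guarantee this PR relies on: if the body of a write_transaction raises, the mutation never reaches the database file. A minimal sketch of the same pattern outside the test harness, reusing the handles and spec names from the tests (illustrative, not canonical):

import spack

try:
    with spack.installed_db.write_transaction():
        spack.installed_db.remove('mpileaks ^zmpi')
        raise RuntimeError("simulated failure before the transaction commits")
except RuntimeError:
    pass

# The failed transaction was never written, so the record survives.
with spack.installed_db.read_transaction():
    assert len(spack.installed_db.query('mpileaks ^zmpi', installed=any)) == 1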
83 lib/spack/spack/test/file_cache.py Normal file
@ -0,0 +1,83 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""
+Test Spack's FileCache.
+"""
+import os
+import shutil
+import tempfile
+import unittest
+
+from spack.file_cache import FileCache
+
+
+class FileCacheTest(unittest.TestCase):
+    """Ensure that a file cache can properly write to a file and recover its
+       contents."""
+
+    def setUp(self):
+        self.scratch_dir = tempfile.mkdtemp()
+        self.cache = FileCache(self.scratch_dir)
+
+    def tearDown(self):
+        shutil.rmtree(self.scratch_dir)
+
+    def test_write_and_read_cache_file(self):
+        """Test writing then reading a cached file."""
+        with self.cache.write_transaction('test.yaml') as (old, new):
+            self.assertTrue(old is None)
+            self.assertTrue(new is not None)
+            new.write("foobar\n")
+
+        with self.cache.read_transaction('test.yaml') as stream:
+            text = stream.read()
+            self.assertEqual("foobar\n", text)
+
+    def test_remove(self):
+        """Test removing an entry from the cache."""
+        self.test_write_and_write_cache_file()
+
+        self.cache.remove('test.yaml')
+
+        self.assertFalse(os.path.exists(self.cache.cache_path('test.yaml')))
+        self.assertFalse(os.path.exists(self.cache._lock_path('test.yaml')))
+
+    def test_write_and_write_cache_file(self):
+        """Test two write transactions on a cached file."""
+        with self.cache.write_transaction('test.yaml') as (old, new):
+            self.assertTrue(old is None)
+            self.assertTrue(new is not None)
+            new.write("foobar\n")
+
+        with self.cache.write_transaction('test.yaml') as (old, new):
+            self.assertTrue(old is not None)
+            text = old.read()
+            self.assertEqual("foobar\n", text)
+            self.assertTrue(new is not None)
+            new.write("barbaz\n")
+
+        with self.cache.read_transaction('test.yaml') as stream:
+            text = stream.read()
+            self.assertEqual("barbaz\n", text)
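These tests double as the FileCache contract: write_transaction yields an (old, new) pair of streams so a writer can consult the previous contents while producing the replacement, read_transaction yields a plain stream under a shared lock, and remove drops both the cached file and its lock file. A condensed usage sketch (the 'index.yaml' key is illustrative; the calls mirror the tests):

import tempfile

from spack.file_cache import FileCache

# Cache rooted in a scratch directory, as in setUp() above.
cache = FileCache(tempfile.mkdtemp())

# First writer: no previous version exists, so `old` is None.
with cache.write_transaction('index.yaml') as (old, new):
    assert old is None
    new.write("version: 1\n")

# A later writer sees the old contents while writing the new file.
with cache.write_transaction('index.yaml') as (old, new):
    new.write(old.read().replace("1", "2"))

# Readers take a shared lock and get a plain stream.
with cache.read_transaction('index.yaml') as stream:
    assert stream.read() == "version: 2\n"

# Removal cleans up the cached file and its lock file.
cache.remove('index.yaml')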
@ -46,21 +46,21 @@ def setUp(self):
        self.lock_path = join_path(self.tempdir, 'lockfile')
        touch(self.lock_path)

    def tearDown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def multiproc_test(self, *functions):
        """Order some processes using simple barrier synchronization."""
        b = Barrier(len(functions), timeout=barrier_timeout)
        procs = [Process(target=f, args=(b,)) for f in functions]
-        for p in procs: p.start()
+
+        for p in procs:
+            p.start()
+
        for p in procs:
            p.join()
            self.assertEqual(p.exitcode, 0)

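multiproc_test is the combinator every timeout case below reuses: each argument is a function taking a barrier, each runs in its own process, and all must exit cleanly. A self-contained Python 3 rendering of the same pattern (the test file itself is Python 2 and ships its own Barrier helper with a barrier_timeout; multiprocessing.Barrier stands in for it here):

from multiprocessing import Barrier, Process

def multiproc_test(*functions):
    """Run each function in its own process, passing all of them one
    shared barrier so their steps can be ordered against each other."""
    b = Barrier(len(functions), timeout=5)
    procs = [Process(target=f, args=(b,)) for f in functions]

    for p in procs:
        p.start()

    for p in procs:
        p.join()
        assert p.exitcode == 0

def holder(barrier):
    barrier.wait()   # step 1: both processes are running
    barrier.wait()   # step 2: peer has finished its timed attempt

def waiter(barrier):
    barrier.wait()   # step 1
    # a timed lock attempt would go here, while the peer holds the lock
    barrier.wait()   # step 2

if __name__ == '__main__':
    multiproc_test(holder, waiter)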
    #
    # Process snippets below can be composed into tests.
    #
@ -88,7 +88,6 @@ def timeout_read(self, barrier):
        self.assertRaises(LockError, lock.acquire_read, 0.1)
        barrier.wait()

-
    #
    # Test that exclusive locks on other processes time out when an
    # exclusive lock is held.
@ -97,11 +96,13 @@ def test_write_lock_timeout_on_write(self):
        self.multiproc_test(self.acquire_write, self.timeout_write)

    def test_write_lock_timeout_on_write_2(self):
-        self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_write, self.timeout_write, self.timeout_write)

    def test_write_lock_timeout_on_write_3(self):
-        self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_write, self.timeout_write, self.timeout_write,
+            self.timeout_write)

    #
    # Test that shared locks on other processes time out when an
@ -111,11 +112,13 @@ def test_read_lock_timeout_on_write(self):
        self.multiproc_test(self.acquire_write, self.timeout_read)

    def test_read_lock_timeout_on_write_2(self):
-        self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read)
+        self.multiproc_test(
+            self.acquire_write, self.timeout_read, self.timeout_read)

    def test_read_lock_timeout_on_write_3(self):
-        self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read)
+        self.multiproc_test(
+            self.acquire_write, self.timeout_read, self.timeout_read,
+            self.timeout_read)

    #
    # Test that exclusive locks time out when shared locks are held.
@ -124,27 +127,35 @@ def test_write_lock_timeout_on_read(self):
        self.multiproc_test(self.acquire_read, self.timeout_write)

    def test_write_lock_timeout_on_read_2(self):
-        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_read, self.timeout_write, self.timeout_write)

    def test_write_lock_timeout_on_read_3(self):
-        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_read, self.timeout_write, self.timeout_write,
+            self.timeout_write)

    #
    # Test that exclusive locks time out while lots of shared locks are held.
    #
    def test_write_lock_timeout_with_multiple_readers_2_1(self):
-        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_read, self.acquire_read, self.timeout_write)

    def test_write_lock_timeout_with_multiple_readers_2_2(self):
-        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_read, self.acquire_read, self.timeout_write,
+            self.timeout_write)

    def test_write_lock_timeout_with_multiple_readers_3_1(self):
-        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_read, self.acquire_read, self.acquire_read,
+            self.timeout_write)

    def test_write_lock_timeout_with_multiple_readers_3_2(self):
-        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
+        self.multiproc_test(
+            self.acquire_read, self.acquire_read, self.acquire_read,
+            self.timeout_write, self.timeout_write)

    #
    # Longer test case that ensures locks are reusable. Ordering is
@ -187,7 +198,6 @@ def p1(barrier):
            barrier.wait()  # ---------------------------------------- 13
            lock.release_read()

-
        def p2(barrier):
            lock = Lock(self.lock_path)

@ -224,7 +234,6 @@ def p2(barrier):
            barrier.wait()  # ---------------------------------------- 13
            lock.release_read()

-
        def p3(barrier):
            lock = Lock(self.lock_path)

@ -262,3 +271,176 @@ def p3(barrier):
            lock.release_read()

        self.multiproc_test(p1, p2, p3)
+
+    def test_transaction(self):
+        def enter_fn():
+            vals['entered'] = True
+
+        def exit_fn(t, v, tb):
+            vals['exited'] = True
+            vals['exception'] = (t or v or tb)
+
+        lock = Lock(self.lock_path)
+        vals = {'entered': False, 'exited': False, 'exception': False}
+        with ReadTransaction(lock, enter_fn, exit_fn):
+            pass
+
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertFalse(vals['exception'])
+
+        vals = {'entered': False, 'exited': False, 'exception': False}
+        with WriteTransaction(lock, enter_fn, exit_fn):
+            pass
+
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertFalse(vals['exception'])
+
+    def test_transaction_with_exception(self):
+        def enter_fn():
+            vals['entered'] = True
+
+        def exit_fn(t, v, tb):
+            vals['exited'] = True
+            vals['exception'] = (t or v or tb)
+
+        lock = Lock(self.lock_path)
+
+        def do_read_with_exception():
+            with ReadTransaction(lock, enter_fn, exit_fn):
+                raise Exception()
+
+        def do_write_with_exception():
+            with WriteTransaction(lock, enter_fn, exit_fn):
+                raise Exception()
+
+        vals = {'entered': False, 'exited': False, 'exception': False}
+        self.assertRaises(Exception, do_read_with_exception)
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertTrue(vals['exception'])
+
+        vals = {'entered': False, 'exited': False, 'exception': False}
+        self.assertRaises(Exception, do_write_with_exception)
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertTrue(vals['exception'])
+
+    def test_transaction_with_context_manager(self):
+        class TestContextManager(object):
+            def __enter__(self):
+                vals['entered'] = True
+
+            def __exit__(self, t, v, tb):
+                vals['exited'] = True
+                vals['exception'] = (t or v or tb)
+
+        def exit_fn(t, v, tb):
+            vals['exited_fn'] = True
+            vals['exception_fn'] = (t or v or tb)
+
+        lock = Lock(self.lock_path)
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        with ReadTransaction(lock, TestContextManager, exit_fn):
+            pass
+
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertFalse(vals['exception'])
+        self.assertTrue(vals['exited_fn'])
+        self.assertFalse(vals['exception_fn'])
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        with ReadTransaction(lock, TestContextManager):
+            pass
+
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertFalse(vals['exception'])
+        self.assertFalse(vals['exited_fn'])
+        self.assertFalse(vals['exception_fn'])
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        with WriteTransaction(lock, TestContextManager, exit_fn):
+            pass
+
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertFalse(vals['exception'])
+        self.assertTrue(vals['exited_fn'])
+        self.assertFalse(vals['exception_fn'])
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        with WriteTransaction(lock, TestContextManager):
+            pass
+
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertFalse(vals['exception'])
+        self.assertFalse(vals['exited_fn'])
+        self.assertFalse(vals['exception_fn'])
+
+    def test_transaction_with_context_manager_and_exception(self):
+        class TestContextManager(object):
+            def __enter__(self):
+                vals['entered'] = True
+
+            def __exit__(self, t, v, tb):
+                vals['exited'] = True
+                vals['exception'] = (t or v or tb)
+
+        def exit_fn(t, v, tb):
+            vals['exited_fn'] = True
+            vals['exception_fn'] = (t or v or tb)
+
+        lock = Lock(self.lock_path)
+
+        def do_read_with_exception(exit_fn):
+            with ReadTransaction(lock, TestContextManager, exit_fn):
+                raise Exception()
+
+        def do_write_with_exception(exit_fn):
+            with WriteTransaction(lock, TestContextManager, exit_fn):
+                raise Exception()
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        self.assertRaises(Exception, do_read_with_exception, exit_fn)
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertTrue(vals['exception'])
+        self.assertTrue(vals['exited_fn'])
+        self.assertTrue(vals['exception_fn'])
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        self.assertRaises(Exception, do_read_with_exception, None)
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertTrue(vals['exception'])
+        self.assertFalse(vals['exited_fn'])
+        self.assertFalse(vals['exception_fn'])
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        self.assertRaises(Exception, do_write_with_exception, exit_fn)
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertTrue(vals['exception'])
+        self.assertTrue(vals['exited_fn'])
+        self.assertTrue(vals['exception_fn'])
+
+        vals = {'entered': False, 'exited': False, 'exited_fn': False,
+                'exception': False, 'exception_fn': False}
+        self.assertRaises(Exception, do_write_with_exception, None)
+        self.assertTrue(vals['entered'])
+        self.assertTrue(vals['exited'])
+        self.assertTrue(vals['exception'])
+        self.assertFalse(vals['exited_fn'])
+        self.assertFalse(vals['exception_fn'])
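The four transaction tests above establish the LockTransaction calling convention: ReadTransaction(lock, acquire_fn, release_fn) takes an optional acquire callback (which may instead be a context-manager factory) and an optional release callback that receives the exception triple, all None on a clean exit. A minimal sketch under those assumptions (the lock path is illustrative; the tests touch() the lock file before use, so this does too):

from llnl.util.lock import Lock, ReadTransaction

open('/tmp/demo.lock', 'a').close()   # lock file must exist, as in setUp()
lock = Lock('/tmp/demo.lock')

def acquire():
    # Runs once the shared (read) lock is actually held.
    print('read lock held')

def release(t, v, tb):
    # Receives the exception triple, all None on a clean exit -- the
    # same (t, v, tb) convention the tests' exit_fn uses.
    print('releasing; raised? %s' % bool(t or v or tb))

with ReadTransaction(lock, acquire, release):
    pass  # read the shared resource here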
@ -95,8 +95,10 @@ def setUp(self):
        self._mock_install('mpileaks ^zmpi')

    def tearDown(self):
-        for spec in spack.installed_db.query():
-            spec.package.do_uninstall(spec)
+        with spack.installed_db.write_transaction():
+            for spec in spack.installed_db.query():
+                spec.package.do_uninstall(spec)
+
        super(MockDatabase, self).tearDown()
        shutil.rmtree(self.install_path)
        spack.install_path = self.spack_install_path
93 lib/spack/spack/test/provider_index.py Normal file
@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Tests for provider index cache files.
+
+Tests assume that mock packages provide this:
+
+  {'blas': {
+      blas: set([netlib-blas, openblas, openblas-with-lapack])},
+   'lapack': {lapack: set([netlib-lapack, openblas-with-lapack])},
+   'mpi': {mpi@:1: set([mpich@:1]),
+           mpi@:2.0: set([mpich2]),
+           mpi@:2.1: set([mpich2@1.1:]),
+           mpi@:2.2: set([mpich2@1.2:]),
+           mpi@:3: set([mpich@3:]),
+           mpi@:10.0: set([zmpi])},
+   'stuff': {stuff: set([externalvirtual])}}
+"""
+from StringIO import StringIO
+
+import spack
+from spack.spec import Spec
+from spack.provider_index import ProviderIndex
+from spack.test.mock_packages_test import *
+
+
+class ProviderIndexTest(MockPackagesTest):
+
+    def test_yaml_round_trip(self):
+        p = ProviderIndex(spack.repo.all_package_names())
+
+        ostream = StringIO()
+        p.to_yaml(ostream)
+
+        istream = StringIO(ostream.getvalue())
+        q = ProviderIndex.from_yaml(istream)
+
+        self.assertEqual(p, q)
+
+    def test_providers_for_simple(self):
+        p = ProviderIndex(spack.repo.all_package_names())
+
+        blas_providers = p.providers_for('blas')
+        self.assertTrue(Spec('netlib-blas') in blas_providers)
+        self.assertTrue(Spec('openblas') in blas_providers)
+        self.assertTrue(Spec('openblas-with-lapack') in blas_providers)
+
+        lapack_providers = p.providers_for('lapack')
+        self.assertTrue(Spec('netlib-lapack') in lapack_providers)
+        self.assertTrue(Spec('openblas-with-lapack') in lapack_providers)
+
+    def test_mpi_providers(self):
+        p = ProviderIndex(spack.repo.all_package_names())
+
+        mpi_2_providers = p.providers_for('mpi@2')
+        self.assertTrue(Spec('mpich2') in mpi_2_providers)
+        self.assertTrue(Spec('mpich@3:') in mpi_2_providers)
+
+        mpi_3_providers = p.providers_for('mpi@3')
+        self.assertTrue(Spec('mpich2') not in mpi_3_providers)
+        self.assertTrue(Spec('mpich@3:') in mpi_3_providers)
+        self.assertTrue(Spec('zmpi') in mpi_3_providers)
+
+    def test_equal(self):
+        p = ProviderIndex(spack.repo.all_package_names())
+        q = ProviderIndex(spack.repo.all_package_names())
+        self.assertEqual(p, q)
+
+    def test_copy(self):
+        p = ProviderIndex(spack.repo.all_package_names())
+        q = p.copy()
+        self.assertEqual(p, q)
@ -30,41 +30,36 @@
from spack.spec import Spec
from spack.test.mock_packages_test import *

-class SpecDagTest(MockPackagesTest):
+class SpecYamlTest(MockPackagesTest):

    def check_yaml_round_trip(self, spec):
        yaml_text = spec.to_yaml()
        spec_from_yaml = Spec.from_yaml(yaml_text)
        self.assertTrue(spec.eq_dag(spec_from_yaml))

-
    def test_simple_spec(self):
        spec = Spec('mpileaks')
        self.check_yaml_round_trip(spec)

-
    def test_normal_spec(self):
        spec = Spec('mpileaks+debug~opt')
        spec.normalize()
        self.check_yaml_round_trip(spec)

-
    def test_ambiguous_version_spec(self):
        spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt')
        spec.normalize()
        self.check_yaml_round_trip(spec)

-
    def test_concrete_spec(self):
        spec = Spec('mpileaks+debug~opt')
        spec.concretize()
        self.check_yaml_round_trip(spec)

-
    def test_yaml_subdag(self):
        spec = Spec('mpileaks^mpich+debug')
        spec.concretize()

        yaml_spec = Spec.from_yaml(spec.to_yaml())

        for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'):