database.py: simplify, avoid sets, improve perf

commit dfca65158d (parent 9a2b0aca66)
@@ -263,18 +263,6 @@ def from_dict(cls, spec, dictionary):
         return InstallRecord(spec, **d)
 
 
-class ForbiddenLockError(SpackError):
-    """Raised when an upstream DB attempts to acquire a lock"""
-
-
-class ForbiddenLock:
-    def __getattr__(self, name):
-        raise ForbiddenLockError(f"Cannot access attribute '{name}' of lock")
-
-    def __reduce__(self):
-        return ForbiddenLock, tuple()
-
-
 class LockConfiguration(NamedTuple):
     """Data class to configure locks in Database objects
@@ -617,16 +605,12 @@ def __init__(
         self.db_lock_timeout = lock_cfg.database_timeout
         tty.debug(f"DATABASE LOCK TIMEOUT: {str(self.db_lock_timeout)}s")
 
-        self.lock: Union[ForbiddenLock, lk.Lock]
-        if self.is_upstream:
-            self.lock = ForbiddenLock()
-        else:
-            self.lock = lk.Lock(
-                str(self._lock_path),
-                default_timeout=self.db_lock_timeout,
-                desc="database",
-                enable=lock_cfg.enable,
-            )
+        self.lock = lk.Lock(
+            str(self._lock_path),
+            default_timeout=self.db_lock_timeout,
+            desc="database",
+            enable=not self.is_upstream and lock_cfg.enable,
+        )
         self._data: Dict[str, InstallRecord] = {}
 
         # For every installed spec we keep track of its install prefix, so that
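The net effect of this constructor change: upstream databases now get an ordinary lk.Lock that is simply created disabled (enable=not self.is_upstream and lock_cfg.enable) instead of a ForbiddenLock sentinel that raised on any attribute access. A minimal sketch of the two behaviors being traded, using made-up class names rather than Spack code, and assuming a disabled lock acts as a no-op:

    # Illustrative only; neither class below is part of Spack.
    class RaisingSentinelLock:
        """Stand-in for the retired sentinel: any attribute access fails loudly."""

        def __getattr__(self, name):
            raise RuntimeError(f"Cannot access attribute '{name}' of lock")


    class DisabledLock:
        """Stand-in for a lock created with enable=False: operations are no-ops."""

        def acquire_read(self, timeout=None):
            return False  # nothing acquired, nothing raised

        def release_read(self):
            return False


    DisabledLock().acquire_read()  # silently does nothing
    try:
        RaisingSentinelLock().acquire_read()
    except RuntimeError as exc:
        print(exc)  # Cannot access attribute 'acquire_read' of lock

With the sentinel gone, misuse of an upstream database surfaces through the explicit guard added to _write() below (UpstreamDatabaseLockingError) rather than through a lock attribute error.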
@@ -1050,6 +1034,9 @@ def _write(self, type=None, value=None, traceback=None):
 
         This routine does no locking.
         """
+        if self.is_upstream:
+            raise UpstreamDatabaseLockingError("Cannot write to an upstream database")
+
         self._ensure_parent_directories()
 
         # Do not write if exceptions were raised
@@ -1670,38 +1657,24 @@ def query(
         """
         valid_trees = ["all", "upstream", "local", self.root] + [u.root for u in self.upstream_dbs]
         if install_tree not in valid_trees:
-            msg = "Invalid install_tree argument to Database.query()\n"
-            msg += f"Try one of {', '.join(valid_trees)}"
-            tty.error(msg)
-            return []
-
-        upstream_results = []
-        upstreams = self.upstream_dbs
-        if install_tree not in ("all", "upstream"):
-            upstreams = [u for u in self.upstream_dbs if u.root == install_tree]
-        for upstream_db in upstreams:
-            # queries for upstream DBs need to *not* lock - we may not
-            # have permissions to do this and the upstream DBs won't know about
-            # us anyway (so e.g. they should never uninstall specs)
-            upstream_results.extend(
-                upstream_db._query(
-                    query_spec,
-                    predicate_fn=predicate_fn,
-                    installed=installed,
-                    explicit=explicit,
-                    start_date=start_date,
-                    end_date=end_date,
-                    hashes=hashes,
-                    in_buildcache=in_buildcache,
-                    origin=origin,
-                )
-                or []
-            )
-
-        local_results: Set["spack.spec.Spec"] = set()
-        if install_tree in ("all", "local") or self.root == install_tree:
-            local_results = set(
-                self.query_local(
+            raise ValueError(
+                f"Invalid install_tree argument to Database.query(). Try one of {valid_trees}"
+            )
+
+        if install_tree == "all":
+            databases = [self, *self.upstream_dbs]
+        elif install_tree == "upstream":
+            databases = self.upstream_dbs
+        elif install_tree == "local" or self.root == install_tree:
+            databases = [self]
+        else:
+            databases = [u for u in self.upstream_dbs if u.root == install_tree]
+
+        results: List[spack.spec.Spec] = []
+
+        for db in databases:
+            results.extend(
+                db.query_local(
                     query_spec,
                     predicate_fn=predicate_fn,
                     installed=installed,
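When install_tree is "all", the local database is placed first in databases, so the deduplication added in the next hunk (which, per its comment, picks local specs over upstreams) keeps the local record whenever the same spec also exists upstream. A standalone sketch of that keep-first-by-key semantics, not the llnl.util.lang.dedupe implementation:

    from typing import Callable, Hashable, Iterable, List, TypeVar

    T = TypeVar("T")


    def dedupe_first(items: Iterable[T], key: Callable[[T], Hashable]) -> List[T]:
        """Keep only the first item seen for each key, preserving input order."""
        seen = set()
        out: List[T] = []
        for item in items:
            k = key(item)
            if k not in seen:
                seen.add(k)
                out.append(item)
        return out


    # Results arrive local-first, so the local copy of hash "abc123" wins.
    records = [("local", "abc123"), ("upstream", "abc123"), ("upstream", "def456")]
    print(dedupe_first(records, key=lambda r: r[1]))
    # [('local', 'abc123'), ('upstream', 'def456')]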
@@ -1714,8 +1687,13 @@ def query(
                 )
             )
 
-        results = list(local_results) + list(x for x in upstream_results if x not in local_results)
-        results.sort()  # type: ignore[call-overload]
+        # Stable deduplication on dag hash picks local specs over upstreams.
+        if len(databases) > 1:
+            results = list(llnl.util.lang.dedupe(results, key=lambda x: x.dag_hash()))
+
+        # reduce number of comparisons with slow default __lt__
+        results.sort(key=lambda s: s.name)
+        results.sort()
         return results
 
     def query_one(
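The two-pass sort relies on Python's Timsort being stable and adaptive: the first pass orders specs by the cheap name key, which leaves the list nearly sorted for the final pass, so the expensive default comparison runs far fewer times. A rough, self-contained illustration with a toy class standing in for Spec:

    import random


    class Item:
        """Toy stand-in for a spec whose default comparison is slow."""

        comparisons = 0  # counts calls to the "expensive" __lt__

        def __init__(self, name, version):
            self.name, self.version = name, version

        def __lt__(self, other):
            Item.comparisons += 1
            return (self.name, self.version) < (other.name, other.version)


    random.seed(0)
    data = [Item(f"pkg{random.randrange(2000)}", random.randrange(10)) for _ in range(5000)]

    single = list(data)
    Item.comparisons = 0
    single.sort()
    print("one full sort:       ", Item.comparisons)

    double = list(data)
    Item.comparisons = 0
    double.sort(key=lambda i: i.name)  # cheap string key: no Item.__lt__ calls
    double.sort()                      # expensive comparison, but input is nearly sorted
    print("name sort, then full:", Item.comparisons)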
@@ -165,10 +165,8 @@ def test_installed_upstream(upstream_and_downstream_db, tmpdir):
     upstream_db._read()
 
     for dep in spec.traverse(root=False):
-        record = downstream_db.get_by_hash(dep.dag_hash())
-        assert record is not None
-        with pytest.raises(spack.database.ForbiddenLockError):
-            upstream_db.get_by_hash(dep.dag_hash())
+        assert downstream_db.get_by_hash(dep.dag_hash()) is not None
+        assert upstream_db.get_by_hash(dep.dag_hash()) is not None
 
     new_spec = spack.concretize.concretize_one("w")
     downstream_db.add(new_spec)
@@ -258,7 +256,7 @@ def test_cannot_write_upstream(tmp_path, mock_packages, config):
     # Create it as an upstream
     db = spack.database.Database(str(tmp_path), is_upstream=True)
 
-    with pytest.raises(spack.database.ForbiddenLockError):
+    with pytest.raises(spack.database.UpstreamDatabaseLockingError):
         db.add(spack.concretize.concretize_one("pkg-a"))