Compare commits

...

14 Commits

Author SHA1 Message Date
Todd Gamblin
03cba85029
bugfix: don't include root in general depth rule 2023-03-11 16:21:53 -08:00
Todd Gamblin
2c7a2fa3e0
Don't require priority attribute on models. 2023-02-22 10:53:16 -08:00
Todd Gamblin
7e8fb4b8d0
WIP 2023-01-30 11:48:03 -08:00
Todd Gamblin
54087cf274
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
b0e67d3411
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
d92b653f0b
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
3435806007
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
06d5abe895
WIP 2023-01-24 17:19:19 -08:00
Todd Gamblin
dc40e121a3
WIP 2023-01-24 17:18:50 -08:00
Todd Gamblin
03acf14d86
tests: test consistency of solver and spec_clauses
Previously we didn't check whether all facts on a specs were represented in the models
returned by the solver. This is an important check as it ensures that we can have
conditions (dependencies, conflicts, etc.) on any property modeled by spec_clauses.

This caught the issue reported in #32497 (https://github.com/spack/spack/issues/32497),
specifically that we don't currently model patch facts. We can fix this in a follow-on
but it is marked XFAIL for now.
2023-01-24 17:18:50 -08:00
Todd Gamblin
9ffb642b95
refactor: use AspFunction consistently before and after solve
Currently we create `AspFunction` objects as inputs to solves, but we don't use them
when extracting symbols from clingo solves. Use them more consistently in both
scenarios, and simplify the code.
2023-01-24 17:18:47 -08:00
Todd Gamblin
20e698c6b0
concretizer: add depth calculation 2023-01-24 17:12:10 -08:00
Todd Gamblin
c410f3f392
rename optimization criteria to be clearer about roots/non-roots 2023-01-24 17:12:10 -08:00
Todd Gamblin
b37fab40b5
solver: rework optimization criteria with larger constants
To allow room for DAG-ordered optimization, rework the way we write optimization
criteria in `concretize.lp`, and convert build offsets to use constants.
2023-01-24 17:12:10 -08:00
10 changed files with 430 additions and 299 deletions

View File

@ -46,14 +46,6 @@ def setup_parser(subparser):
) )
def shift(asp_function):
"""Transforms ``attr("foo", "bar")`` into ``foo("bar")``."""
if not asp_function.args:
raise ValueError(f"Can't shift ASP function with no arguments: {str(asp_function)}")
first, *rest = asp_function.args
return asp.AspFunction(first, rest)
def compare_specs(a, b, to_string=False, color=None): def compare_specs(a, b, to_string=False, color=None):
""" """
Generate a comparison, including diffs (for each side) and an intersection. Generate a comparison, including diffs (for each side) and an intersection.
@ -79,7 +71,7 @@ def compare_specs(a, b, to_string=False, color=None):
# get facts for specs, making sure to include build dependencies of concrete # get facts for specs, making sure to include build dependencies of concrete
# specs and to descend into dependency hashes so we include all facts. # specs and to descend into dependency hashes so we include all facts.
a_facts = set( a_facts = set(
shift(func) func.shift()
for func in setup.spec_clauses( for func in setup.spec_clauses(
a, a,
body=True, body=True,
@ -89,7 +81,7 @@ def compare_specs(a, b, to_string=False, color=None):
if func.name == "attr" if func.name == "attr"
) )
b_facts = set( b_facts = set(
shift(func) func.shift()
for func in setup.spec_clauses( for func in setup.spec_clauses(
b, b,
body=True, body=True,

View File

@ -97,28 +97,41 @@ def setup_parser(subparser):
def _process_result(result, show, required_format, kwargs): def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat() result.raise_if_unsat()
opt, _, _ = min(result.answers) opt, *_ = min(result.answers)
# dump the solutions as concretized specs
if ("opt" in show) and (not required_format): if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels) tty.msg("Best of %d considered solutions." % result.nmodels)
tty.msg("Optimization Criteria:") tty.msg("Optimization Criteria:")
maxlen = max(len(s[2]) for s in result.criteria) maxlen = max(len(name) for name in result.criteria)
color.cprint("@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " ")) max_depth = max(len(v) for v in result.criteria.values() if isinstance(v, list))
fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen header = "@*{"
for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1): header += "".join(f"{depth:<4}" for depth in range(max_depth))
color.cprint( header += "Criterion}"
fmt color.cprint(header)
% (
i, # make non-zero numbers red
name, def highlight(n, c):
"-" if build_cost is None else installed_cost, return color.colorize(f"@{c}{{{n:<4}}}" if n > 0 else f"{n:<4}")
installed_cost if build_cost is None else build_cost,
for i, (name, cost) in enumerate(result.criteria.items(), 1):
colored_name = name.replace("build:", "@c{build:}")
colored_name = colored_name.replace("reuse:", "@B{reuse:}")
colored_name = colored_name.replace("fixed:", "@G{fixed:}")
colored_name = color.colorize(colored_name)
if isinstance(cost, int):
print(highlight(cost, "G") + " " * (max_depth - 1) + colored_name)
else:
print(
"".join(highlight(c, "c" if "build:" in name else "B") for c in cost)
+ colored_name
) )
)
print() print()
# dump the solutions as concretized specs
if "solutions" in show: if "solutions" in show:
for spec in result.specs: for spec in result.specs:
# With -y, just print YAML to output. # With -y, just print YAML to output.

View File

@ -13,6 +13,7 @@
import re import re
import types import types
import warnings import warnings
from typing import Dict, List, Tuple, Union
import archspec.cpu import archspec.cpu
@ -124,84 +125,87 @@ def getter(node):
# The space of possible priorities for optimization targets # The space of possible priorities for optimization targets
# is partitioned in the following ranges: # is partitioned in the following ranges:
# # +=============================================================+
# [0-100) Optimization criteria for software being reused # | Priority | Description |
# [100-200) Fixed criteria that are higher priority than reuse, but lower than build # +=============================================================+
# [200-300) Optimization criteria for software being built # | 10,000,000+ | Error conditions |
# [300-1000) High-priority fixed criteria # +-------------+-----------------------------------------------+
# [1000-inf) Error conditions # | 9,999,999 | |
# | ... | High-priority criteria |
# | 1,000,000 | |
# +-------------+-----------------------------------------------+
# | 999,999 | |
# | ... | Standard criteria for built packages |
# | 100,001 | |
# +-------------+-----------------------------------------------+
# | 100,000 | Number of packages being built |
# +-------------+-----------------------------------------------+
# | 99,999 | |
# | ... | Standard criteria for reused packages |
# | 0 | |
# +-------------+-----------------------------------------------+
# #
# Each optimization target is a minimization with optimal value 0. # Each optimization target is a minimization with optimal value 0.
#
#: High fixed priority offset for criteria that supersede all build criteria #: High fixed priority offset for criteria that supersede all build criteria
high_fixed_priority_offset = 300 high_fixed_priority_offset = 10_000_000
#: Priority offset for "build" criteria (regular criterio shifted to #: Priority offset for "build" criteria (regular criterio shifted to
#: higher priority for specs we have to build) #: higher priority for specs we have to build)
build_priority_offset = 200 build_priority_offset = 100_000
#: Priority offset of "fixed" criteria (those w/o build criteria) #: max priority for an error
fixed_priority_offset = 100 max_error_priority = 3
def build_criteria_names(costs, arg_tuples): def build_criteria_names(
costs: List[int], opt_criteria: List["AspFunction"], max_depth: int
) -> Dict[str, Union[int, List[Tuple[int, int]]]]:
"""Construct an ordered mapping from criteria names to costs.""" """Construct an ordered mapping from criteria names to costs."""
# pull optimization criteria names out of the solution
priorities_names = []
num_fixed = 0 # ensure names of all criteria are unique
num_high_fixed = 0 names = {criterion.args[0] for criterion in opt_criteria}
for args in arg_tuples: assert len(names) == len(opt_criteria), "names of optimization criteria must be unique"
priority, name = args[:2]
priority = int(priority)
# add the priority of this opt criterion and its name # split opt criteria into two lists
priorities_names.append((priority, name)) fixed_criteria = [oc for oc in opt_criteria if oc.args[1] == "fixed"]
leveled_criteria = [oc for oc in opt_criteria if oc.args[1] == "leveled"]
# if the priority is less than fixed_priority_offset, then it # first non-error criterion
# has an associated build priority -- the same criterion but for solve_index = max_error_priority + 1
# nodes that we have to build.
if priority < fixed_priority_offset:
build_priority = priority + build_priority_offset
priorities_names.append((build_priority, name))
elif priority >= high_fixed_priority_offset:
num_high_fixed += 1
else:
num_fixed += 1
# sort the criteria by priority # compute without needing max_depth from solve
priorities_names = sorted(priorities_names, reverse=True) max_leveled_costs = (len(costs) - max_error_priority - 3) / 2
assert max_leveled_costs * 2 == len(costs) - max_error_priority - 3
assert max_leveled_costs % len(leveled_criteria) == 0
max_leveled_costs = int(max_leveled_costs)
# We only have opt-criterion values for non-error types n_leveled_costs = len(leveled_criteria) * (max_depth + 1)
# error type criteria are excluded (they come first)
error_criteria = len(costs) - len(priorities_names)
costs = costs[error_criteria:]
# split list into three parts: build criteria, fixed criteria, non-build criteria build_index = solve_index + 1 + max_leveled_costs
num_criteria = len(priorities_names) fixed_costs = [costs[solve_index], costs[build_index]]
num_build = (num_criteria - num_fixed - num_high_fixed) // 2
build_start_idx = num_high_fixed build_costs = costs[solve_index + 1 : solve_index + 1 + n_leveled_costs]
fixed_start_idx = num_high_fixed + num_build reuse_costs = costs[build_index + 1 : build_index + 1 + n_leveled_costs]
installed_start_idx = num_high_fixed + num_build + num_fixed assert len(build_costs) == len(reuse_costs) == n_leveled_costs
high_fixed = priorities_names[:build_start_idx] criteria = {}
build = priorities_names[build_start_idx:fixed_start_idx]
fixed = priorities_names[fixed_start_idx:installed_start_idx]
installed = priorities_names[installed_start_idx:]
# mapping from priority to index in cost list def add_fixed(criterion_idx, cost_idx):
indices = dict((p, i) for i, (p, n) in enumerate(priorities_names)) name = fixed_criteria[criterion_idx].args[2]
criteria["fixed: " + name] = costs[cost_idx]
# make a list that has each name with its build and non-build costs add_fixed(0, solve_index)
criteria = [(cost, None, name) for cost, (p, name) in zip(costs[:build_start_idx], high_fixed)]
criteria += [
(cost, None, name)
for cost, (p, name) in zip(costs[fixed_start_idx:installed_start_idx], fixed)
]
for (i, name), (b, _) in zip(installed, build): for i, fn in enumerate(leveled_criteria):
criteria.append((costs[indices[i]], costs[indices[b]], name)) name = fn.args[2]
criteria["build: " + name] = build_costs[i :: len(leveled_criteria)]
add_fixed(1, build_index)
for i, fn in enumerate(leveled_criteria):
name = fn.args[2]
criteria["reuse: " + name] = reuse_costs[i :: len(leveled_criteria)]
return criteria return criteria
@ -251,7 +255,11 @@ def _id(thing):
class AspFunction(AspObject): class AspFunction(AspObject):
def __init__(self, name, args=None): def __init__(self, name, args=None):
self.name = name self.name = name
self.args = () if args is None else tuple(args)
def simplify(arg):
return arg if isinstance(arg, (str, bool, int)) else str(arg)
self.args = () if args is None else tuple(simplify(arg) for arg in args)
def _cmp_key(self): def _cmp_key(self):
return (self.name, self.args) return (self.name, self.args)
@ -286,10 +294,29 @@ def argify(arg):
elif isinstance(arg, int): elif isinstance(arg, int):
return clingo.Number(arg) return clingo.Number(arg)
else: else:
return clingo.String(str(arg)) return clingo.String(arg)
return clingo.Function(self.name, [argify(arg) for arg in self.args], positive=positive) return clingo.Function(self.name, [argify(arg) for arg in self.args], positive=positive)
@staticmethod
def from_symbol(symbol):
def deargify(arg):
if arg.type is clingo.SymbolType.Number:
return arg.number
elif arg.type is clingo.SymbolType.String and arg.string in ("True", "False"):
return arg.string == "True"
else:
return arg.string
return AspFunction(symbol.name, [deargify(arg) for arg in symbol.arguments])
def shift(self):
"""Transforms ``attr("foo", "bar")`` into ``foo("bar")``."""
if not self.args:
raise ValueError(f"Can't shift ASP function with no arguments: {str(self)}")
first, *rest = self.args
return AspFunction(first, rest)
def __str__(self): def __str__(self):
return "%s(%s)" % (self.name, ", ".join(str(_id(arg)) for arg in self.args)) return "%s(%s)" % (self.name, ", ".join(str(_id(arg)) for arg in self.args))
@ -494,7 +521,8 @@ def _compute_specs_from_answer_set(self):
self._concrete_specs, self._unsolved_specs = [], [] self._concrete_specs, self._unsolved_specs = [], []
self._concrete_specs_by_input = {} self._concrete_specs_by_input = {}
best = min(self.answers) best = min(self.answers)
opt, _, answer = best
opt, _, answer, _ = best
for input_spec in self.abstract_specs: for input_spec in self.abstract_specs:
key = input_spec.name key = input_spec.name
if input_spec.virtual: if input_spec.virtual:
@ -576,13 +604,13 @@ def stringify(sym):
return sym.string or str(sym) return sym.string or str(sym)
def extract_args(model, predicate_name): def extract_functions(model, function_name):
"""Extract the arguments to predicates with the provided name from a model. """Extract ASP functions with the given name from a model.
Pull out all the predicates with name ``predicate_name`` from the model, and return Pull out all the functions with name ``function_name`` from the model, and return them as
their stringified arguments as tuples. ``AspFunction`` objects.
""" """
return [stringify(sym.arguments) for sym in model if sym.name == predicate_name] return [AspFunction.from_symbol(sym) for sym in model if sym.name == function_name]
class PyclingoDriver(object): class PyclingoDriver(object):
@ -681,11 +709,10 @@ def solve(self, setup, specs, reuse=None, output=None, control=None):
self.control = control or default_clingo_control() self.control = control or default_clingo_control()
# set up the problem -- this generates facts and rules # set up the problem -- this generates facts and rules
self.assumptions = [] self.assumptions = []
timer.start("setup") with timer.measure("setup"):
with self.control.backend() as backend: with self.control.backend() as backend:
self.backend = backend self.backend = backend
setup.setup(self, specs, reuse=reuse) setup.setup(self, specs, reuse=reuse)
timer.stop("setup")
timer.start("load") timer.start("load")
# read in the main ASP program and display logic -- these are # read in the main ASP program and display logic -- these are
@ -731,7 +758,8 @@ def visit(node):
cores = [] # unsatisfiable cores if they do not cores = [] # unsatisfiable cores if they do not
def on_model(model): def on_model(model):
models.append((model.cost, model.symbols(shown=True, terms=True))) priorities = getattr(model, "priority", None)
models.append((model.cost, priorities, model.symbols(shown=True, terms=True)))
solve_kwargs = { solve_kwargs = {
"assumptions": self.assumptions, "assumptions": self.assumptions,
@ -752,24 +780,28 @@ def on_model(model):
if result.satisfiable: if result.satisfiable:
# get the best model # get the best model
builder = SpecBuilder(specs, hash_lookup=setup.reusable_and_possible) builder = SpecBuilder(specs, hash_lookup=setup.reusable_and_possible)
min_cost, best_model = min(models) min_cost, priorities, best_model = min(models)
# first check for errors # first check for errors
error_args = extract_args(best_model, "error") error_args = [fn.args for fn in extract_functions(best_model, "error")]
errors = sorted((int(priority), msg, args) for priority, msg, *args in error_args) errors = sorted((int(priority), msg, args) for priority, msg, *args in error_args)
for _, msg, args in errors: for _, msg, args in errors:
self.handle_error(msg, *args) self.handle_error(msg, *args)
# build specs from spec attributes in the model # build specs from spec attributes in the model
spec_attrs = [(name, tuple(rest)) for name, *rest in extract_args(best_model, "attr")] spec_attrs = extract_functions(best_model, "attr")
answers = builder.build_specs(spec_attrs) with timer.measure("build"):
answers = builder.build_specs(spec_attrs)
# add best spec to the results # add best spec to the results
result.answers.append((list(min_cost), 0, answers)) result.answers.append((list(min_cost), 0, answers, spec_attrs))
# get optimization criteria # get optimization criteria
criteria_args = extract_args(best_model, "opt_criterion") criteria = extract_functions(best_model, "opt_criterion")
result.criteria = build_criteria_names(min_cost, criteria_args) depths = extract_functions(best_model, "depth")
max_depth = max(d.args[1] for d in depths)
result.criteria = build_criteria_names(min_cost, criteria, max_depth)
# record the number of models the solver considered # record the number of models the solver considered
result.nmodels = len(models) result.nmodels = len(models)
@ -779,7 +811,7 @@ def on_model(model):
# print any unknown functions in the model # print any unknown functions in the model
for sym in best_model: for sym in best_model:
if sym.name not in ("attr", "error", "opt_criterion"): if sym.name not in ("attr", "error", "opt_criterion", "depth", "const_max_depth"):
tty.debug( tty.debug(
"UNKNOWN SYMBOL: %s(%s)" % (sym.name, ", ".join(stringify(sym.arguments))) "UNKNOWN SYMBOL: %s(%s)" % (sym.name, ", ".join(stringify(sym.arguments)))
) )
@ -1409,23 +1441,23 @@ def spec_clauses(self, *args, **kwargs):
def _spec_clauses( def _spec_clauses(
self, self,
spec, spec,
body=False, body: bool = False,
transitive=True, transitive: bool = True,
expand_hashes=False, expand_hashes: bool = False,
concrete_build_deps=False, concrete_build_deps: bool = False,
deptype: Union[str, Tuple[str, ...]] = "all",
): ):
"""Return a list of clauses for a spec mandates are true. """Return a list of clauses for a spec mandates are true.
Arguments: Arguments:
spec (spack.spec.Spec): the spec to analyze spec: the spec to analyze
body (bool): if True, generate clauses to be used in rule bodies body: if True, generate clauses to be used in rule bodies
(final values) instead of rule heads (setters). (final values) instead of rule heads (setters).
transitive (bool): if False, don't generate clauses from transitive: if False, don't generate clauses from dependencies.
dependencies (default True) expand_hashes: If transitive and True, descend into hashes of concrete specs.
expand_hashes (bool): if True, descend into hashes of concrete specs concrete_build_deps: if False, do not include pure build deps
(default False)
concrete_build_deps (bool): if False, do not include pure build deps
of concrete specs (as they have no effect on runtime constraints) of concrete specs (as they have no effect on runtime constraints)
deptype: dependency types to follow when transitive (default "all").
Normally, if called with ``transitive=True``, ``spec_clauses()`` just generates Normally, if called with ``transitive=True``, ``spec_clauses()`` just generates
hashes for the dependency requirements of concrete specs. If ``expand_hashes`` hashes for the dependency requirements of concrete specs. If ``expand_hashes``
@ -1553,7 +1585,7 @@ class Body(object):
# add all clauses from dependencies # add all clauses from dependencies
if transitive: if transitive:
# TODO: Eventually distinguish 2 deps on the same pkg (build and link) # TODO: Eventually distinguish 2 deps on the same pkg (build and link)
for dspec in spec.edges_to_dependencies(): for dspec in spec.edges_to_dependencies(deptype=deptype):
dep = dspec.spec dep = dspec.spec
if spec.concrete: if spec.concrete:
@ -1585,6 +1617,7 @@ class Body(object):
body=body, body=body,
expand_hashes=expand_hashes, expand_hashes=expand_hashes,
concrete_build_deps=concrete_build_deps, concrete_build_deps=concrete_build_deps,
deptype=deptype,
) )
) )
@ -2316,7 +2349,7 @@ def deprecated(self, pkg, version):
tty.warn(msg.format(pkg, version)) tty.warn(msg.format(pkg, version))
@staticmethod @staticmethod
def sort_fn(function_tuple): def sort_fn(function):
"""Ensure attributes are evaluated in the correct order. """Ensure attributes are evaluated in the correct order.
hash attributes are handled first, since they imply entire concrete specs hash attributes are handled first, since they imply entire concrete specs
@ -2326,7 +2359,7 @@ def sort_fn(function_tuple):
the concrete specs on which they depend because all nodes are fully constructed before we the concrete specs on which they depend because all nodes are fully constructed before we
consider which ones are external. consider which ones are external.
""" """
name = function_tuple[0] name = function.args[0]
if name == "hash": if name == "hash":
return (-5, 0) return (-5, 0)
elif name == "node": elif name == "node":
@ -2340,23 +2373,24 @@ def sort_fn(function_tuple):
else: else:
return (-1, 0) return (-1, 0)
def build_specs(self, function_tuples): def build_specs(self, functions):
# Functions don't seem to be in particular order in output. Sort # Functions don't seem to be in particular order in output. Sort
# them here so that directives that build objects (like node and # them here so that directives that build objects (like node and
# node_compiler) are called in the right order. # node_compiler) are called in the right order.
self.function_tuples = sorted(set(function_tuples), key=self.sort_fn) self.functions = sorted(set(functions), key=self.sort_fn)
self._specs = {} self._specs = {}
for name, args in self.function_tuples: for attr in self.functions:
if SpecBuilder.ignored_attributes.match(name): fn = attr.shift() # attr("foo", "bar") -> foo("bar")
if SpecBuilder.ignored_attributes.match(fn.name):
continue continue
action = getattr(self, name, None) action = getattr(self, fn.name, None)
# print out unknown actions so we can display them for debugging # print out unknown actions so we can display them for debugging
if not action: if not action:
msg = 'UNKNOWN SYMBOL: attr("%s", %s)' % (name, ", ".join(str(a) for a in args)) tty.debug(f"UNKNOWN SYMBOL: {attr}")
tty.debug(msg)
continue continue
msg = ( msg = (
@ -2368,8 +2402,8 @@ def build_specs(self, function_tuples):
# ignore predicates on virtual packages, as they're used for # ignore predicates on virtual packages, as they're used for
# solving but don't construct anything. Do not ignore error # solving but don't construct anything. Do not ignore error
# predicates on virtual packages. # predicates on virtual packages.
if name != "error": if fn.name != "error":
pkg = args[0] pkg = fn.args[0]
if spack.repo.path.is_virtual(pkg): if spack.repo.path.is_virtual(pkg):
continue continue
@ -2379,7 +2413,7 @@ def build_specs(self, function_tuples):
if spec and spec.concrete: if spec and spec.concrete:
continue continue
action(*args) action(*fn.args)
# namespace assignment is done after the fact, as it is not # namespace assignment is done after the fact, as it is not
# currently part of the solve # currently part of the solve

View File

@ -23,9 +23,17 @@ literal_not_solved(ID) :- not literal_solved(ID), literal(ID).
% in better reporting for users. See #30669 for details. % in better reporting for users. See #30669 for details.
1 { literal_solved(ID) : literal(ID) }. 1 { literal_solved(ID) : literal(ID) }.
opt_criterion(300, "number of input specs not concretized"). % priority ranges for optimization criteria
#minimize{ 0@300: #true }. % note that clingo's weight_t is int32_t, so the max priority we can use is 2,147,483,647
#minimize { 1@300,ID : literal_not_solved(ID) }. #const max_error_priority = 3.
#const error_prio = 10000000.
#const solve_prio = 1000000.
#const build_prio = 100000. % n_nodes x depth_offset x max levels needs to be less than this
#const depth_offset = 100. % depth_offset-1 is the max id for leveled criteria
opt_criterion(solve_prio, "fixed", "number of input specs not concretized").
#minimize{ 0@solve_prio: #true }.
#minimize{ 1@solve_prio,ID : literal_not_solved(ID) }.
% Map constraint on the literal ID to the correct PSID % Map constraint on the literal ID to the correct PSID
attr(Name, A1) :- literal(LiteralID, Name, A1), literal_solved(LiteralID). attr(Name, A1) :- literal(LiteralID, Name, A1), literal_solved(LiteralID).
@ -65,7 +73,8 @@ version_declared(Package, Version, Weight) :- version_declared(Package, Version,
version_declared(Package, Version) :- version_declared(Package, Version, _). version_declared(Package, Version) :- version_declared(Package, Version, _).
% a spec with a git hash version is equivalent to one with the same matched version % a spec with a git hash version is equivalent to one with the same matched version
version_satisfies(Package, Constraint, HashVersion) :- version_satisfies(Package, Constraint, EquivalentVersion), version_satisfies(Package, Constraint, HashVersion) :-
version_satisfies(Package, Constraint, EquivalentVersion),
version_equivalent(Package, HashVersion, EquivalentVersion). version_equivalent(Package, HashVersion, EquivalentVersion).
#defined version_equivalent/3. #defined version_equivalent/3.
@ -146,7 +155,10 @@ possible_version_weight(Package, Weight)
% Otherwise covered by `no_version_error` and `versions_conflict_error`. % Otherwise covered by `no_version_error` and `versions_conflict_error`.
error(1, "No valid version for '{0}' satisfies '@{1}'", Package, Constraint) error(1, "No valid version for '{0}' satisfies '@{1}'", Package, Constraint)
:- attr("node_version_satisfies", Package, Constraint), :- attr("node_version_satisfies", Package, Constraint),
C = #count{ Version : attr("version", Package, Version), version_satisfies(Package, Constraint, Version)}, C = #count{
Version
: attr("version", Package, Version), version_satisfies(Package, Constraint, Version)
},
C < 1. C < 1.
attr("node_version_satisfies", Package, Constraint) attr("node_version_satisfies", Package, Constraint)
@ -1077,11 +1089,11 @@ build(Package) :- not attr("hash", Package, _), attr("node", Package).
% 200+ Shifted priorities for build nodes; correspond to priorities 0 - 99. % 200+ Shifted priorities for build nodes; correspond to priorities 0 - 99.
% 100 - 199 Unshifted priorities. Currently only includes minimizing #builds. % 100 - 199 Unshifted priorities. Currently only includes minimizing #builds.
% 0 - 99 Priorities for non-built nodes. % 0 - 99 Priorities for non-built nodes.
build_priority(Package, 200) :- build(Package), attr("node", Package), optimize_for_reuse(). build_priority(Package, build_prio) :- build(Package), attr("node", Package), optimize_for_reuse().
build_priority(Package, 0) :- not build(Package), attr("node", Package), optimize_for_reuse(). build_priority(Package, 0) :- not build(Package), attr("node", Package), optimize_for_reuse().
% don't adjust build priorities if reuse is not enabled % don't adjust build priorities if reuse is not enabled
build_priority(Package, 0) :- attr("node", Package), not optimize_for_reuse(). build_priority(Package, build_prio) :- attr("node", Package), not optimize_for_reuse().
% don't assign versions from installed packages unless reuse is enabled % don't assign versions from installed packages unless reuse is enabled
% NOTE: that "installed" means the declared version was only included because % NOTE: that "installed" means the declared version was only included because
@ -1101,22 +1113,56 @@ build_priority(Package, 0) :- attr("node", Package), not optimize_for_reuse().
#defined installed_hash/2. #defined installed_hash/2.
%-----------------------------------------------------------------------------
% Calculate min depth of nodes in the DAG
% We use this to optimize nodes closer to roots with higher precedence.
%-----------------------------------------------------------------------------
#const max_depth = 4.
% roots have depth 0.
depth(Package, 0) :- attr("root", Package).
%depth(Package, D+1) :- depth(Dependent, D), depends_on(Dependent, Package), D < max_depth.
%parent_depth(Package, D) :-
% depends_on(Dependent, Package),
% depth(Dependent, D),
% D < max_depth - 1.
%depth(Package, M+1) :-
% M = #min{ D: parent_depth(Package, D); max_depth - 1 },
% attr("node", Package).
% other nodes' depth is the minimum depth of any dependent plus one.
depth(Package, N + 1) :-
N = #min{
D: depends_on(Dependent, Package),
depth(Dependent, D),
D < max_depth;
max_depth - 1
},
N = 0..max_depth - 1,
not attr("root", Package),
attr("node", Package).
%----------------------------------------------------------------- %-----------------------------------------------------------------
% Optimization to avoid errors % Optimization to avoid errors
%----------------------------------------------------------------- %-----------------------------------------------------------------
% Some errors are handled as rules instead of constraints because % Some errors are handled as rules instead of constraints because
% it allows us to explain why something failed. Here we optimize % it allows us to explain why something failed. Here we optimize
% HEAVILY against the facts generated by those rules. % HEAVILY against the facts generated by those rules.
#minimize{ 0@1000: #true}.
#minimize{ 0@1001: #true}.
#minimize{ 0@1002: #true}.
#minimize{ 1000@1000+Priority,Msg: error(Priority, Msg) }. % ensure that error costs are always in the solution.
#minimize{ 1000@1000+Priority,Msg,Arg1: error(Priority, Msg, Arg1) }. #minimize{ 0@error_prio + (0..max_error_priority): #true}.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2: error(Priority, Msg, Arg1, Arg2) }.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3: error(Priority, Msg, Arg1, Arg2, Arg3) }. % TODO: why 1000 and not just 1? 1000 seems unnecessary since priorities are lexicographic.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3,Arg4: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4) }. #minimize{ 1000@error_prio+Priority,Msg: error(Priority, Msg) }.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3,Arg4,Arg5: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4, Arg5) }. #minimize{ 1000@error_prio+Priority,Msg,Arg1: error(Priority, Msg, Arg1) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2: error(Priority, Msg, Arg1, Arg2) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2,Arg3: error(Priority, Msg, Arg1, Arg2, Arg3) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2,Arg3,Arg4: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2,Arg3,Arg4,Arg5: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4, Arg5) }.
%----------------------------------------------------------------------------- %-----------------------------------------------------------------------------
% How to optimize the spec (high to low priority) % How to optimize the spec (high to low priority)
@ -1126,199 +1172,157 @@ build_priority(Package, 0) :- attr("node", Package), not optimize_for_reuse().
% 2. a `#minimize{ 0@2 : #true }.` statement that ensures the criterion % 2. a `#minimize{ 0@2 : #true }.` statement that ensures the criterion
% is displayed (clingo doesn't display sums over empty sets by default) % is displayed (clingo doesn't display sums over empty sets by default)
% Ensure that values are returned by clingo for every distinct optimization criterion.
% Some criteria are "fixed" and have only one bucket. Others are summed into multiple
% buckets -- per build priority and per depth in the graph.
% If we don't do this, it's very hard to read the sums back. We use `0@...` because
% it doesn't affect the sums -- it just ensure that clingo returns them.
% "fixed" criteria have one bucket -- their priority.
#minimize{ 0@N: opt_criterion(N, "fixed", _) }.
% "leveled" criteria sum into a bucket per depth in the graph, per build priority
#minimize{
0@(((max_depth - D - 1) * depth_offset) + N + build_prio)
: opt_criterion(N, "leveled", _), depth(_, D)
}.
#minimize{
0@(((max_depth - D - 1) * depth_offset) + N)
: opt_criterion(N, "leveled", _), depth(_, D)
}.
% Try hard to reuse installed packages (i.e., minimize the number built) % Try hard to reuse installed packages (i.e., minimize the number built)
opt_criterion(100, "number of packages to build (vs. reuse)"). opt_criterion(build_prio, "fixed", "number of packages to build (vs. reuse)").
#minimize { 0@100: #true }. #minimize { 1@build_prio,Package : build(Package), optimize_for_reuse() }.
#minimize { 1@100,Package : build(Package), optimize_for_reuse() }.
#defined optimize_for_reuse/0. #defined optimize_for_reuse/0.
% A condition group specifies one or more specs that must be satisfied. % A condition group specifies one or more specs that must be satisfied.
% Specs declared first are preferred, so we assign increasing weights and % Specs declared first are preferred, so we assign increasing weights and
% minimize the weights. % minimize the weights.
opt_criterion(75, "requirement weight"). opt_criterion(65, "leveled", "requirement weight").
#minimize{ 0@275: #true }. #minimize{
#minimize{ 0@75: #true }. Weight@(65 + ((max_depth - D - 1) * depth_offset) + Priority), Package
#minimize {
Weight@75+Priority
: requirement_weight(Package, Weight), : requirement_weight(Package, Weight),
build_priority(Package, Priority) build_priority(Package, Priority),
depth(Package, D)
}. }.
% Minimize the number of deprecated versions being used opt_criterion(60, "leveled", "deprecated versions used").
opt_criterion(73, "deprecated versions used").
#minimize{ 0@273: #true }.
#minimize{ 0@73: #true }.
#minimize{ #minimize{
1@73+Priority,Package 1@(60 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: attr("deprecated", Package, _), : attr("deprecated", Package, _),
build_priority(Package, Priority) build_priority(Package, Priority),
depth(Package, D)
}. }.
% Minimize the: opt_criterion(55, "leveled", "version badness").
% 1. Version weight
% 2. Number of variants with a non default value, if not set
% for the root package.
opt_criterion(70, "version weight").
#minimize{ 0@270: #true }.
#minimize{ 0@70: #true }.
#minimize {
Weight@70+Priority
: attr("root", Package), version_weight(Package, Weight),
build_priority(Package, Priority)
}.
opt_criterion(65, "number of non-default variants (roots)").
#minimize{ 0@265: #true }.
#minimize{ 0@65: #true }.
#minimize {
1@65+Priority,Package,Variant,Value
: variant_not_default(Package, Variant, Value),
attr("root", Package),
build_priority(Package, Priority)
}.
opt_criterion(60, "preferred providers for roots").
#minimize{ 0@260: #true }.
#minimize{ 0@60: #true }.
#minimize{ #minimize{
Weight@60+Priority,Provider,Virtual Weight@(55 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: provider_weight(Provider, Virtual, Weight),
attr("root", Provider),
build_priority(Provider, Priority)
}.
opt_criterion(55, "default values of variants not being used (roots)").
#minimize{ 0@255: #true }.
#minimize{ 0@55: #true }.
#minimize{
1@55+Priority,Package,Variant,Value
: variant_default_not_used(Package, Variant, Value),
attr("root", Package),
build_priority(Package, Priority)
}.
% Try to use default variants or variants that have been set
opt_criterion(50, "number of non-default variants (non-roots)").
#minimize{ 0@250: #true }.
#minimize{ 0@50: #true }.
#minimize {
1@50+Priority,Package,Variant,Value
: variant_not_default(Package, Variant, Value),
not attr("root", Package),
build_priority(Package, Priority)
}.
% Minimize the weights of the providers, i.e. use as much as
% possible the most preferred providers
opt_criterion(45, "preferred providers (non-roots)").
#minimize{ 0@245: #true }.
#minimize{ 0@45: #true }.
#minimize{
Weight@45+Priority,Provider,Virtual
: provider_weight(Provider, Virtual, Weight), not attr("root", Provider),
build_priority(Provider, Priority)
}.
% Try to minimize the number of compiler mismatches in the DAG.
opt_criterion(40, "compiler mismatches that are not from CLI").
#minimize{ 0@240: #true }.
#minimize{ 0@40: #true }.
#minimize{
1@40+Priority,Package,Dependency
: compiler_mismatch(Package, Dependency),
build_priority(Package, Priority)
}.
opt_criterion(39, "compiler mismatches that are not from CLI").
#minimize{ 0@239: #true }.
#minimize{ 0@39: #true }.
#minimize{
1@39+Priority,Package,Dependency
: compiler_mismatch_required(Package, Dependency),
build_priority(Package, Priority)
}.
% Try to minimize the number of compiler mismatches in the DAG.
opt_criterion(35, "OS mismatches").
#minimize{ 0@235: #true }.
#minimize{ 0@35: #true }.
#minimize{
1@35+Priority,Package,Dependency
: node_os_mismatch(Package, Dependency),
build_priority(Package, Priority)
}.
opt_criterion(30, "non-preferred OS's").
#minimize{ 0@230: #true }.
#minimize{ 0@30: #true }.
#minimize{
Weight@30+Priority,Package
: node_os_weight(Package, Weight),
build_priority(Package, Priority)
}.
% Choose more recent versions for nodes
opt_criterion(25, "version badness").
#minimize{ 0@225: #true }.
#minimize{ 0@25: #true }.
#minimize{
Weight@25+Priority,Package
: version_weight(Package, Weight), : version_weight(Package, Weight),
build_priority(Package, Priority) build_priority(Package, Priority),
depth(Package, D)
}. }.
% Try to use all the default values of variants opt_criterion(50, "leveled", "number of non-default variants").
opt_criterion(20, "default values of variants not being used (non-roots)").
#minimize{ 0@220: #true }.
#minimize{ 0@20: #true }.
#minimize{ #minimize{
1@20+Priority,Package,Variant,Value 1@(50 + ((max_depth - D - 1) * depth_offset) + Priority), Package, Variant, Value
: variant_not_default(Package, Variant, Value),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(45, "leveled", "preferred providers").
#minimize{
Weight@(45 + ((max_depth - D - 1) * depth_offset) + Priority), Provider, Virtual
: provider_weight(Provider, Virtual, Weight),
build_priority(Provider, Priority),
depth(Package, D)
}.
opt_criterion(40, "leveled", "default values of variants not being used").
#minimize{
1@(40 + ((max_depth - D - 1) * depth_offset) + Priority), Package, Variant, Value
: variant_default_not_used(Package, Variant, Value), : variant_default_not_used(Package, Variant, Value),
not attr("root", Package), build_priority(Package, Priority),
build_priority(Package, Priority) depth(Package, D)
}. }.
% Try to use preferred compilers opt_criterion(35, "leveled", "compiler mismatches (not from CLI)").
opt_criterion(15, "non-preferred compilers").
#minimize{ 0@215: #true }.
#minimize{ 0@15: #true }.
#minimize{ #minimize{
Weight@15+Priority,Package 1@(35 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: compiler_mismatch(Dependent, Package),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(30, "leveled", "compiler mismatches (from CLI)").
#minimize{
1@(30 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: compiler_mismatch_required(Dependent, Package),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(25, "leveled", "OS mismatches").
#minimize{
1@(25 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: node_os_mismatch(Dependent, Package),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(20, "leveled", "non-preferred compilers").
#minimize{
Weight@(20 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: compiler_weight(Package, Weight), : compiler_weight(Package, Weight),
build_priority(Package, Priority) build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(15, "leveled", "non-preferred OS's").
#minimize{
Weight@(15 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: node_os_weight(Package, Weight),
build_priority(Package, Priority),
depth(Package, D)
}. }.
% Minimize the number of mismatches for targets in the DAG, try % Minimize the number of mismatches for targets in the DAG, try
% to select the preferred target. % to select the preferred target.
opt_criterion(10, "target mismatches"). opt_criterion(10, "leveled", "target mismatches").
#minimize{ 0@210: #true }.
#minimize{ 0@10: #true }.
#minimize{ #minimize{
1@10+Priority,Package,Dependency 1@(10 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: node_target_mismatch(Package, Dependency), : node_target_mismatch(Dependent, Package),
build_priority(Package, Priority) build_priority(Package, Priority),
depth(Package, D)
}. }.
opt_criterion(5, "non-preferred targets"). opt_criterion(5, "leveled", "non-preferred targets").
#minimize{ 0@205: #true }.
#minimize{ 0@5: #true }.
#minimize{ #minimize{
Weight@5+Priority,Package Weight@(5 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: node_target_weight(Package, Weight), : node_target_weight(Package, Weight),
build_priority(Package, Priority) build_priority(Package, Priority),
depth(Package, D)
}. }.
%----------------- %-----------------
% Domain heuristic % Domain heuristic
%----------------- %-----------------
#heuristic attr("version", Package, Version) : version_declared(Package, Version, 0), attr("node", Package). [10, true] #heuristic attr("version", Package, Version) : version_declared(Package, Version, 0), attr("node", Package). [10, true]
#heuristic version_weight(Package, 0) : version_declared(Package, Version, 0), attr("node", Package). [10, true] #heuristic version_weight(Package, 0) : version_declared(Package, Version, 0), attr("node", Package). [10, true]
#heuristic attr("node_target", Package, Target) : package_target_weight(Target, Package, 0), attr("node", Package). [10, true] #heuristic attr("node_target", Package, Target) : package_target_weight(Target, Package, 0), attr("node", Package). [10, true]
#heuristic node_target_weight(Package, 0) : attr("node", Package). [10, true] #heuristic node_target_weight(Package, 0) : attr("node", Package). [10, true]
#heuristic attr("variant_value", Package, Variant, Value) : variant_default_value(Package, Variant, Value), attr("node", Package). [10, true] #heuristic attr("variant_value", Package, Variant, Value) : variant_default_value(Package, Variant, Value), attr("node", Package). [10, true]
#heuristic provider(Package, Virtual) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true]
#heuristic attr("node", Package) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true] %#heuristic provider(Package, Virtual) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true]
#heuristic attr("node_os", Package, OS) : buildable_os(OS). [10, true] %#heuristic attr("node", Package) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true]
%#heuristic attr("node_os", Package, OS) : buildable_os(OS). [10, true]
%#heuristic attr("node_target", Dependency, Target): depends_on(Package, Dependency), attr("node_target", Package, Target). [20, true]
%#heuristic attr("node_os", Dependency, OS): depends_on(Package, Dependency), attr("node_os", Package, OS). [20, true]
%----------- %-----------
% Notes % Notes

View File

@ -15,7 +15,7 @@
#show attr/4. #show attr/4.
% names of optimization criteria % names of optimization criteria
#show opt_criterion/2. #show opt_criterion/3.
% error types % error types
#show error/2. #show error/2.
@ -25,4 +25,16 @@
#show error/6. #show error/6.
#show error/7. #show error/7.
% depths
#show depth/2.
%#show parent_depth/2.
% debug % debug
%#show depends_on/2.
%node(Package) :- attr("node", Package).
%#show node/1.
%version(Package, Version) :- attr("version", Package, Version).
%#show version/2.

View File

@ -2928,7 +2928,7 @@ def _new_concretize(self, tests=False):
result.raise_if_unsat() result.raise_if_unsat()
# take the best answer # take the best answer
opt, i, answer = min(result.answers) opt, i, answer, _ = min(result.answers)
name = self.name name = self.name
# TODO: Consolidate this code with similar code in solve.py # TODO: Consolidate this code with similar code in solve.py
if self.virtual: if self.virtual:

View File

@ -1779,8 +1779,8 @@ def test_version_weight_and_provenance(self):
num_specs = len(list(result_spec.traverse())) num_specs = len(list(result_spec.traverse()))
criteria = [ criteria = [
(num_specs - 1, None, "number of packages to build (vs. reuse)"), (None, num_specs - 1, "number of packages to build (vs. reuse)"),
(2, 0, "version badness"), (2, 0, "NON-ROOTS: version badness"),
] ]
for criterion in criteria: for criterion in criteria:

View File

@ -0,0 +1,72 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import itertools

import pytest

import spack.config
import spack.solver.asp as asp
import spack.spec
import spack.store
# Module-wide marks: every test here requires the new (clingo-based)
# concretizer and runs against a mutable config with the mock package repo.
# NOTE(review): `spack.config` is used here but not imported above — it appears
# to rely on a transitive import; confirm or import it explicitly.
pytestmark = [
    pytest.mark.skipif(
        spack.config.get("config:concretizer") == "original", reason="requires new concretizer"
    ),
    pytest.mark.usefixtures("mutable_config", "mock_packages"),
]
@pytest.fixture
def reusable_specs(mock_packages):
    """Return a sorted list of unique concretized specs (roots plus all their
    dependencies) for a few MPI providers, to be offered for reuse in solves.
    """
    gathered = set()
    for name in ("mpich", "openmpi", "zmpi"):
        gathered.update(spack.spec.Spec(name).concretized().traverse(root=True))
    return sorted(gathered)
@pytest.mark.parametrize(
    "root,reuse",
    itertools.product(
        ("mpileaks ^mpich", "mpileaks ^openmpi", "mpileaks ^zmpi", "patch"),
        (True, False),
    ),
)
def test_all_facts_in_solve(database, root, reuse, reusable_specs):
    """Check that every fact ``spec_clauses()`` generates for a solved spec is
    present in the model returned by the solver.

    This keeps the solver and ``spec_clauses()`` consistent: conditions
    (dependencies, conflicts, etc.) can only be expressed on properties that
    the solver actually models.
    """
    reusable_specs = reusable_specs if reuse else []

    solver = spack.solver.asp.Solver()
    setup = spack.solver.asp.SpackSolverSetup()
    result, _, _ = solver.driver.solve(setup, [spack.spec.Spec(root)], reuse=reusable_specs)

    # the last element of the best answer is the set of attr() facts from the model
    *_, result_attrs = result.answers[0]
    result_attrs = set(result_attrs)

    # check only link and run deps if reusing (loop-invariant, so compute once)
    deptype = ("link", "run") if reuse else "all"

    for spec in result.specs:
        # get all facts about the spec and keep just the attr() functions, not
        # other displayed atoms; DAG/package hashes are added after solving, so
        # they are excluded as well.
        attrs = setup.spec_clauses(spec, deptype=deptype, body=True, expand_hashes=True)
        attrs = set(attr for attr in attrs if attr.name == "attr" and "hash" not in attr.args[0])

        # make sure all facts from spec_clauses() are in the actual solution.
        diff = attrs - result_attrs

        # this is a current bug in the solver: we don't manage dependency patches
        # properly, and with reuse it can grab something w/o the right patch.
        # See https://github.com/spack/spack/issues/32497
        # TODO: Remove this XFAIL when #32497 is fixed.
        patches = [a for a in diff if a.args[0] == "variant_value" and a.args[2] == "patches"]
        if diff and not (diff - set(patches)):
            pytest.xfail("Bug in new concretizer with patch constraints. See #32497.")

        assert not diff

View File

@ -12,4 +12,6 @@ class Openmpi(Package):
variant("internal-hwloc", default=False) variant("internal-hwloc", default=False)
variant("fabrics", values=any_combination_of("psm", "mxm")) variant("fabrics", values=any_combination_of("psm", "mxm"))
provides("mpi")
depends_on("hwloc", when="~internal-hwloc") depends_on("hwloc", when="~internal-hwloc")

View File

@ -23,6 +23,8 @@ class Clingo(CMakePackage):
url = "https://github.com/potassco/clingo/archive/v5.2.2.tar.gz" url = "https://github.com/potassco/clingo/archive/v5.2.2.tar.gz"
git = "https://github.com/potassco/clingo.git" git = "https://github.com/potassco/clingo.git"
tags = ["windows"] tags = ["windows"]
submodules = True
maintainers = ["tgamblin", "alalazo"] maintainers = ["tgamblin", "alalazo"]
version("master", branch="master", submodules=True) version("master", branch="master", submodules=True)