Compare commits


14 Commits

Author SHA1 Message Date
Todd Gamblin
03cba85029
bugfix: don't include root in general depth rule 2023-03-11 16:21:53 -08:00
Todd Gamblin
2c7a2fa3e0
Don't require priority attribute on models. 2023-02-22 10:53:16 -08:00
Todd Gamblin
7e8fb4b8d0
WIP 2023-01-30 11:48:03 -08:00
Todd Gamblin
54087cf274
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
b0e67d3411
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
d92b653f0b
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
3435806007
WIP 2023-01-24 17:19:21 -08:00
Todd Gamblin
06d5abe895
WIP 2023-01-24 17:19:19 -08:00
Todd Gamblin
dc40e121a3
WIP 2023-01-24 17:18:50 -08:00
Todd Gamblin
03acf14d86
tests: test consistency of solver and spec_clauses
Previously we didn't check whether all facts on a spec were represented in the models
returned by the solver. This is an important check as it ensures that we can have
conditions (dependencies, conflicts, etc.) on any property modeled by spec_clauses.

This caught the issue reported in #32497 (https://github.com/spack/spack/issues/32497),
specifically that we don't currently model patch facts. We can fix this in a follow-on
but the test is marked XFAIL for now.
2023-01-24 17:18:50 -08:00
Todd Gamblin
9ffb642b95
refactor: use AspFunction consistently before and after solve
Currently we create `AspFunction` objects as inputs to solves, but we don't use them
when extracting symbols from clingo solves. Use them more consistently in both
scenarios, and simplify the code.
2023-01-24 17:18:47 -08:00
Todd Gamblin
20e698c6b0
concretizer: add depth calculation 2023-01-24 17:12:10 -08:00
Todd Gamblin
c410f3f392
rename optimization criteria to be clearer about roots/non-roots 2023-01-24 17:12:10 -08:00
Todd Gamblin
b37fab40b5
solver: rework optimization criteria with larger constants
To allow room for DAG-ordered optimization, rework the way we write optimization
criteria in `concretize.lp`, and convert build offsets to use constants.
2023-01-24 17:12:10 -08:00
10 changed files with 430 additions and 299 deletions

View File

@@ -46,14 +46,6 @@ def setup_parser(subparser):
)
def shift(asp_function):
"""Transforms ``attr("foo", "bar")`` into ``foo("bar")``."""
if not asp_function.args:
raise ValueError(f"Can't shift ASP function with no arguments: {str(asp_function)}")
first, *rest = asp_function.args
return asp.AspFunction(first, rest)
def compare_specs(a, b, to_string=False, color=None):
"""
Generate a comparison, including diffs (for each side) and an intersection.
@@ -79,7 +71,7 @@ def compare_specs(a, b, to_string=False, color=None):
# get facts for specs, making sure to include build dependencies of concrete
# specs and to descend into dependency hashes so we include all facts.
a_facts = set(
shift(func)
func.shift()
for func in setup.spec_clauses(
a,
body=True,
@@ -89,7 +81,7 @@ def compare_specs(a, b, to_string=False, color=None):
if func.name == "attr"
)
b_facts = set(
shift(func)
func.shift()
for func in setup.spec_clauses(
b,
body=True,

View File

@@ -97,28 +97,41 @@ def setup_parser(subparser):
def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat()
opt, _, _ = min(result.answers)
opt, *_ = min(result.answers)
# dump the solutions as concretized specs
if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels)
tty.msg("Optimization Criteria:")
maxlen = max(len(s[2]) for s in result.criteria)
color.cprint("@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " "))
maxlen = max(len(name) for name in result.criteria)
max_depth = max(len(v) for v in result.criteria.values() if isinstance(v, list))
fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen
for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1):
color.cprint(
fmt
% (
i,
name,
"-" if build_cost is None else installed_cost,
installed_cost if build_cost is None else build_cost,
)
header = "@*{"
header += "".join(f"{depth:<4}" for depth in range(max_depth))
header += "Criterion}"
color.cprint(header)
# color non-zero numbers so they stand out
def highlight(n, c):
return color.colorize(f"@{c}{{{n:<4}}}" if n > 0 else f"{n:<4}")
for i, (name, cost) in enumerate(result.criteria.items(), 1):
colored_name = name.replace("build:", "@c{build:}")
colored_name = colored_name.replace("reuse:", "@B{reuse:}")
colored_name = colored_name.replace("fixed:", "@G{fixed:}")
colored_name = color.colorize(colored_name)
if isinstance(cost, int):
print(highlight(cost, "G") + " " * (max_depth - 1) + colored_name)
else:
print(
"".join(highlight(c, "c" if "build:" in name else "B") for c in cost)
+ colored_name
)
print()
# dump the solutions as concretized specs
if "solutions" in show:
for spec in result.specs:
# With -y, just print YAML to output.

View File

@@ -13,6 +13,7 @@
import re
import types
import warnings
from typing import Dict, List, Tuple, Union
import archspec.cpu
@@ -124,84 +125,87 @@ def getter(node):
# The space of possible priorities for optimization targets
# is partitioned in the following ranges:
#
# [0-100) Optimization criteria for software being reused
# [100-200) Fixed criteria that are higher priority than reuse, but lower than build
# [200-300) Optimization criteria for software being built
# [300-1000) High-priority fixed criteria
# [1000-inf) Error conditions
# +=============================================================+
# | Priority | Description |
# +=============================================================+
# | 10,000,000+ | Error conditions |
# +-------------+-----------------------------------------------+
# | 9,999,999 | |
# | ... | High-priority criteria |
# | 1,000,000 | |
# +-------------+-----------------------------------------------+
# | 999,999 | |
# | ... | Standard criteria for built packages |
# | 100,001 | |
# +-------------+-----------------------------------------------+
# | 100,000 | Number of packages being built |
# +-------------+-----------------------------------------------+
# | 99,999 | |
# | ... | Standard criteria for reused packages |
# | 0 | |
# +-------------+-----------------------------------------------+
#
# Each optimization target is a minimization with optimal value 0.
#
#: High fixed priority offset for criteria that supersede all build criteria
high_fixed_priority_offset = 300
high_fixed_priority_offset = 10_000_000
#: Priority offset for "build" criteria (regular criteria shifted to
#: higher priority for specs we have to build)
build_priority_offset = 200
build_priority_offset = 100_000
#: Priority offset of "fixed" criteria (those w/o build criteria)
fixed_priority_offset = 100
#: max priority for an error
max_error_priority = 3
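# Editorial sketch (not part of this diff): classify a ``#minimize`` bucket
# priority into the ranges from the table above, using the constants defined
# here; the 1_000_000 boundary is ``solve_prio`` from ``concretize.lp``.
def _priority_range(priority: int) -> str:
    """Name the range of the priority table that ``priority`` falls in."""
    if priority >= high_fixed_priority_offset:  # 10,000,000 and up
        return "error condition"
    elif priority >= 1_000_000:  # solve_prio .. 9,999,999
        return "high-priority fixed criterion"
    elif priority > build_priority_offset:  # 100,001 .. 999,999
        return "standard criterion (built package)"
    elif priority == build_priority_offset:  # exactly 100,000
        return "number of packages being built"
    else:  # 0 .. 99,999
        return "standard criterion (reused package)"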
def build_criteria_names(costs, arg_tuples):
def build_criteria_names(
costs: List[int], opt_criteria: List["AspFunction"], max_depth: int
) -> Dict[str, Union[int, List[Tuple[int, int]]]]:
"""Construct an ordered mapping from criteria names to costs."""
# pull optimization criteria names out of the solution
priorities_names = []
num_fixed = 0
num_high_fixed = 0
for args in arg_tuples:
priority, name = args[:2]
priority = int(priority)
# ensure names of all criteria are unique
names = {criterion.args[0] for criterion in opt_criteria}
assert len(names) == len(opt_criteria), "names of optimization criteria must be unique"
# add the priority of this opt criterion and its name
priorities_names.append((priority, name))
# split opt criteria into two lists
fixed_criteria = [oc for oc in opt_criteria if oc.args[1] == "fixed"]
leveled_criteria = [oc for oc in opt_criteria if oc.args[1] == "leveled"]
# if the priority is less than fixed_priority_offset, then it
# has an associated build priority -- the same criterion but for
# nodes that we have to build.
if priority < fixed_priority_offset:
build_priority = priority + build_priority_offset
priorities_names.append((build_priority, name))
elif priority >= high_fixed_priority_offset:
num_high_fixed += 1
else:
num_fixed += 1
# first non-error criterion
solve_index = max_error_priority + 1
# sort the criteria by priority
priorities_names = sorted(priorities_names, reverse=True)
# compute without needing max_depth from solve
max_leveled_costs = (len(costs) - max_error_priority - 3) / 2
assert max_leveled_costs * 2 == len(costs) - max_error_priority - 3
assert max_leveled_costs % len(leveled_criteria) == 0
max_leveled_costs = int(max_leveled_costs)
# We only have opt-criterion values for non-error types
# error type criteria are excluded (they come first)
error_criteria = len(costs) - len(priorities_names)
costs = costs[error_criteria:]
n_leveled_costs = len(leveled_criteria) * (max_depth + 1)
# split list into three parts: build criteria, fixed criteria, non-build criteria
num_criteria = len(priorities_names)
num_build = (num_criteria - num_fixed - num_high_fixed) // 2
build_index = solve_index + 1 + max_leveled_costs
fixed_costs = [costs[solve_index], costs[build_index]]
build_start_idx = num_high_fixed
fixed_start_idx = num_high_fixed + num_build
installed_start_idx = num_high_fixed + num_build + num_fixed
build_costs = costs[solve_index + 1 : solve_index + 1 + n_leveled_costs]
reuse_costs = costs[build_index + 1 : build_index + 1 + n_leveled_costs]
assert len(build_costs) == len(reuse_costs) == n_leveled_costs
high_fixed = priorities_names[:build_start_idx]
build = priorities_names[build_start_idx:fixed_start_idx]
fixed = priorities_names[fixed_start_idx:installed_start_idx]
installed = priorities_names[installed_start_idx:]
criteria = {}
# mapping from priority to index in cost list
indices = dict((p, i) for i, (p, n) in enumerate(priorities_names))
def add_fixed(criterion_idx, cost_idx):
name = fixed_criteria[criterion_idx].args[2]
criteria["fixed: " + name] = costs[cost_idx]
# make a list that has each name with its build and non-build costs
criteria = [(cost, None, name) for cost, (p, name) in zip(costs[:build_start_idx], high_fixed)]
criteria += [
(cost, None, name)
for cost, (p, name) in zip(costs[fixed_start_idx:installed_start_idx], fixed)
]
add_fixed(0, solve_index)
for (i, name), (b, _) in zip(installed, build):
criteria.append((costs[indices[i]], costs[indices[b]], name))
for i, fn in enumerate(leveled_criteria):
name = fn.args[2]
criteria["build: " + name] = build_costs[i :: len(leveled_criteria)]
add_fixed(1, build_index)
for i, fn in enumerate(leveled_criteria):
name = fn.args[2]
criteria["reuse: " + name] = reuse_costs[i :: len(leveled_criteria)]
return criteria
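# Editorial sketch: with ``max_depth == 4``, the mapping returned above looks
# like this (costs hypothetical). "fixed" criteria map to a single cost;
# "leveled" criteria map to one cost per depth level:
#
#     {
#         "fixed: number of input specs not concretized": 0,
#         "fixed: number of packages to build (vs. reuse)": 2,
#         "build: version badness": [1, 0, 0, 0, 0],
#         "reuse: version badness": [0, 2, 0, 0, 0],
#     }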
@@ -251,7 +255,11 @@ def _id(thing):
class AspFunction(AspObject):
def __init__(self, name, args=None):
self.name = name
self.args = () if args is None else tuple(args)
def simplify(arg):
return arg if isinstance(arg, (str, bool, int)) else str(arg)
self.args = () if args is None else tuple(simplify(arg) for arg in args)
def _cmp_key(self):
return (self.name, self.args)
@@ -286,10 +294,29 @@ def argify(arg):
elif isinstance(arg, int):
return clingo.Number(arg)
else:
return clingo.String(str(arg))
return clingo.String(arg)
return clingo.Function(self.name, [argify(arg) for arg in self.args], positive=positive)
@staticmethod
def from_symbol(symbol):
def deargify(arg):
if arg.type is clingo.SymbolType.Number:
return arg.number
elif arg.type is clingo.SymbolType.String and arg.string in ("True", "False"):
return arg.string == "True"
else:
return arg.string
return AspFunction(symbol.name, [deargify(arg) for arg in symbol.arguments])
def shift(self):
"""Transforms ``attr("foo", "bar")`` into ``foo("bar")``."""
if not self.args:
raise ValueError(f"Can't shift ASP function with no arguments: {str(self)}")
first, *rest = self.args
return AspFunction(first, rest)
def __str__(self):
return "%s(%s)" % (self.name, ", ".join(str(_id(arg)) for arg in self.args))
@@ -494,7 +521,8 @@ def _compute_specs_from_answer_set(self):
self._concrete_specs, self._unsolved_specs = [], []
self._concrete_specs_by_input = {}
best = min(self.answers)
opt, _, answer = best
opt, _, answer, _ = best
for input_spec in self.abstract_specs:
key = input_spec.name
if input_spec.virtual:
@@ -576,13 +604,13 @@ def stringify(sym):
return sym.string or str(sym)
def extract_args(model, predicate_name):
"""Extract the arguments to predicates with the provided name from a model.
def extract_functions(model, function_name):
"""Extract ASP functions with the given name from a model.
Pull out all the predicates with name ``predicate_name`` from the model, and return
their stringified arguments as tuples.
Pull out all the functions with name ``function_name`` from the model, and return them as
``AspFunction`` objects.
"""
return [stringify(sym.arguments) for sym in model if sym.name == predicate_name]
return [AspFunction.from_symbol(sym) for sym in model if sym.name == function_name]
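# Editorial sketch: a best model containing the symbol
# ``opt_criterion(55, "leveled", "version badness")`` would yield
# ``AspFunction("opt_criterion", (55, "leveled", "version badness"))``.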
class PyclingoDriver(object):
@@ -681,11 +709,10 @@ def solve(self, setup, specs, reuse=None, output=None, control=None):
self.control = control or default_clingo_control()
# set up the problem -- this generates facts and rules
self.assumptions = []
timer.start("setup")
with timer.measure("setup"):
with self.control.backend() as backend:
self.backend = backend
setup.setup(self, specs, reuse=reuse)
timer.stop("setup")
timer.start("load")
# read in the main ASP program and display logic -- these are
@@ -731,7 +758,8 @@ def visit(node):
cores = [] # unsatisfiable cores if they do not
def on_model(model):
models.append((model.cost, model.symbols(shown=True, terms=True)))
priorities = getattr(model, "priority", None)
models.append((model.cost, priorities, model.symbols(shown=True, terms=True)))
solve_kwargs = {
"assumptions": self.assumptions,
@@ -752,24 +780,28 @@ def on_model(model):
if result.satisfiable:
# get the best model
builder = SpecBuilder(specs, hash_lookup=setup.reusable_and_possible)
min_cost, best_model = min(models)
min_cost, priorities, best_model = min(models)
# first check for errors
error_args = extract_args(best_model, "error")
error_args = [fn.args for fn in extract_functions(best_model, "error")]
errors = sorted((int(priority), msg, args) for priority, msg, *args in error_args)
for _, msg, args in errors:
self.handle_error(msg, *args)
# build specs from spec attributes in the model
spec_attrs = [(name, tuple(rest)) for name, *rest in extract_args(best_model, "attr")]
spec_attrs = extract_functions(best_model, "attr")
with timer.measure("build"):
answers = builder.build_specs(spec_attrs)
# add best spec to the results
result.answers.append((list(min_cost), 0, answers))
result.answers.append((list(min_cost), 0, answers, spec_attrs))
# get optimization criteria
criteria_args = extract_args(best_model, "opt_criterion")
result.criteria = build_criteria_names(min_cost, criteria_args)
criteria = extract_functions(best_model, "opt_criterion")
depths = extract_functions(best_model, "depth")
max_depth = max(d.args[1] for d in depths)
result.criteria = build_criteria_names(min_cost, criteria, max_depth)
# record the number of models the solver considered
result.nmodels = len(models)
@@ -779,7 +811,7 @@ def on_model(model):
# print any unknown functions in the model
for sym in best_model:
if sym.name not in ("attr", "error", "opt_criterion"):
if sym.name not in ("attr", "error", "opt_criterion", "depth", "const_max_depth"):
tty.debug(
"UNKNOWN SYMBOL: %s(%s)" % (sym.name, ", ".join(stringify(sym.arguments)))
)
@@ -1409,23 +1441,23 @@ def spec_clauses(self, *args, **kwargs):
def _spec_clauses(
self,
spec,
body=False,
transitive=True,
expand_hashes=False,
concrete_build_deps=False,
body: bool = False,
transitive: bool = True,
expand_hashes: bool = False,
concrete_build_deps: bool = False,
deptype: Union[str, Tuple[str, ...]] = "all",
):
"""Return a list of clauses for a spec mandates are true.
Arguments:
spec (spack.spec.Spec): the spec to analyze
body (bool): if True, generate clauses to be used in rule bodies
spec: the spec to analyze
body: if True, generate clauses to be used in rule bodies
(final values) instead of rule heads (setters).
transitive (bool): if False, don't generate clauses from
dependencies (default True)
expand_hashes (bool): if True, descend into hashes of concrete specs
(default False)
concrete_build_deps (bool): if False, do not include pure build deps
transitive: if False, don't generate clauses from dependencies.
expand_hashes: if transitive and True, descend into hashes of concrete specs.
concrete_build_deps: if False, do not include pure build deps
of concrete specs (as they have no effect on runtime constraints)
deptype: dependency types to follow when transitive (default "all").
Normally, if called with ``transitive=True``, ``spec_clauses()`` just generates
hashes for the dependency requirements of concrete specs. If ``expand_hashes``
@@ -1553,7 +1585,7 @@ class Body(object):
# add all clauses from dependencies
if transitive:
# TODO: Eventually distinguish 2 deps on the same pkg (build and link)
for dspec in spec.edges_to_dependencies():
for dspec in spec.edges_to_dependencies(deptype=deptype):
dep = dspec.spec
if spec.concrete:
@@ -1585,6 +1617,7 @@ class Body(object):
body=body,
expand_hashes=expand_hashes,
concrete_build_deps=concrete_build_deps,
deptype=deptype,
)
)
@@ -2316,7 +2349,7 @@ def deprecated(self, pkg, version):
tty.warn(msg.format(pkg, version))
@staticmethod
def sort_fn(function_tuple):
def sort_fn(function):
"""Ensure attributes are evaluated in the correct order.
hash attributes are handled first, since they imply entire concrete specs
@@ -2326,7 +2359,7 @@ def sort_fn(function_tuple):
the concrete specs on which they depend because all nodes are fully constructed before we
consider which ones are external.
"""
name = function_tuple[0]
name = function.args[0]
if name == "hash":
return (-5, 0)
elif name == "node":
@@ -2340,23 +2373,24 @@ def sort_fn(function_tuple):
else:
return (-1, 0)
def build_specs(self, function_tuples):
def build_specs(self, functions):
# Functions don't seem to be in any particular order in output. Sort
# them here so that directives that build objects (like node and
# node_compiler) are called in the right order.
self.function_tuples = sorted(set(function_tuples), key=self.sort_fn)
self.functions = sorted(set(functions), key=self.sort_fn)
self._specs = {}
for name, args in self.function_tuples:
if SpecBuilder.ignored_attributes.match(name):
for attr in self.functions:
fn = attr.shift() # attr("foo", "bar") -> foo("bar")
if SpecBuilder.ignored_attributes.match(fn.name):
continue
action = getattr(self, name, None)
action = getattr(self, fn.name, None)
# print out unknown actions so we can display them for debugging
if not action:
msg = 'UNKNOWN SYMBOL: attr("%s", %s)' % (name, ", ".join(str(a) for a in args))
tty.debug(msg)
tty.debug(f"UNKNOWN SYMBOL: {attr}")
continue
msg = (
@@ -2368,8 +2402,8 @@ def build_specs(self, function_tuples):
# ignore predicates on virtual packages, as they're used for
# solving but don't construct anything. Do not ignore error
# predicates on virtual packages.
if name != "error":
pkg = args[0]
if fn.name != "error":
pkg = fn.args[0]
if spack.repo.path.is_virtual(pkg):
continue
@@ -2379,7 +2413,7 @@ def build_specs(self, function_tuples):
if spec and spec.concrete:
continue
action(*args)
action(*fn.args)
# namespace assignment is done after the fact, as it is not
# currently part of the solve

View File

@@ -23,9 +23,17 @@ literal_not_solved(ID) :- not literal_solved(ID), literal(ID).
% in better reporting for users. See #30669 for details.
1 { literal_solved(ID) : literal(ID) }.
opt_criterion(300, "number of input specs not concretized").
#minimize{ 0@300: #true }.
#minimize { 1@300,ID : literal_not_solved(ID) }.
% priority ranges for optimization criteria
% note that clingo's weight_t is int32_t, so the max priority we can use is 2,147,483,647
#const max_error_priority = 3.
#const error_prio = 10000000.
#const solve_prio = 1000000.
#const build_prio = 100000. % n_nodes x depth_offset x max levels needs to be less than this
#const depth_offset = 100. % depth_offset-1 is the max id for leveled criteria
opt_criterion(solve_prio, "fixed", "number of input specs not concretized").
#minimize{ 0@solve_prio: #true }.
#minimize{ 1@solve_prio,ID : literal_not_solved(ID) }.
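% Editorial sketch: the highest bucket these constants produce is an error at
%   error_prio + max_error_priority = 10,000,000 + 3 = 10,000,003
% which leaves ample headroom under weight_t's maximum of 2,147,483,647.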
% Map constraint on the literal ID to the correct PSID
attr(Name, A1) :- literal(LiteralID, Name, A1), literal_solved(LiteralID).
@@ -65,7 +73,8 @@ version_declared(Package, Version, Weight) :- version_declared(Package, Version,
version_declared(Package, Version) :- version_declared(Package, Version, _).
% a spec with a git hash version is equivalent to one with the same matched version
version_satisfies(Package, Constraint, HashVersion) :- version_satisfies(Package, Constraint, EquivalentVersion),
version_satisfies(Package, Constraint, HashVersion) :-
version_satisfies(Package, Constraint, EquivalentVersion),
version_equivalent(Package, HashVersion, EquivalentVersion).
#defined version_equivalent/3.
@@ -146,7 +155,10 @@ possible_version_weight(Package, Weight)
% Otherwise covered by `no_version_error` and `versions_conflict_error`.
error(1, "No valid version for '{0}' satisfies '@{1}'", Package, Constraint)
:- attr("node_version_satisfies", Package, Constraint),
C = #count{ Version : attr("version", Package, Version), version_satisfies(Package, Constraint, Version)},
C = #count{
Version
: attr("version", Package, Version), version_satisfies(Package, Constraint, Version)
},
C < 1.
attr("node_version_satisfies", Package, Constraint)
@@ -1077,11 +1089,11 @@ build(Package) :- not attr("hash", Package, _), attr("node", Package).
% 200+ Shifted priorities for build nodes; correspond to priorities 0 - 99.
% 100 - 199 Unshifted priorities. Currently only includes minimizing #builds.
% 0 - 99 Priorities for non-built nodes.
build_priority(Package, 200) :- build(Package), attr("node", Package), optimize_for_reuse().
build_priority(Package, build_prio) :- build(Package), attr("node", Package), optimize_for_reuse().
build_priority(Package, 0) :- not build(Package), attr("node", Package), optimize_for_reuse().
% don't adjust build priorities if reuse is not enabled
build_priority(Package, 0) :- attr("node", Package), not optimize_for_reuse().
build_priority(Package, build_prio) :- attr("node", Package), not optimize_for_reuse().
% don't assign versions from installed packages unless reuse is enabled
% NOTE: that "installed" means the declared version was only included because
@@ -1101,22 +1113,56 @@ build_priority(Package, 0) :- attr("node", Package), not optimize_for_reuse().
#defined installed_hash/2.
%-----------------------------------------------------------------------------
% Calculate min depth of nodes in the DAG
% We use this to optimize nodes closer to roots with higher precedence.
%-----------------------------------------------------------------------------
#const max_depth = 4.
% roots have depth 0.
depth(Package, 0) :- attr("root", Package).
%depth(Package, D+1) :- depth(Dependent, D), depends_on(Dependent, Package), D < max_depth.
%parent_depth(Package, D) :-
% depends_on(Dependent, Package),
% depth(Dependent, D),
% D < max_depth - 1.
%depth(Package, M+1) :-
% M = #min{ D: parent_depth(Package, D); max_depth - 1 },
% attr("node", Package).
% other nodes' depth is the minimum depth of any dependent plus one.
depth(Package, N + 1) :-
N = #min{
D: depends_on(Dependent, Package),
depth(Dependent, D),
D < max_depth;
max_depth - 1
},
N = 0..max_depth - 1,
not attr("root", Package),
attr("node", Package).
%-----------------------------------------------------------------
% Optimization to avoid errors
%-----------------------------------------------------------------
% Some errors are handled as rules instead of constraints because
% it allows us to explain why something failed. Here we optimize
% HEAVILY against the facts generated by those rules.
#minimize{ 0@1000: #true}.
#minimize{ 0@1001: #true}.
#minimize{ 0@1002: #true}.
#minimize{ 1000@1000+Priority,Msg: error(Priority, Msg) }.
#minimize{ 1000@1000+Priority,Msg,Arg1: error(Priority, Msg, Arg1) }.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2: error(Priority, Msg, Arg1, Arg2) }.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3: error(Priority, Msg, Arg1, Arg2, Arg3) }.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3,Arg4: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4) }.
#minimize{ 1000@1000+Priority,Msg,Arg1,Arg2,Arg3,Arg4,Arg5: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4, Arg5) }.
% ensure that error costs are always in the solution.
#minimize{ 0@error_prio + (0..max_error_priority): #true}.
% TODO: why 1000 and not just 1? 1000 seems unnecessary since priorities are lexicographic.
#minimize{ 1000@error_prio+Priority,Msg: error(Priority, Msg) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1: error(Priority, Msg, Arg1) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2: error(Priority, Msg, Arg1, Arg2) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2,Arg3: error(Priority, Msg, Arg1, Arg2, Arg3) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2,Arg3,Arg4: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4) }.
#minimize{ 1000@error_prio+Priority,Msg,Arg1,Arg2,Arg3,Arg4,Arg5: error(Priority, Msg, Arg1, Arg2, Arg3, Arg4, Arg5) }.
%-----------------------------------------------------------------------------
% How to optimize the spec (high to low priority)
@@ -1126,199 +1172,157 @@ build_priority(Package, 0) :- attr("node", Package), not optimize_for_reuse().
% 2. a `#minimize{ 0@2 : #true }.` statement that ensures the criterion
% is displayed (clingo doesn't display sums over empty sets by default)
% Ensure that values are returned by clingo for every distinct optimization criterion.
% Some criteria are "fixed" and have only one bucket. Others are summed into multiple
% buckets -- per build priority and per depth in the graph.
% If we don't do this, it's very hard to read the sums back. We use `0@...` because
% it doesn't affect the sums -- it just ensures that clingo returns them.
% "fixed" criteria have one bucket -- their priority.
#minimize{ 0@N: opt_criterion(N, "fixed", _) }.
% "leveled" criteria sum into a bucket per depth in the graph, per build priority
#minimize{
0@(((max_depth - D - 1) * depth_offset) + N + build_prio)
: opt_criterion(N, "leveled", _), depth(_, D)
}.
#minimize{
0@(((max_depth - D - 1) * depth_offset) + N)
: opt_criterion(N, "leveled", _), depth(_, D)
}.
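% Editorial sketch: "version badness" below has base priority 55. With
% max_depth = 4 and depth_offset = 100, a node at depth D = 1 sums into bucket
%   55 + (4 - 1 - 1) * 100 = 255      (reused node, Priority = 0)
% and a built node's bucket is shifted up by build_prio:
%   255 + 100,000 = 100,255           (built node, Priority = build_prio)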
% Try hard to reuse installed packages (i.e., minimize the number built)
opt_criterion(100, "number of packages to build (vs. reuse)").
#minimize { 0@100: #true }.
#minimize { 1@100,Package : build(Package), optimize_for_reuse() }.
opt_criterion(build_prio, "fixed", "number of packages to build (vs. reuse)").
#minimize { 1@build_prio,Package : build(Package), optimize_for_reuse() }.
#defined optimize_for_reuse/0.
% A condition group specifies one or more specs that must be satisfied.
% Specs declared first are preferred, so we assign increasing weights and
% minimize the weights.
opt_criterion(75, "requirement weight").
#minimize{ 0@275: #true }.
#minimize{ 0@75: #true }.
#minimize {
Weight@75+Priority
opt_criterion(65, "leveled", "requirement weight").
#minimize{
Weight@(65 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: requirement_weight(Package, Weight),
build_priority(Package, Priority)
build_priority(Package, Priority),
depth(Package, D)
}.
% Minimize the number of deprecated versions being used
opt_criterion(73, "deprecated versions used").
#minimize{ 0@273: #true }.
#minimize{ 0@73: #true }.
opt_criterion(60, "leveled", "deprecated versions used").
#minimize{
1@73+Priority,Package
1@(60 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: attr("deprecated", Package, _),
build_priority(Package, Priority)
build_priority(Package, Priority),
depth(Package, D)
}.
% Minimize the:
% 1. Version weight
% 2. Number of variants with a non default value, if not set
% for the root package.
opt_criterion(70, "version weight").
#minimize{ 0@270: #true }.
#minimize{ 0@70: #true }.
#minimize {
Weight@70+Priority
: attr("root", Package), version_weight(Package, Weight),
build_priority(Package, Priority)
}.
opt_criterion(65, "number of non-default variants (roots)").
#minimize{ 0@265: #true }.
#minimize{ 0@65: #true }.
#minimize {
1@65+Priority,Package,Variant,Value
: variant_not_default(Package, Variant, Value),
attr("root", Package),
build_priority(Package, Priority)
}.
opt_criterion(60, "preferred providers for roots").
#minimize{ 0@260: #true }.
#minimize{ 0@60: #true }.
opt_criterion(55, "leveled", "version badness").
#minimize{
Weight@60+Priority,Provider,Virtual
: provider_weight(Provider, Virtual, Weight),
attr("root", Provider),
build_priority(Provider, Priority)
}.
opt_criterion(55, "default values of variants not being used (roots)").
#minimize{ 0@255: #true }.
#minimize{ 0@55: #true }.
#minimize{
1@55+Priority,Package,Variant,Value
: variant_default_not_used(Package, Variant, Value),
attr("root", Package),
build_priority(Package, Priority)
}.
% Try to use default variants or variants that have been set
opt_criterion(50, "number of non-default variants (non-roots)").
#minimize{ 0@250: #true }.
#minimize{ 0@50: #true }.
#minimize {
1@50+Priority,Package,Variant,Value
: variant_not_default(Package, Variant, Value),
not attr("root", Package),
build_priority(Package, Priority)
}.
% Minimize the weights of the providers, i.e. use as much as
% possible the most preferred providers
opt_criterion(45, "preferred providers (non-roots)").
#minimize{ 0@245: #true }.
#minimize{ 0@45: #true }.
#minimize{
Weight@45+Priority,Provider,Virtual
: provider_weight(Provider, Virtual, Weight), not attr("root", Provider),
build_priority(Provider, Priority)
}.
% Try to minimize the number of compiler mismatches in the DAG.
opt_criterion(40, "compiler mismatches that are not from CLI").
#minimize{ 0@240: #true }.
#minimize{ 0@40: #true }.
#minimize{
1@40+Priority,Package,Dependency
: compiler_mismatch(Package, Dependency),
build_priority(Package, Priority)
}.
opt_criterion(39, "compiler mismatches that are not from CLI").
#minimize{ 0@239: #true }.
#minimize{ 0@39: #true }.
#minimize{
1@39+Priority,Package,Dependency
: compiler_mismatch_required(Package, Dependency),
build_priority(Package, Priority)
}.
% Try to minimize the number of compiler mismatches in the DAG.
opt_criterion(35, "OS mismatches").
#minimize{ 0@235: #true }.
#minimize{ 0@35: #true }.
#minimize{
1@35+Priority,Package,Dependency
: node_os_mismatch(Package, Dependency),
build_priority(Package, Priority)
}.
opt_criterion(30, "non-preferred OS's").
#minimize{ 0@230: #true }.
#minimize{ 0@30: #true }.
#minimize{
Weight@30+Priority,Package
: node_os_weight(Package, Weight),
build_priority(Package, Priority)
}.
% Choose more recent versions for nodes
opt_criterion(25, "version badness").
#minimize{ 0@225: #true }.
#minimize{ 0@25: #true }.
#minimize{
Weight@25+Priority,Package
Weight@(55 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: version_weight(Package, Weight),
build_priority(Package, Priority)
build_priority(Package, Priority),
depth(Package, D)
}.
% Try to use all the default values of variants
opt_criterion(20, "default values of variants not being used (non-roots)").
#minimize{ 0@220: #true }.
#minimize{ 0@20: #true }.
opt_criterion(50, "leveled", "number of non-default variants").
#minimize{
1@20+Priority,Package,Variant,Value
1@(50 + ((max_depth - D - 1) * depth_offset) + Priority), Package, Variant, Value
: variant_not_default(Package, Variant, Value),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(45, "leveled", "preferred providers").
#minimize{
Weight@(45 + ((max_depth - D - 1) * depth_offset) + Priority), Provider, Virtual
: provider_weight(Provider, Virtual, Weight),
build_priority(Provider, Priority),
depth(Package, D)
}.
opt_criterion(40, "leveled", "default values of variants not being used").
#minimize{
1@(40 + ((max_depth - D - 1) * depth_offset) + Priority), Package, Variant, Value
: variant_default_not_used(Package, Variant, Value),
not attr("root", Package),
build_priority(Package, Priority)
build_priority(Package, Priority),
depth(Package, D)
}.
% Try to use preferred compilers
opt_criterion(15, "non-preferred compilers").
#minimize{ 0@215: #true }.
#minimize{ 0@15: #true }.
opt_criterion(35, "leveled", "compiler mismatches (not from CLI)").
#minimize{
Weight@15+Priority,Package
1@(35 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: compiler_mismatch(Dependent, Package),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(30, "leveled", "compiler mismatches (from CLI)").
#minimize{
1@(30 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: compiler_mismatch_required(Dependent, Package),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(25, "leveled", "OS mismatches").
#minimize{
1@(25 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: node_os_mismatch(Dependent, Package),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(20, "leveled", "non-preferred compilers").
#minimize{
Weight@(20 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: compiler_weight(Package, Weight),
build_priority(Package, Priority)
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(15, "leveled", "non-preferred OS's").
#minimize{
Weight@(15 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: node_os_weight(Package, Weight),
build_priority(Package, Priority),
depth(Package, D)
}.
% Minimize the number of mismatches for targets in the DAG, try
% to select the preferred target.
opt_criterion(10, "target mismatches").
#minimize{ 0@210: #true }.
#minimize{ 0@10: #true }.
opt_criterion(10, "leveled", "target mismatches").
#minimize{
1@10+Priority,Package,Dependency
: node_target_mismatch(Package, Dependency),
build_priority(Package, Priority)
1@(10 + ((max_depth - D - 1) * depth_offset) + Priority), Dependent, Package
: node_target_mismatch(Dependent, Package),
build_priority(Package, Priority),
depth(Package, D)
}.
opt_criterion(5, "non-preferred targets").
#minimize{ 0@205: #true }.
#minimize{ 0@5: #true }.
opt_criterion(5, "leveled", "non-preferred targets").
#minimize{
Weight@5+Priority,Package
Weight@(5 + ((max_depth - D - 1) * depth_offset) + Priority), Package
: node_target_weight(Package, Weight),
build_priority(Package, Priority)
build_priority(Package, Priority),
depth(Package, D)
}.
%-----------------
% Domain heuristic
%-----------------
#heuristic attr("version", Package, Version) : version_declared(Package, Version, 0), attr("node", Package). [10, true]
#heuristic version_weight(Package, 0) : version_declared(Package, Version, 0), attr("node", Package). [10, true]
#heuristic attr("node_target", Package, Target) : package_target_weight(Target, Package, 0), attr("node", Package). [10, true]
#heuristic node_target_weight(Package, 0) : attr("node", Package). [10, true]
#heuristic attr("variant_value", Package, Variant, Value) : variant_default_value(Package, Variant, Value), attr("node", Package). [10, true]
#heuristic provider(Package, Virtual) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true]
#heuristic attr("node", Package) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true]
#heuristic attr("node_os", Package, OS) : buildable_os(OS). [10, true]
%#heuristic provider(Package, Virtual) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true]
%#heuristic attr("node", Package) : possible_provider_weight(Package, Virtual, 0, _), attr("virtual_node", Virtual). [10, true]
%#heuristic attr("node_os", Package, OS) : buildable_os(OS). [10, true]
%#heuristic attr("node_target", Dependency, Target): depends_on(Package, Dependency), attr("node_target", Package, Target). [20, true]
%#heuristic attr("node_os", Dependency, OS): depends_on(Package, Dependency), attr("node_os", Package, OS). [20, true]
%-----------
% Notes

View File

@@ -15,7 +15,7 @@
#show attr/4.
% names of optimization criteria
#show opt_criterion/2.
#show opt_criterion/3.
% error types
#show error/2.
@@ -25,4 +25,16 @@
#show error/6.
#show error/7.
% depths
#show depth/2.
%#show parent_depth/2.
% debug
%#show depends_on/2.
%node(Package) :- attr("node", Package).
%#show node/1.
%version(Package, Version) :- attr("version", Package, Version).
%#show version/2.

View File

@@ -2928,7 +2928,7 @@ def _new_concretize(self, tests=False):
result.raise_if_unsat()
# take the best answer
opt, i, answer = min(result.answers)
opt, i, answer, _ = min(result.answers)
name = self.name
# TODO: Consolidate this code with similar code in solve.py
if self.virtual:

View File

@@ -1779,8 +1779,8 @@ def test_version_weight_and_provenance(self):
num_specs = len(list(result_spec.traverse()))
criteria = [
(num_specs - 1, None, "number of packages to build (vs. reuse)"),
(2, 0, "version badness"),
(None, num_specs - 1, "number of packages to build (vs. reuse)"),
(2, 0, "NON-ROOTS: version badness"),
]
for criterion in criteria:

View File

@@ -0,0 +1,72 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import itertools
import pytest
import spack.config
import spack.spec
import spack.solver.asp as asp
import spack.store
pytestmark = [
pytest.mark.skipif(
spack.config.get("config:concretizer") == "original", reason="requires new concretizer"
),
pytest.mark.usefixtures("mutable_config", "mock_packages"),
]
@pytest.fixture
def reusable_specs(mock_packages):
reusable_specs = []
for spec in ["mpich", "openmpi", "zmpi"]:
reusable_specs.extend(s for s in spack.spec.Spec(spec).concretized().traverse(root=True))
return list(sorted(set(reusable_specs)))
@pytest.mark.parametrize(
"root,reuse",
itertools.product(
("mpileaks ^mpich", "mpileaks ^openmpi", "mpileaks ^zmpi", "patch"),
(True, False),
),
)
def test_all_facts_in_solve(database, root, reuse, reusable_specs):
reusable_specs = reusable_specs if reuse else []
solver = spack.solver.asp.Solver()
setup = spack.solver.asp.SpackSolverSetup()
result, _, _ = solver.driver.solve(setup, [spack.spec.Spec(root)], reuse=reusable_specs)
*_, result_attrs = result.answers[0]
result_attrs = set(result_attrs)
def remove_hashes(attrs):
return []
for spec in result.specs:
# check only link and run deps if reusing.
deptype = ("link", "run") if reuse else "all"
# get all facts about the spec and filter out just the "attr" ones.
attrs = setup.spec_clauses(spec, deptype=deptype, body=True, expand_hashes=True)
# only consider attr() functions, not other displayed atoms
# don't consider any DAG/package hashes, as they are added after solving
attrs = set(attr for attr in attrs if attr.name == "attr" and "hash" not in attr.args[0])
# make sure all facts from the solver are in the actual solution.
diff = attrs - result_attrs
# this is a current bug in the solver: we don't manage dependency patches
# properly, and with reuse it can grab something w/o the right patch.
# See https://github.com/spack/spack/issues/32497
# TODO: Remove this XFAIL when #32497 is fixed.
patches = [a for a in diff if a.args[0] == "variant_value" and a.args[2] == "patches"]
if diff and not (diff - set(patches)):
pytest.xfail("Bug in new concretizer with patch constraints. See #32497.")
assert not diff

View File

@@ -12,4 +12,6 @@ class Openmpi(Package):
variant("internal-hwloc", default=False)
variant("fabrics", values=any_combination_of("psm", "mxm"))
provides("mpi")
depends_on("hwloc", when="~internal-hwloc")

View File

@@ -23,6 +23,8 @@ class Clingo(CMakePackage):
url = "https://github.com/potassco/clingo/archive/v5.2.2.tar.gz"
git = "https://github.com/potassco/clingo.git"
tags = ["windows"]
submodules = True
maintainers = ["tgamblin", "alalazo"]
version("master", branch="master", submodules=True)