diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py index 717dfa0c511..fb34c44213e 100644 --- a/lib/spack/spack/config.py +++ b/lib/spack/spack/config.py @@ -60,6 +60,7 @@ import spack.schema.modules import spack.schema.packages import spack.schema.repos +import spack.schema.toolchains import spack.schema.upstreams import spack.schema.view import spack.util.remote_file_cache as rfc_util @@ -87,6 +88,7 @@ "bootstrap": spack.schema.bootstrap.schema, "ci": spack.schema.ci.schema, "cdash": spack.schema.cdash.schema, + "toolchains": spack.schema.toolchains.schema, } # Same as above, but including keys for environments diff --git a/lib/spack/spack/schema/toolchains.py b/lib/spack/spack/schema/toolchains.py new file mode 100644 index 00000000000..0b839a738f1 --- /dev/null +++ b/lib/spack/spack/schema/toolchains.py @@ -0,0 +1,23 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +"""Schema for toolchains.yaml configuration file. + +.. 
literalinclude:: _spack_root/lib/spack/spack/schema/toolchains.py + :lines: 14- +""" +from typing import Any, Dict + +#: Properties for inclusion in other schemas +properties: Dict[str, Any] = {"toolchains": {"type": "object", "default": {}}} + + +#: Full schema with metadata +schema = { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Spack toolchain configuration file schema", + "type": "object", + "additionalProperties": False, + "properties": properties, +} diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 1e1e18cfc86..2edf381bd96 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -782,7 +782,7 @@ def __str__(self) -> str: child = self.spec.name if self.spec else None virtuals_string = f"virtuals={','.join(self.virtuals)}" if self.virtuals else "" when_string = f"when={self.when}" if self.when != Spec() else "" - edge_attrs = filter((virtuals_string, when_string), lambda x: bool(x)) + edge_attrs = filter(lambda x: bool(x), (virtuals_string, when_string)) return f"{parent} {self.depflag}[{' '.join(edge_attrs)}] --> {child}" def flip(self) -> "DependencySpec": @@ -3531,7 +3531,7 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: return False # Edges have been checked above already, hence deps=False - lhs_nodes = [x for x in self.traverse(root=False)] + sorted(mock_nodes_from_old_specfiles) + lhs_nodes = list(self.traverse(root=False)) + sorted(mock_nodes_from_old_specfiles) for rhs in other.traverse(root=False): # Possible lhs nodes to match this rhs node lhss = [lhs for lhs in lhs_nodes if lhs.satisfies(rhs, deps=False)] diff --git a/lib/spack/spack/spec_parser.py b/lib/spack/spack/spec_parser.py index 43152e39e90..cfdd46bd502 100644 --- a/lib/spack/spack/spec_parser.py +++ b/lib/spack/spack/spec_parser.py @@ -56,6 +56,7 @@ specs to avoid ambiguity. Both are provided because ~ can cause shell expansion when it is the first character in an id typed on the command line. 
""" +import itertools import json import pathlib import re @@ -66,6 +67,7 @@ from llnl.util.tty import color +import spack.config import spack.deptypes import spack.error import spack.paths @@ -162,6 +164,15 @@ def tokenize(text: str) -> Iterator[Token]: yield token +def parseable_tokens(text: str) -> List[Token]: + """Return non-whitespace tokens from the text passed as input + + Raises: + SpecTokenizationError: when unexpected characters are found in the text + """ + return filter(lambda x: x.kind != SpecTokens.WS, tokenize(text)) + + class TokenContext: """Token context passed around by parsers""" @@ -189,6 +200,10 @@ def accept(self, kind: SpecTokens): def expect(self, *kinds: SpecTokens): return self.next_token and self.next_token.kind in kinds + def push(self, token_stream: Iterator[Token]): + self.token_stream = itertools.chain(token_stream, self.token_stream) + self.advance() + class SpecTokenizationError(spack.error.SpecSyntaxError): """Syntax error in a spec string""" @@ -238,11 +253,13 @@ class SpecParser: def __init__(self, literal_str: str): self.literal_str = literal_str - self.ctx = TokenContext(filter(lambda x: x.kind != SpecTokens.WS, tokenize(literal_str))) + self.ctx = TokenContext(parseable_tokens(literal_str)) def tokens(self) -> List[Token]: """Return the entire list of token from the initial text. White spaces are filtered out. 
+ + Note: This list will not show tokens pushed when parsing an alias """ return list(filter(lambda x: x.kind != SpecTokens.WS, tokenize(self.literal_str))) @@ -268,6 +285,9 @@ def add_dependency(dep, **edge_properties): except spack.error.SpecError as e: raise SpecParsingError(str(e), self.ctx.current_token, self.literal_str) from e + # Get toolchain information outside of loop + toolchains = spack.config.get("toolchains", {}) + initial_spec = initial_spec or spack.spec.Spec() root_spec, parser_warnings = SpecNodeParser(self.ctx, self.literal_str).parse(initial_spec) current_spec = root_spec @@ -297,6 +317,15 @@ def add_dependency(dep, **edge_properties): add_dependency(dependency, **edge_properties) elif self.ctx.accept(SpecTokens.DEPENDENCY): + # String replacement for toolchains + # Look ahead to match upcoming value to list of toolchains + if self.ctx.next_token and self.ctx.next_token.value in toolchains: + assert self.ctx.accept(SpecTokens.UNQUALIFIED_PACKAGE_NAME) + # accepting the token advances it to the current token + # Push associated tokens back to the TokenContext + self.ctx.push(parseable_tokens(toolchains[self.ctx.current_token.value])) + continue + is_direct = self.ctx.current_token.value[0] == "%" dependency, warnings = self._parse_node(root_spec) edge_properties = {}