Stand-alone testing: make recipe support and processing spack-/pytest-like (#34236)

This is a refactor of Spack's stand-alone test process to be more spack- and pytest-like. 

It is more spack-like in that test parts are no longer "hidden" in a package's run_test()
method and pytest-like in that any package method whose name starts with test_
(i.e., a "test" method) is a test part. We also support the ability to embed test parts in a
test method when that makes sense.

Test methods are now implicit test parts. The docstring is the purpose for the test part. 
The name of the method is the name of the test part. The working directory is the active
spec's test stage directory. You can embed test parts using the test_part context manager.

Functionality added by this commit:
* Adds support for multiple test_* stand-alone package test methods, each of which is 
   an implicit test_part for execution and reporting purposes;
* Deprecates package use of run_test();
* Exposes some functionality from run_test() as optional helper methods;
* Adds a SkipTest exception that can be used to flag stand-alone tests as being skipped;
* Updates the packaging guide section on stand-alone tests to provide more examples;
* Restores the ability to run tests "inherited" from provided virtual packages;
* Prints the test log path (like we currently do for build log paths);
* Times and reports the post-install process (since it can include post-install tests);
* Corrects context-related error message to distinguish test recipes from build recipes.
This commit is contained in:
Tamara Dahlgren 2023-05-10 02:34:54 -07:00 committed by GitHub
parent 49677b9be5
commit 9a37c8fcb1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
46 changed files with 2367 additions and 1034 deletions

View File

@ -217,6 +217,7 @@ def setup(sphinx):
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.StandardVersion"),
("py:class", "spack.spec.DependencySpec"),
("py:class", "spack.install_test.Pb"),
]
# The reST default role (used for this markup: `text`) to use for all documents.

File diff suppressed because it is too large Load Diff

View File

@ -289,9 +289,14 @@ def _check_build_test_callbacks(pkgs, error_cls):
pkg_cls = spack.repo.path.get_pkg_class(pkg_name)
test_callbacks = getattr(pkg_cls, "build_time_test_callbacks", None)
if test_callbacks and "test" in test_callbacks:
msg = '{0} package contains "test" method in ' "build_time_test_callbacks"
instr = 'Remove "test" from: [{0}]'.format(", ".join(test_callbacks))
# TODO (post-34236): "test*"->"test_*" once remove deprecated methods
# TODO (post-34236): "test"->"test_" once remove deprecated methods
has_test_method = test_callbacks and any([m.startswith("test") for m in test_callbacks])
if has_test_method:
msg = '{0} package contains "test*" method(s) in ' "build_time_test_callbacks"
instr = 'Remove all methods whose names start with "test" from: [{0}]'.format(
", ".join(test_callbacks)
)
errors.append(error_cls(msg.format(pkg_name), [instr]))
return errors

View File

@ -43,6 +43,7 @@
from typing import List, Tuple
import llnl.util.tty as tty
from llnl.util.filesystem import join_path
from llnl.util.lang import dedupe
from llnl.util.symlink import symlink
from llnl.util.tty.color import cescape, colorize
@ -53,7 +54,6 @@
import spack.build_systems.python
import spack.builder
import spack.config
import spack.install_test
import spack.main
import spack.package_base
import spack.paths
@ -66,6 +66,7 @@
import spack.util.path
import spack.util.pattern
from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import spack_install_test_log
from spack.installer import InstallError
from spack.util.cpus import cpus_available
from spack.util.environment import (
@ -1075,19 +1076,18 @@ def _setup_pkg_and_run(
# 'pkg' is not defined yet
pass
elif context == "test":
logfile = os.path.join(
pkg.test_suite.stage, spack.install_test.TestSuite.test_log_name(pkg.spec)
)
logfile = os.path.join(pkg.test_suite.stage, pkg.test_suite.test_log_name(pkg.spec))
error_msg = str(exc)
if isinstance(exc, (spack.multimethod.NoSuchMethodError, AttributeError)):
process = "test the installation" if context == "test" else "build from sources"
error_msg = (
"The '{}' package cannot find an attribute while trying to build "
"from sources. This might be due to a change in Spack's package format "
"The '{}' package cannot find an attribute while trying to {}. "
"This might be due to a change in Spack's package format "
"to support multiple build-systems for a single package. You can fix this "
"by updating the build recipe, and you can also report the issue as a bug. "
"by updating the {} recipe, and you can also report the issue as a bug. "
"More information at https://spack.readthedocs.io/en/latest/packaging_guide.html#installation-procedure"
).format(pkg.name)
).format(pkg.name, process, context)
error_msg = colorize("@*R{{{}}}".format(error_msg))
error_msg = "{}\n\n{}".format(str(exc), error_msg)
@ -1360,6 +1360,13 @@ def long_message(self):
out.write("See {0} log for details:\n".format(self.log_type))
out.write(" {0}\n".format(self.log_name))
# Also output the test log path IF it exists
if self.context != "test":
test_log = join_path(os.path.dirname(self.log_name), spack_install_test_log)
if os.path.isfile(test_log):
out.write("\nSee test log for details:\n")
                out.write("  {0}\n".format(test_log))
return out.getvalue()
def __str__(self):

View File

@ -108,7 +108,10 @@ def execute_build_time_tests(builder: spack.builder.Builder):
builder: builder prescribing the test callbacks. The name of the callbacks is
stored as a list of strings in the ``build_time_test_callbacks`` attribute.
"""
builder.pkg.run_test_callbacks(builder, builder.build_time_test_callbacks, "build")
if not builder.pkg.run_tests or not builder.build_time_test_callbacks:
return
builder.pkg.tester.phase_tests(builder, "build", builder.build_time_test_callbacks)
def execute_install_time_tests(builder: spack.builder.Builder):
@ -118,7 +121,10 @@ def execute_install_time_tests(builder: spack.builder.Builder):
builder: builder prescribing the test callbacks. The name of the callbacks is
stored as a list of strings in the ``install_time_test_callbacks`` attribute.
"""
builder.pkg.run_test_callbacks(builder, builder.install_time_test_callbacks, "install")
if not builder.pkg.run_tests or not builder.install_time_test_callbacks:
return
builder.pkg.tester.phase_tests(builder, "install", builder.install_time_test_callbacks)
class BaseBuilder(spack.builder.Builder):

View File

@ -130,9 +130,11 @@ def __init__(self, wrapped_pkg_object, root_builder):
bases,
{
"run_tests": property(lambda x: x.wrapped_package_object.run_tests),
"test_log_file": property(lambda x: x.wrapped_package_object.test_log_file),
"test_failures": property(lambda x: x.wrapped_package_object.test_failures),
"test_requires_compiler": property(
lambda x: x.wrapped_package_object.test_requires_compiler
),
"test_suite": property(lambda x: x.wrapped_package_object.test_suite),
"tester": property(lambda x: x.wrapped_package_object.tester),
},
)
new_cls.__module__ = package_cls.__module__

View File

@ -2456,7 +2456,16 @@ def populate_buildgroup(self, job_names):
msg = "Error response code ({0}) in populate_buildgroup".format(response_code)
tty.warn(msg)
def report_skipped(self, spec, directory_name, reason):
def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optional[str]):
        """Explicitly report skipping testing of a spec (e.g., its CI
        configuration identifies it as known to have broken tests or
        the CI installation failed).
Args:
spec: spec being tested
report_dir: directory where the report will be written
reason: reason the test is being skipped
"""
configuration = CDashConfiguration(
upload_url=self.upload_url,
packages=[spec.name],
@ -2466,7 +2475,7 @@ def report_skipped(self, spec, directory_name, reason):
track=None,
)
reporter = CDash(configuration=configuration)
reporter.test_skipped_report(directory_name, spec, reason)
reporter.test_skipped_report(report_dir, spec, reason)
def translate_deprecated_config(config):

View File

@ -5,7 +5,6 @@
from __future__ import print_function
import inspect
import textwrap
from itertools import zip_longest
@ -15,9 +14,10 @@
import spack.cmd.common.arguments as arguments
import spack.fetch_strategy as fs
import spack.install_test
import spack.repo
import spack.spec
from spack.package_base import has_test_method, preferred_version
from spack.package_base import preferred_version
description = "get detailed information on a particular package"
section = "basic"
@ -261,41 +261,7 @@ def print_tests(pkg):
# if it has been overridden and, therefore, assumed to be implemented.
color.cprint("")
color.cprint(section_title("Stand-Alone/Smoke Test Methods:"))
names = []
pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
if has_test_method(pkg_cls):
pkg_base = spack.package_base.PackageBase
test_pkgs = [
str(cls.test)
for cls in inspect.getmro(pkg_cls)
if issubclass(cls, pkg_base) and cls.test != pkg_base.test
]
test_pkgs = list(set(test_pkgs))
names.extend([(test.split()[1]).lower() for test in test_pkgs])
# TODO Refactor START
# Use code from package_base.py's test_process IF this functionality is
# accepted.
v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))
# hack for compilers that are not dependencies (yet)
# TODO: this all eventually goes away
c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
if pkg.name in c_names:
v_names.extend(["c", "cxx", "fortran"])
if pkg.spec.intersects("llvm+clang"):
v_names.extend(["c", "cxx"])
# TODO Refactor END
v_specs = [spack.spec.Spec(v_name) for v_name in v_names]
for v_spec in v_specs:
try:
pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)
if has_test_method(pkg_cls):
names.append("{0}.test".format(pkg_cls.name.lower()))
except spack.repo.UnknownPackageError:
pass
names = spack.install_test.test_function_names(pkg, add_virtuals=True)
if names:
colify(sorted(names), indent=4)
else:

View File

@ -11,6 +11,7 @@
import re
import shutil
import sys
from collections import Counter
from llnl.util import lang, tty
from llnl.util.tty import colify
@ -236,9 +237,8 @@ def test_list(args):
tagged = set(spack.repo.path.packages_with_tags(*args.tag)) if args.tag else set()
def has_test_and_tags(pkg_class):
return spack.package_base.has_test_method(pkg_class) and (
not args.tag or pkg_class.name in tagged
)
tests = spack.install_test.test_functions(pkg_class)
return len(tests) and (not args.tag or pkg_class.name in tagged)
if args.list_all:
report_packages = [
@ -358,18 +358,17 @@ def _report_suite_results(test_suite, args, constraints):
tty.msg("test specs:")
failed, skipped, untested = 0, 0, 0
counts = Counter()
for pkg_id in test_specs:
if pkg_id in results:
status = results[pkg_id]
if status == "FAILED":
failed += 1
elif status == "NO-TESTS":
untested += 1
elif status == "SKIPPED":
skipped += 1
# Backward-compatibility: NO-TESTS => NO_TESTS
status = "NO_TESTS" if status == "NO-TESTS" else status
if args.failed and status != "FAILED":
status = spack.install_test.TestStatus[status]
counts[status] += 1
if args.failed and status != spack.install_test.TestStatus.FAILED:
continue
msg = " {0} {1}".format(pkg_id, status)
@ -381,7 +380,7 @@ def _report_suite_results(test_suite, args, constraints):
msg += "\n{0}".format("".join(f.readlines()))
tty.msg(msg)
spack.install_test.write_test_summary(failed, skipped, untested, len(test_specs))
spack.install_test.write_test_summary(counts)
else:
msg = "Test %s has no results.\n" % test_suite.name
msg += " Check if it is running with "

View File

@ -3,34 +3,77 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import base64
import contextlib
import enum
import hashlib
import inspect
import io
import os
import re
import shutil
import sys
from collections import Counter, OrderedDict
from typing import Callable, List, Optional, Tuple, Type, TypeVar, Union
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.lang import nullcontext
from llnl.util.tty.color import colorize
import spack.error
import spack.paths
import spack.util.spack_json as sjson
from spack.installer import InstallError
from spack.spec import Spec
from spack.util.prefix import Prefix
from spack.util.string import plural
#: Stand-alone test failure info type
TestFailureType = Tuple[BaseException, str]
#: Name of the test suite's (JSON) lock file
test_suite_filename = "test_suite.lock"
#: Name of the test suite results (summary) file
results_filename = "results.txt"
#: Name of the Spack install phase-time test log file
spack_install_test_log = "install-time-test-log.txt"
def get_escaped_text_output(filename):
ListOrStringType = Union[str, List[str]]
LogType = Union["tty.log.nixlog", "tty.log.winlog"]
Pb = TypeVar("Pb", bound="spack.package_base.PackageBase")
PackageObjectOrClass = Union[Pb, Type[Pb]]
class TestStatus(enum.Enum):
    """Names of different stand-alone test states."""

    NO_TESTS = -1
    SKIPPED = 0
    FAILED = 1
    PASSED = 2

    def __str__(self):
        """Render the status as its bare member name (e.g., ``PASSED``)."""
        return self.name

    def lower(self):
        """Return the member name in lowercase (e.g., ``passed``)."""
        return self.name.lower()
def get_escaped_text_output(filename: str) -> List[str]:
"""Retrieve and escape the expected text output from the file
Args:
filename (str): path to the file
filename: path to the file
Returns:
list: escaped text lines read from the file
escaped text lines read from the file
"""
with open(filename, "r") as f:
with open(filename) as f:
# Ensure special characters are escaped as needed
expected = f.read()
@ -52,6 +95,651 @@ def get_test_stage_dir():
)
def cache_extra_test_sources(pkg: Pb, srcs: ListOrStringType):
    """Copy relative source paths to the corresponding install test subdir

    This routine is intended as an optional install test setup helper for
    grabbing source files/directories during the installation process and
    copying them to the installation test subdirectory for subsequent use
    during install testing.

    Args:
        pkg: package being tested
        srcs: relative path for file(s) and or subdirectory(ies) located in
            the staged source path that are to be copied to the corresponding
            location(s) under the install testing directory.

    Raises:
        spack.installer.InstallError: if any of the source paths are absolute
            or do not exist under the build stage
    """
    problems = []
    rel_paths = [srcs] if isinstance(srcs, str) else srcs
    for rel_path in rel_paths:
        pre = f"Source path ('{rel_path}')"
        # Absolute paths cannot be located under the stage, so reject them.
        if os.path.isabs(rel_path):
            problems.append(f"{pre} must be relative to the build stage directory.")
            continue
        origin = os.path.join(pkg.stage.source_path, rel_path)
        target = os.path.join(install_test_root(pkg), rel_path)
        if os.path.isdir(origin):
            fs.install_tree(origin, target)
        elif os.path.exists(origin):
            fs.mkdirp(os.path.dirname(target))
            fs.copy(origin, target)
        else:
            problems.append(f"{pre} for the copy does not exist")
    if problems:
        raise InstallError("\n".join(problems), pkg=pkg)
def check_outputs(expected: Union[list, set, str], actual: str):
    """Ensure the expected outputs are contained in the actual outputs.

    Args:
        expected: expected raw output string(s)
        actual: actual output string

    Raises:
        RuntimeError: the expected output is not found in the actual output
    """
    # Normalize a lone string into an iterable of checks; each check is
    # applied to the actual output as a regular expression.
    checks = expected if isinstance(expected, (list, set)) else [expected]
    errors = [
        f"Expected '{check}' in output '{actual}'"
        for check in checks
        if not re.search(check, actual)
    ]
    if errors:
        raise RuntimeError("\n ".join(errors))
def find_required_file(
    root: str, filename: str, expected: int = 1, recursive: bool = True
) -> ListOrStringType:
    """Find the required file(s) under the root directory.

    Args:
        root: root directory for the search
        filename: name of the file being located
        expected: expected number of files to be found under the directory
            (default is 1)
        recursive: ``True`` if subdirectories are to be recursively searched,
            else ``False`` (default is ``True``)

    Returns: the path(s), relative to root, to the required file(s)

    Raises:
        Exception: SkipTest when number of files detected does not match expected
    """
    matches = fs.find(root, filename, recursive=recursive)
    found = len(matches)
    if found == expected:
        # A single expected match is returned bare rather than as a list.
        return matches[0] if expected == 1 else matches
    detail = ": {}".format(", ".join(matches)) if found else ""
    raise SkipTest(
        "Expected {} of {} under {} but {} found{}".format(
            plural(expected, "copy", "copies"),
            filename,
            root,
            plural(found, "copy", "copies"),
            detail,
        )
    )
def install_test_root(pkg: Pb) -> str:
    """The install test root directory.

    This is the ``test`` subdirectory under the package's metadata directory,
    used to cache extra test sources at install time.

    Args:
        pkg: package being tested
    """
    return os.path.join(pkg.metadata_dir, "test")
def print_message(logger: LogType, msg: str, verbose: bool = False):
    """Print the message to the log, optionally echoing.

    Args:
        logger: instance of the output logger (e.g. nixlog or winlog)
        msg: message being output
        verbose: ``True`` displays verbose output, ``False`` suppresses
            it (``False`` is default)
    """
    if not verbose:
        tty.info(msg, format="g")
        return
    # Force the message to echo to stdout as well as the log file.
    with logger.force_echo():
        tty.info(msg, format="g")
class PackageTest:
    """The class that manages stand-alone (post-install) package tests."""
    def __init__(self, pkg: Pb):
        """
        Args:
            pkg: package being tested

        Raises:
            ValueError: if the package is not concrete
        """
        if not pkg.spec.concrete:
            raise ValueError("Stand-alone tests require a concrete package")
        # Tally of test-part results keyed by TestStatus
        self.counts: "Counter" = Counter()  # type: ignore[attr-defined]
        self.pkg = pkg
        # Collected (exception, message) pairs for failed parts
        self.test_failures: List[TestFailureType] = []
        # Per-part status in execution order (part name -> TestStatus)
        self.test_parts: OrderedDict[str, "TestStatus"] = OrderedDict()
        self.test_log_file: str
        self.pkg_id: str
        if pkg.test_suite:
            # Running stand-alone tests
            self.test_log_file = pkg.test_suite.log_file_for_spec(pkg.spec)
            self.tested_file = pkg.test_suite.tested_file_for_spec(pkg.spec)
            self.pkg_id = pkg.test_suite.test_pkg_id(pkg.spec)
        else:
            # Running phase-time tests for a single package whose results are
            # retained in the package's stage directory.
            pkg.test_suite = TestSuite([pkg.spec])
            self.test_log_file = fs.join_path(pkg.stage.path, spack_install_test_log)
            self.pkg_id = pkg.spec.format("{name}-{version}-{hash:7}")
        # Internal logger for test part processing
        self._logger = None
    @property
    def logger(self) -> Optional[LogType]:
        """The current logger or, if none, sets to one."""
        if not self._logger:
            # Lazily create a log_output writing to the test log file
            self._logger = tty.log.log_output(self.test_log_file)
        return self._logger
    @contextlib.contextmanager
    def test_logger(self, verbose: bool = False, externals: bool = False):
        """Context manager for setting up the test logger

        Args:
            verbose: Display verbose output, including echoing to stdout,
                otherwise suppress it
            externals: ``True`` for performing tests if external package,
                ``False`` to skip them
        """
        fs.touch(self.test_log_file)  # Otherwise log_parse complains
        fs.set_install_permissions(self.test_log_file)
        with tty.log.log_output(self.test_log_file, verbose) as self._logger:
            with self.logger.force_echo():  # type: ignore[union-attr]
                tty.msg("Testing package " + colorize(r"@*g{" + self.pkg_id + r"}"))
            # use debug print levels for log file to record commands
            old_debug = tty.is_debug()
            tty.set_debug(True)
            try:
                yield self.logger
            finally:
                # reset debug level
                tty.set_debug(old_debug)
    @property
    def archived_install_test_log(self) -> str:
        # Path of the archived phase-time test log in the install metadata dir
        return fs.join_path(self.pkg.metadata_dir, spack_install_test_log)
    def archive_install_test_log(self, dest_dir: str):
        # Copy the staged test log into the installed package's metadata dir.
        # NOTE(review): dest_dir appears unused here -- archiving always
        # targets archived_install_test_log; confirm whether it is vestigial.
        if os.path.exists(self.test_log_file):
            fs.install(self.test_log_file, self.archived_install_test_log)
    def add_failure(self, exception: Exception, msg: str):
        """Add the failure details to the current list."""
        self.test_failures.append((exception, msg))
    def status(self, name: str, status: "TestStatus", msg: Optional[str] = None):
        """Track and print the test status for the test part name."""
        part_name = f"{self.pkg.__class__.__name__}::{name}"
        extra = "" if msg is None else f": {msg}"
        # Handle the special case of a test part consisting of subparts.
        # The containing test part can be PASSED while sub-parts (assumed
        # to start with the same name) may not have PASSED.  This extra
        # check is used to ensure the containing test part is not claiming
        # to have passed when at least one subpart failed.
        if status == TestStatus.PASSED:
            for pname, substatus in self.test_parts.items():
                if pname != part_name and pname.startswith(part_name):
                    if substatus == TestStatus.FAILED:
                        # Demote the containing part to the failed substatus
                        print(f"{substatus}: {part_name}{extra}")
                        self.test_parts[part_name] = substatus
                        self.counts[substatus] += 1
                        return
        print(f"{status}: {part_name}{extra}")
        self.test_parts[part_name] = status
        self.counts[status] += 1
    def phase_tests(
        self, builder: spack.builder.Builder, phase_name: str, method_names: List[str]
    ):
        """Execute the builder's package phase-time tests.

        Args:
            builder: builder for package being tested
            phase_name: the name of the build-time phase (e.g., ``build``, ``install``)
            method_names: phase-specific callback method names
        """
        verbose = tty.is_verbose()
        fail_fast = spack.config.get("config:fail_fast", False)
        with self.test_logger(verbose=verbose, externals=False) as logger:
            # Report running each of the methods in the build log
            print_message(logger, f"Running {phase_name}-time tests", verbose)
            builder.pkg.test_suite.current_test_spec = builder.pkg.spec
            builder.pkg.test_suite.current_base_spec = builder.pkg.spec
            # TODO (post-34236): "test"->"test_" once remove deprecated methods
            have_tests = any(name.startswith("test") for name in method_names)
            if have_tests:
                copy_test_files(builder.pkg, builder.pkg.spec)
            for name in method_names:
                try:
                    # Prefer the method in the package over the builder's.
                    # We need this primarily to pick up arbitrarily named test
                    # methods but also some build-time checks.
                    fn = getattr(builder.pkg, name, getattr(builder, name))
                    msg = f"RUN-TESTS: {phase_name}-time tests [{name}]"
                    print_message(logger, msg, verbose)
                    fn()
                except AttributeError as e:
                    # Neither the package nor the builder has the callback
                    msg = f"RUN-TESTS: method not implemented [{name}]"
                    print_message(logger, msg, verbose)
                    self.add_failure(e, msg)
                    if fail_fast:
                        break
            if have_tests:
                print_message(logger, "Completed testing", verbose)
            # Raise any collected failures here
            if self.test_failures:
                raise TestFailure(self.test_failures)
    def stand_alone_tests(self, kwargs):
        """Run the package's stand-alone tests.

        Args:
            kwargs (dict): arguments to be used by the test process
        """
        # imported locally, presumably to avoid an import cycle -- confirm
        import spack.build_environment
        spack.build_environment.start_build_process(self.pkg, test_process, kwargs)
    def parts(self) -> int:
        """The total number of (checked) test parts."""
        try:
            # New in Python 3.10
            total = self.counts.total()  # type: ignore[attr-defined]
        except AttributeError:
            # Fallback for older Pythons: sum the tallies manually
            nums = [n for _, n in self.counts.items()]
            total = sum(nums)
        return total
    def print_log_path(self):
        """Print the test log file path."""
        # Prefer the archived (installed) log; fall back to the staged one
        log = self.archived_install_test_log
        if not os.path.isfile(log):
            log = self.test_log_file
            if not (log and os.path.isfile(log)):
                tty.debug("There is no test log file (staged or installed)")
                return
        print(f"\nSee test results at:\n {log}")
    def ran_tests(self) -> bool:
        """``True`` if ran tests, ``False`` otherwise."""
        return self.parts() > self.counts[TestStatus.NO_TESTS]
    def summarize(self):
        """Collect test results summary lines for this spec."""
        lines = []
        lines.append("{:=^80}".format(f" SUMMARY: {self.pkg_id} "))
        for name, status in self.test_parts.items():
            msg = f"{name} .. {status}"
            lines.append(msg)
        # e.g. "2 passed, 1 failed" -- only non-zero tallies are reported
        summary = [f"{n} {s.lower()}" for s, n in self.counts.items() if n > 0]
        totals = " {} of {} parts ".format(", ".join(summary), self.parts())
        lines.append(f"{totals:=^80}")
        return lines
@contextlib.contextmanager
def test_part(pkg: Pb, test_name: str, purpose: str, work_dir: str = ".", verbose: bool = False):
    """Context manager that runs its body as a named stand-alone test part.

    Reports PASSED on normal exit, SKIPPED when the body raises ``SkipTest``,
    and FAILED (recording the failure with the package's tester) on any other
    exception.

    Args:
        pkg: package being tested
        test_name: name of the test part; must start with ``test``
        purpose: one-line description of what the part checks
        work_dir: directory in which the part runs (created if needed)
        verbose: ``True`` echoes the part title to stdout
    """
    wdir = "." if work_dir is None else work_dir
    tester = pkg.tester
    # TODO (post-34236): "test"->"test_" once remove deprecated methods
    assert test_name and test_name.startswith(
        "test"
    ), f"Test name must start with 'test' but {test_name} was provided"
    if test_name == "test":
        tty.warn(
            "{}: the 'test' method is deprecated. Convert stand-alone "
            "test(s) to methods with names starting 'test_'.".format(pkg.name)
        )
    title = "test: {}: {}".format(test_name, purpose or "unspecified purpose")
    with fs.working_dir(wdir, create=True):
        try:
            # Only echo the title to stdout when verbose was requested
            context = tester.logger.force_echo if verbose else nullcontext
            with context():
                tty.info(title, format="g")
            yield
            tester.status(test_name, TestStatus.PASSED)
        except SkipTest as e:
            tester.status(test_name, TestStatus.SKIPPED, str(e))
        except (AssertionError, BaseException) as e:
            # print a summary of the error to the log file
            # so that cdash and junit reporters know about it
            exc_type, _, tb = sys.exc_info()
            tester.status(test_name, TestStatus.FAILED, str(e))
            import traceback
            # remove the current call frame to exclude the extract_stack
            # call from the error
            stack = traceback.extract_stack()[:-1]
            # Package files have a line added at import time, so we re-read
            # the file to make line numbers match. We have to subtract two
            # from the line number because the original line number is
            # inflated once by the import statement and the lines are
            # displaced one by the import statement.
            for i, entry in enumerate(stack):
                filename, lineno, function, text = entry
                if spack.repo.is_package_file(filename):
                    with open(filename) as f:
                        lines = f.readlines()
                    new_lineno = lineno - 2
                    text = lines[new_lineno]
                    if isinstance(entry, tuple):
                        new_entry = (filename, new_lineno, function, text)
                        stack[i] = new_entry  # type: ignore[call-overload]
                    elif isinstance(entry, list):
                        stack[i][1] = new_lineno  # type: ignore[index]
            # Format and print the stack
            out = traceback.format_list(stack)
            for line in out:
                print(line.rstrip("\n"))
            if exc_type is spack.util.executable.ProcessError or exc_type is TypeError:
                iostr = io.StringIO()
                spack.build_environment.write_log_summary(
                    iostr, "test", tester.test_log_file, last=1
                )  # type: ignore[assignment]
                m = iostr.getvalue()
            else:
                # We're below the package context, so get context from
                # stack instead of from traceback.
                # The traceback is truncated here, so we can't use it to
                # traverse the stack.
                m = "\n".join(spack.build_environment.get_package_context(tb))
            exc = e  # e is deleted after this block
            # If we fail fast, raise another error
            if spack.config.get("config:fail_fast", False):
                raise TestFailure([(exc, m)])
            else:
                tester.add_failure(exc, m)
def copy_test_files(pkg: Pb, test_spec: spack.spec.Spec):
    """Copy the spec's cached and custom test files to the test stage directory.

    Args:
        pkg: package being tested
        test_spec: spec being tested, where the spec may be virtual

    Raises:
        TestSuiteError: package must be part of an active test suite
    """
    if pkg is None:
        raise TestSuiteError("Cannot copy test files without a package")
    if pkg.test_suite is None:
        raise TestSuiteError(f"{pkg.name}: Cannot copy test files: test suite is missing")

    # copy installed test sources cache into test stage dir
    if test_spec.concrete:
        cache_source = install_test_root(test_spec.package)
        cache_dir = pkg.test_suite.current_test_cache_dir
        if os.path.isdir(cache_source) and not os.path.exists(cache_dir):
            fs.install_tree(cache_source, cache_dir)

    # copy test data into test stage data dir
    try:
        pkg_cls = test_spec.package_class
    except spack.repo.UnknownPackageError:
        tty.debug(f"{test_spec.name}: skipping test data copy since no package class found")
        return
    data_source = Prefix(pkg_cls.package_dir).test
    data_dir = pkg.test_suite.current_test_data_dir
    if os.path.isdir(data_source) and not os.path.exists(data_dir):
        # We assume data dir is used read-only
        # maybe enforce this later
        shutil.copytree(data_source, data_dir)
def test_function_names(pkg: PackageObjectOrClass, add_virtuals: bool = False) -> List[str]:
    """Grab the names of all non-empty test functions.

    Args:
        pkg: package or package class of interest
        add_virtuals: ``True`` adds test methods of provided package
            virtual, ``False`` only returns test functions of the package

    Returns:
        names of non-empty test functions

    Raises:
        ValueError: occurs if pkg is not a package class
    """
    # Qualify each function name with the class that defines it.
    return [
        f"{cls_name}.{fn.__name__}" for cls_name, fn in test_functions(pkg, add_virtuals)
    ]
def test_functions(
    pkg: PackageObjectOrClass, add_virtuals: bool = False
) -> List[Tuple[str, Callable]]:
    """Grab all non-empty test functions.

    Args:
        pkg: package or package class of interest
        add_virtuals: ``True`` adds test methods of provided package
            virtual, ``False`` only returns test functions of the package

    Returns:
        list of non-empty test functions' (name, function)

    Raises:
        ValueError: occurs if pkg is not a package class
    """
    instance = isinstance(pkg, spack.package_base.PackageBase)
    if not (instance or issubclass(pkg, spack.package_base.PackageBase)):  # type: ignore[arg-type]
        raise ValueError(f"Expected a package (class), not {pkg} ({type(pkg)})")
    pkg_cls = pkg.__class__ if instance else pkg
    classes = [pkg_cls]
    if add_virtuals:
        # Also gather test functions from packages of provided virtuals
        vpkgs = virtuals(pkg)
        for vname in vpkgs:
            try:
                classes.append((Spec(vname)).package_class)
            except spack.repo.UnknownPackageError:
                tty.debug(f"{vname}: virtual does not appear to have a package file")
    # TODO (post-34236): Remove if removing empty test method check
    def skip(line):
        # This should match the lines in the deprecated test() method
        ln = line.strip()
        return ln.startswith("#") or ("warn" in ln and "deprecated" in ln)
    # Matches a simple docstring so it can be stripped before the
    # empty-method check below
    doc_regex = r'\s+("""[\w\s\(\)\-\,\;\:]+""")'
    tests = []
    for clss in classes:
        methods = inspect.getmembers(clss, predicate=lambda x: inspect.isfunction(x))
        for name, test_fn in methods:
            # TODO (post-34236): "test"->"test_" once remove deprecated methods
            if not name.startswith("test"):
                continue
            # TODO (post-34236): Could remove empty method check once remove
            # TODO (post-34236): deprecated methods though some use cases,
            # TODO (post-34236): such as checking packages have actual, non-
            # TODO (post-34236): empty tests, may want this check to remain.
            # Strip the docstring and def line, then drop comment/deprecation
            # lines; whatever remains is the method's effective body.
            source = re.sub(doc_regex, r"", inspect.getsource(test_fn)).splitlines()[1:]
            lines = [ln.strip() for ln in source if not skip(ln)]
            if not lines:
                continue
            tests.append((clss.__name__, test_fn))  # type: ignore[union-attr]
    return tests
def process_test_parts(pkg: Pb, test_specs: List[spack.spec.Spec], verbose: bool = False):
    """Process test parts associated with the package.

    Args:
        pkg: package being tested
        test_specs: list of test specs
        verbose: Display verbose output (suppress by default)

    Raises:
        TestSuiteError: package must be part of an active test suite
    """
    if pkg is None or pkg.test_suite is None:
        base = "Cannot process tests"
        msg = (
            f"{base} without a package"
            if pkg is None
            else f"{pkg.name}: {base}: test suite is missing"
        )
        raise TestSuiteError(msg)
    test_suite = pkg.test_suite
    tester = pkg.tester
    try:
        work_dir = test_suite.test_dir_for_spec(pkg.spec)
        for spec in test_specs:
            test_suite.current_test_spec = spec
            # grab test functions associated with the spec, which may be virtual
            try:
                tests = test_functions(spec.package_class)
            except spack.repo.UnknownPackageError:
                # some virtuals don't have a package
                tests = []
            if len(tests) == 0:
                tester.status(spec.name, TestStatus.NO_TESTS)
                continue
            # copy custom and cached test files to the test stage directory
            copy_test_files(pkg, spec)
            # Run the tests
            for _, test_fn in tests:
                with test_part(
                    pkg,
                    test_fn.__name__,
                    purpose=getattr(test_fn, "__doc__"),
                    work_dir=work_dir,
                    verbose=verbose,
                ):
                    test_fn(pkg)
        # If fail-fast was on, we error out above
        # If we collect errors, raise them in batch here
        if tester.test_failures:
            raise TestFailure(tester.test_failures)
    finally:
        # Summarize results even when a failure was raised above
        if tester.ran_tests():
            fs.touch(tester.tested_file)
            # log one more test message to provide a completion timestamp
            # for CDash reporting
            tty.msg("Completed testing")
            lines = tester.summarize()
            tty.msg("\n{}".format("\n".join(lines)))
            if tester.test_failures:
                # Print the test log file path
                tty.msg(f"\n\nSee test results at:\n {tester.test_log_file}")
        else:
            tty.msg("No tests to run")
def test_process(pkg: Pb, kwargs):
    """Run stand-alone tests for the package within a test logging context.

    Args:
        pkg: the package whose tests are to be processed
        kwargs: recognized keys are ``verbose`` (default ``True``) and
            ``externals`` (default ``False``)
    """
    verbose = kwargs.get("verbose", True)
    externals = kwargs.get("externals", False)
    tester = pkg.tester
    with tester.test_logger(verbose, externals) as logger:
        # Determine up front whether the spec must be skipped.
        skip_reason = None
        if pkg.spec.external and not externals:
            skip_reason = "Skipped tests for external package"
        elif not pkg.spec.installed:
            skip_reason = "Skipped not installed package"
        if skip_reason is not None:
            print_message(logger, skip_reason, verbose)
            tester.status(pkg.spec.name, TestStatus.SKIPPED)
            return

        # run test methods from the package and all virtuals it provides
        specs_to_test = [pkg.spec]
        for v_name in sorted(virtuals(pkg)):
            specs_to_test.append(spack.spec.Spec(v_name))
        process_test_parts(pkg, specs_to_test, verbose)
def virtuals(pkg):
    """Return a list of unique virtuals for the package.

    Args:
        pkg: package of interest

    Returns: names of unique virtual packages
    """
    # provided virtuals have to be deduped by name
    names = list({vspec.name for vspec in pkg.virtuals_provided})

    # hack for compilers that are not dependencies (yet)
    # TODO: this all eventually goes away
    compiler_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
    if pkg.name in compiler_names:
        names += ["c", "cxx", "fortran"]
    if pkg.spec.satisfies("llvm+clang"):
        names += ["c", "cxx"]
    return names
def get_all_test_suites():
"""Retrieves all validly staged TestSuites
@ -83,7 +771,7 @@ def get_named_test_suites(name):
list: a list of matching TestSuite instances, which may be empty if none
Raises:
TestSuiteNameError: If no name is provided
Exception: TestSuiteNameError if no name is provided
"""
if not name:
raise TestSuiteNameError("Test suite name is required.")
@ -92,22 +780,22 @@ def get_named_test_suites(name):
return [ts for ts in test_suites if ts.name == name]
def get_test_suite(name):
def get_test_suite(name: str) -> Optional["TestSuite"]:
"""Ensure there is only one matching test suite with the provided name.
Returns:
str or None: the name if one matching test suite, else None
the name if one matching test suite, else None
Raises:
TestSuiteNameError: If there is more than one matching TestSuite
TestSuiteNameError: If there are more than one matching TestSuites
"""
names = get_named_test_suites(name)
if len(names) > 1:
raise TestSuiteNameError('Too many suites named "{0}". May shadow hash.'.format(name))
suites = get_named_test_suites(name)
if len(suites) > 1:
raise TestSuiteNameError(f"Too many suites named '{name}'. May shadow hash.")
if not names:
if not suites:
return None
return names[0]
return suites[0]
def write_test_suite_file(suite):
@ -116,24 +804,25 @@ def write_test_suite_file(suite):
sjson.dump(suite.to_dict(), stream=f)
def write_test_summary(num_failed, num_skipped, num_untested, num_specs):
"""Write a well formatted summary of the totals for each relevant status
category."""
failed = "{0} failed, ".format(num_failed) if num_failed else ""
skipped = "{0} skipped, ".format(num_skipped) if num_skipped else ""
no_tests = "{0} no-tests, ".format(num_untested) if num_untested else ""
num_passed = num_specs - num_failed - num_untested - num_skipped
def write_test_summary(counts: "Counter"):
"""Write summary of the totals for each relevant status category.
print(
"{:=^80}".format(
" {0}{1}{2}{3} passed of {4} specs ".format(
failed, no_tests, skipped, num_passed, num_specs
)
)
)
Args:
counts: counts of the occurrences of relevant test status types
"""
summary = [f"{n} {s.lower()}" for s, n in counts.items() if n > 0]
try:
# New in Python 3.10
total = counts.total() # type: ignore[attr-defined]
except AttributeError:
nums = [n for _, n in counts.items()]
total = sum(nums)
if total:
print("{:=^80}".format(" {} of {} ".format(", ".join(summary), plural(total, "spec"))))
class TestSuite(object):
class TestSuite:
"""The class that manages specs for ``spack test run`` execution."""
def __init__(self, specs, alias=None):
@ -147,7 +836,7 @@ def __init__(self, specs, alias=None):
self._hash = None
self._stage = None
self.fails = 0
self.counts: "Counter" = Counter()
@property
def name(self):
@ -173,12 +862,11 @@ def __call__(self, *args, **kwargs):
fail_first = kwargs.get("fail_first", False)
externals = kwargs.get("externals", False)
skipped, untested = 0, 0
for spec in self.specs:
try:
if spec.package.test_suite:
raise TestSuiteSpecError(
"Package {0} cannot be run in two test suites at once".format(
"Package {} cannot be run in two test suites at once".format(
spec.package.name
)
)
@ -201,45 +889,55 @@ def __call__(self, *args, **kwargs):
if remove_directory:
shutil.rmtree(test_dir)
# Log test status based on whether any non-pass-only test
# functions were called
tested = os.path.exists(self.tested_file_for_spec(spec))
if tested:
status = "PASSED"
status = TestStatus.PASSED
else:
self.ensure_stage()
if spec.external and not externals:
status = "SKIPPED"
skipped += 1
status = TestStatus.SKIPPED
elif not spec.installed:
status = "SKIPPED"
skipped += 1
status = TestStatus.SKIPPED
else:
status = "NO-TESTS"
untested += 1
status = TestStatus.NO_TESTS
self.counts[status] += 1
self.write_test_result(spec, status)
except BaseException as exc:
self.fails += 1
tty.debug("Test failure: {0}".format(str(exc)))
status = TestStatus.FAILED
self.counts[status] += 1
tty.debug(f"Test failure: {str(exc)}")
if isinstance(exc, (SyntaxError, TestSuiteSpecError)):
# Create the test log file and report the error.
self.ensure_stage()
msg = "Testing package {0}\n{1}".format(self.test_pkg_id(spec), str(exc))
msg = f"Testing package {self.test_pkg_id(spec)}\n{str(exc)}"
_add_msg_to_file(self.log_file_for_spec(spec), msg)
self.write_test_result(spec, "FAILED")
msg = f"Test failure: {str(exc)}"
_add_msg_to_file(self.log_file_for_spec(spec), msg)
self.write_test_result(spec, TestStatus.FAILED)
if fail_first:
break
finally:
spec.package.test_suite = None
self.current_test_spec = None
self.current_base_spec = None
write_test_summary(self.fails, skipped, untested, len(self.specs))
write_test_summary(self.counts)
if self.fails:
raise TestSuiteFailure(self.fails)
if self.counts[TestStatus.FAILED]:
for spec in self.specs:
print(
"\nSee {} test results at:\n {}".format(
spec.format("{name}-{version}-{hash:7}"), self.log_file_for_spec(spec)
)
)
failures = self.counts[TestStatus.FAILED]
if failures:
raise TestSuiteFailure(failures)
def ensure_stage(self):
"""Ensure the test suite stage directory exists."""
@ -272,7 +970,7 @@ def test_pkg_id(cls, spec):
"""The standard install test package identifier.
Args:
spec (spack.spec.Spec): instance of the spec under test
spec: instance of the spec under test
Returns:
str: the install test package identifier
@ -379,7 +1077,7 @@ def write_test_result(self, spec, result):
spec (spack.spec.Spec): instance of the spec under test
result (str): result from the spec's test execution (e.g, PASSED)
"""
msg = "{0} {1}".format(self.test_pkg_id(spec), result)
msg = f"{self.test_pkg_id(spec)} {result}"
_add_msg_to_file(self.results_file, msg)
def write_reproducibility_data(self):
@ -419,9 +1117,8 @@ def from_dict(d):
specs: list of the test suite's specs in dictionary form
alias: the test suite alias
Returns:
TestSuite: Instance of TestSuite created from the specs
TestSuite: Instance created from the specs
"""
specs = [Spec.from_dict(spec_dict) for spec_dict in d["specs"]]
alias = d.get("alias", None)
@ -435,16 +1132,19 @@ def from_file(filename):
Args:
filename (str): The path to the JSON file containing the test
suite specs and optional alias.
Raises:
BaseException: sjson.SpackJSONError if problem parsing the file
"""
try:
with open(filename, "r") as f:
with open(filename) as f:
data = sjson.load(f)
test_suite = TestSuite.from_dict(data)
content_hash = os.path.basename(os.path.dirname(filename))
test_suite._hash = content_hash
return test_suite
except Exception as e:
raise sjson.SpackJSONError("error parsing JSON TestSuite:", str(e)) from e
raise sjson.SpackJSONError("error parsing JSON TestSuite:", e)
def _add_msg_to_file(filename, msg):
@ -455,20 +1155,29 @@ def _add_msg_to_file(filename, msg):
msg (str): message to be appended to the file
"""
with open(filename, "a+") as f:
f.write("{0}\n".format(msg))
f.write(f"{msg}\n")
class SkipTest(Exception):
"""Raised when a test (part) is being skipped."""
class TestFailure(spack.error.SpackError):
"""Raised when package tests have failed for an installation."""
def __init__(self, failures):
def __init__(self, failures: List[TestFailureType]):
# Failures are all exceptions
msg = "%d tests failed.\n" % len(failures)
num = len(failures)
msg = "{} failed.\n".format(plural(num, "test"))
for failure, message in failures:
msg += "\n\n%s\n" % str(failure)
msg += "\n%s\n" % message
super(TestFailure, self).__init__(msg)
super().__init__(msg)
class TestSuiteError(spack.error.SpackError):
"""Raised when there is an error with the test suite."""
class TestSuiteFailure(spack.error.SpackError):
@ -477,7 +1186,7 @@ class TestSuiteFailure(spack.error.SpackError):
def __init__(self, num_failures):
msg = "%d test(s) in the suite failed.\n" % num_failures
super(TestSuiteFailure, self).__init__(msg)
super().__init__(msg)
class TestSuiteSpecError(spack.error.SpackError):

View File

@ -278,6 +278,19 @@ def _print_installed_pkg(message):
print(colorize("@*g{[+]} ") + spack.util.path.debug_padded_filter(message))
def print_install_test_log(pkg: "spack.package_base.PackageBase"):
    """Output install test log file path but only if have test failures.

    Args:
        pkg: instance of the package under test
    """
    if pkg.run_tests and pkg.tester and pkg.tester.test_failures:
        # Tests ran and at least one part failed, so point at the log.
        pkg.tester.print_log_path()
def _print_timer(pre, pkg_id, timer):
phases = ["{}: {}.".format(p.capitalize(), _hms(timer.duration(p))) for p in timer.phases]
phases.append("Total: {}".format(_hms(timer.duration())))
@ -536,6 +549,25 @@ def install_msg(name, pid):
return pre + colorize("@*{Installing} @*g{%s}" % name)
def archive_install_logs(pkg, phase_log_dir):
    """
    Copy install logs to their destination directory(ies)
    Args:
        pkg (spack.package_base.PackageBase): the package that was built and installed
        phase_log_dir (str): path to the archive directory
    """
    # The full stdout + stderr of the build goes to the install log path.
    fs.install(pkg.log_path, pkg.install_log_path)

    # Each per-phase log is archived under the phase log directory by basename.
    for phase_log in pkg.phase_log_files:
        destination = os.path.join(phase_log_dir, os.path.basename(phase_log))
        fs.install(phase_log, destination)

    # The package archives its own install-phase test log, if present.
    pkg.archive_install_test_log()
def log(pkg):
"""
Copy provenance into the install directory on success
@ -553,22 +585,11 @@ def log(pkg):
# FIXME : this potentially catches too many things...
tty.debug(e)
# Archive the whole stdout + stderr for the package
fs.install(pkg.log_path, pkg.install_log_path)
# Archive all phase log paths
for phase_log in pkg.phase_log_files:
log_file = os.path.basename(phase_log)
log_file = os.path.join(os.path.dirname(packages_dir), log_file)
fs.install(phase_log, log_file)
archive_install_logs(pkg, os.path.dirname(packages_dir))
# Archive the environment modifications for the build.
fs.install(pkg.env_mods_path, pkg.install_env_path)
# Archive the install-phase test log, if present
if pkg.test_install_log_path and os.path.exists(pkg.test_install_log_path):
fs.install(pkg.test_install_log_path, pkg.install_test_install_log_path)
if os.path.exists(pkg.configure_args_path):
# Archive the args used for the build
fs.install(pkg.configure_args_path, pkg.install_configure_args_path)
@ -1932,14 +1953,17 @@ def run(self):
self._real_install()
# Run post install hooks before build stage is removed.
self.timer.start("post-install")
spack.hooks.post_install(self.pkg.spec, self.explicit)
self.timer.stop("post-install")
# Stop the timer and save results
self.timer.stop()
with open(self.pkg.times_log_path, "w") as timelog:
self.timer.write_json(timelog)
# Run post install hooks before build stage is removed.
spack.hooks.post_install(self.pkg.spec, self.explicit)
print_install_test_log(self.pkg)
_print_timer(pre=self.pre, pkg_id=self.pkg_id, timer=self.timer)
_print_installed_pkg(self.pkg.prefix)

View File

@ -69,7 +69,15 @@
from spack.builder import run_after, run_before
from spack.dependency import all_deptypes
from spack.directives import *
from spack.install_test import get_escaped_text_output
from spack.install_test import (
SkipTest,
cache_extra_test_sources,
check_outputs,
find_required_file,
get_escaped_text_output,
install_test_root,
test_part,
)
from spack.installer import (
ExternalPackageError,
InstallError,

View File

@ -25,13 +25,12 @@
import textwrap
import time
import traceback
import types
import warnings
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar
import llnl.util.filesystem as fsys
import llnl.util.tty as tty
from llnl.util.lang import classproperty, memoized, nullcontext
from llnl.util.lang import classproperty, memoized
from llnl.util.link_tree import LinkTree
import spack.compilers
@ -55,12 +54,18 @@
import spack.util.path
import spack.util.web
from spack.filesystem_view import YamlFilesystemView
from spack.install_test import TestFailure, TestSuite
from spack.install_test import (
PackageTest,
TestFailure,
TestStatus,
TestSuite,
cache_extra_test_sources,
install_test_root,
)
from spack.installer import InstallError, PackageInstaller
from spack.stage import ResourceStage, Stage, StageComposite, compute_stage_name
from spack.util.executable import ProcessError, which
from spack.util.package_hash import package_hash
from spack.util.prefix import Prefix
from spack.util.web import FetchError
from spack.version import GitVersion, StandardVersion, Version
@ -73,24 +78,21 @@
_ALLOWED_URL_SCHEMES = ["http", "https", "ftp", "file", "git"]
# Filename for the Spack build/install log.
#: Filename for the Spack build/install log.
_spack_build_logfile = "spack-build-out.txt"
# Filename for the Spack build/install environment file.
#: Filename for the Spack build/install environment file.
_spack_build_envfile = "spack-build-env.txt"
# Filename for the Spack build/install environment modifications file.
#: Filename for the Spack build/install environment modifications file.
_spack_build_envmodsfile = "spack-build-env-mods.txt"
# Filename for the Spack install phase-time test log.
_spack_install_test_log = "install-time-test-log.txt"
# Filename of json with total build and phase times (seconds)
_spack_times_log = "install_times.json"
# Filename for the Spack configure args file.
#: Filename for the Spack configure args file.
_spack_configure_argsfile = "spack-configure-args.txt"
#: Filename of json with total build and phase times (seconds)
spack_times_log = "install_times.json"
def deprecated_version(pkg, version):
"""Return True if the version is deprecated, False otherwise.
@ -181,8 +183,7 @@ class DetectablePackageMeta(object):
def __init__(cls, name, bases, attr_dict):
if hasattr(cls, "executables") and hasattr(cls, "libraries"):
msg = "a package can have either an 'executables' or 'libraries' attribute"
msg += " [package '{0.name}' defines both]"
raise spack.error.SpackError(msg.format(cls))
raise spack.error.SpackError(f"{msg} [package '{name}' defines both]")
# On windows, extend the list of regular expressions to look for
# filenames ending with ".exe"
@ -423,17 +424,7 @@ def remove_files_from_view(self, view, merge_map):
view.remove_files(merge_map.values())
def test_log_pathname(test_stage, spec):
"""Build the pathname of the test log file
Args:
test_stage (str): path to the test stage directory
spec (spack.spec.Spec): instance of the spec under test
Returns:
(str): the pathname of the test log file
"""
return os.path.join(test_stage, "test-{0}-out.txt".format(TestSuite.test_pkg_id(spec)))
Pb = TypeVar("Pb", bound="PackageBase")
class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
@ -638,19 +629,13 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
"tags",
]
#: Boolean. If set to ``True``, the smoke/install test requires a compiler.
#: This is currently used by smoke tests to ensure a compiler is available
#: to build a custom test code.
test_requires_compiler = False
#: Set to ``True`` to indicate the stand-alone test requires a compiler.
#: It is used to ensure a compiler and build dependencies like 'cmake'
#: are available to build a custom test code.
test_requires_compiler: bool = False
#: List of test failures encountered during a smoke/install test run.
test_failures = None
#: TestSuite instance used to manage smoke/install tests for one or more specs.
test_suite = None
#: Path to the log file used for tests
test_log_file = None
#: TestSuite instance used to manage stand-alone tests for 1+ specs.
test_suite: Optional["TestSuite"] = None
def __init__(self, spec):
# this determines how the package should be built.
@ -672,6 +657,7 @@ def __init__(self, spec):
# init internal variables
self._stage = None
self._fetcher = None
self._tester: Optional["PackageTest"] = None
# Set up timing variables
self._fetch_time = 0.0
@ -736,9 +722,9 @@ def possible_dependencies(
for name, conditions in cls.dependencies.items():
# check whether this dependency could be of the type asked for
types = [dep.type for cond, dep in conditions.items()]
types = set.union(*types)
if not any(d in types for d in deptype):
deptypes = [dep.type for cond, dep in conditions.items()]
deptypes = set.union(*deptypes)
if not any(d in deptypes for d in deptype):
continue
# expand virtuals if enabled, otherwise just stop at virtuals
@ -1148,30 +1134,41 @@ def configure_args_path(self):
"""Return the configure args file path associated with staging."""
return os.path.join(self.stage.path, _spack_configure_argsfile)
@property
def test_install_log_path(self):
"""Return the install phase-time test log file path, if set."""
return getattr(self, "test_log_file", None)
@property
def install_test_install_log_path(self):
"""Return the install location for the install phase-time test log."""
return fsys.join_path(self.metadata_dir, _spack_install_test_log)
@property
def times_log_path(self):
"""Return the times log json file."""
return os.path.join(self.metadata_dir, _spack_times_log)
return os.path.join(self.metadata_dir, spack_times_log)
@property
def install_configure_args_path(self):
"""Return the configure args file path on successful installation."""
return os.path.join(self.metadata_dir, _spack_configure_argsfile)
# TODO (post-34236): Update tests and all packages that use this as a
# TODO (post-34236): package method to the function already available
# TODO (post-34236): to packages. Once done, remove this property.
@property
def install_test_root(self):
"""Return the install test root directory."""
return os.path.join(self.metadata_dir, "test")
tty.warn(
"The 'pkg.install_test_root' property is deprecated with removal "
"expected v0.21. Use 'install_test_root(pkg)' instead."
)
return install_test_root(self)
def archive_install_test_log(self):
"""Archive the install-phase test log, if present."""
if getattr(self, "tester", None):
self.tester.archive_install_test_log(self.metadata_dir)
@property
def tester(self):
if not self.spec.versions.concrete:
raise ValueError("Cannot retrieve tester for package without concrete version.")
if not self._tester:
self._tester = PackageTest(self)
return self._tester
@property
def installed(self):
@ -1208,7 +1205,7 @@ def _make_fetcher(self):
@property
def fetcher(self):
if not self.spec.versions.concrete:
raise ValueError("Cannot retrieve fetcher for" " package without concrete version.")
raise ValueError("Cannot retrieve fetcher for package without concrete version.")
if not self._fetcher:
self._fetcher = self._make_fetcher()
return self._fetcher
@ -1842,6 +1839,9 @@ def do_install(self, **kwargs):
builder = PackageInstaller([(self, kwargs)])
builder.install()
# TODO (post-34236): Update tests and all packages that use this as a
# TODO (post-34236): package method to the routine made available to
# TODO (post-34236): packages. Once done, remove this method.
def cache_extra_test_sources(self, srcs):
"""Copy relative source paths to the corresponding install test subdir
@ -1856,45 +1856,13 @@ def cache_extra_test_sources(self, srcs):
be copied to the corresponding location(s) under the install
testing directory.
"""
paths = [srcs] if isinstance(srcs, str) else srcs
for path in paths:
src_path = os.path.join(self.stage.source_path, path)
dest_path = os.path.join(self.install_test_root, path)
if os.path.isdir(src_path):
fsys.install_tree(src_path, dest_path)
else:
fsys.mkdirp(os.path.dirname(dest_path))
fsys.copy(src_path, dest_path)
@contextlib.contextmanager
def _setup_test(self, verbose, externals):
self.test_failures = []
if self.test_suite:
self.test_log_file = self.test_suite.log_file_for_spec(self.spec)
self.tested_file = self.test_suite.tested_file_for_spec(self.spec)
pkg_id = self.test_suite.test_pkg_id(self.spec)
else:
self.test_log_file = fsys.join_path(self.stage.path, _spack_install_test_log)
self.test_suite = TestSuite([self.spec])
self.test_suite.stage = self.stage.path
pkg_id = self.spec.format("{name}-{version}-{hash:7}")
fsys.touch(self.test_log_file) # Otherwise log_parse complains
with tty.log.log_output(self.test_log_file, verbose) as logger:
with logger.force_echo():
tty.msg("Testing package {0}".format(pkg_id))
# use debug print levels for log file to record commands
old_debug = tty.is_debug()
tty.set_debug(True)
try:
yield logger
finally:
# reset debug level
tty.set_debug(old_debug)
msg = (
"'pkg.cache_extra_test_sources(srcs) is deprecated with removal "
"expected in v0.21. Use 'cache_extra_test_sources(pkg, srcs)' "
"instead."
)
warnings.warn(msg)
cache_extra_test_sources(self, srcs)
def do_test(self, dirty=False, externals=False):
if self.test_requires_compiler:
@ -1909,15 +1877,31 @@ def do_test(self, dirty=False, externals=False):
)
return
kwargs = {"dirty": dirty, "fake": False, "context": "test", "externals": externals}
if tty.is_verbose():
kwargs["verbose"] = True
spack.build_environment.start_build_process(self, test_process, kwargs)
kwargs = {
"dirty": dirty,
"fake": False,
"context": "test",
"externals": externals,
"verbose": tty.is_verbose(),
}
self.tester.stand_alone_tests(kwargs)
# TODO (post-34236): Remove this deprecated method when eliminate test,
# TODO (post-34236): run_test, etc.
@property
def _test_deprecated_warning(self):
alt = f"Use any name starting with 'test_' instead in {self.spec.name}."
return f"The 'test' method is deprecated. {alt}"
# TODO (post-34236): Remove this deprecated method when eliminate test,
# TODO (post-34236): run_test, etc.
def test(self):
# Defer tests to virtual and concrete packages
pass
warnings.warn(self._test_deprecated_warning)
# TODO (post-34236): Remove this deprecated method when eliminate test,
# TODO (post-34236): run_test, etc.
def run_test(
self,
exe,
@ -1925,7 +1909,7 @@ def run_test(
expected=[],
status=0,
installed=False,
purpose="",
purpose=None,
skip_missing=False,
work_dir=None,
):
@ -1947,22 +1931,56 @@ def run_test(
in the install prefix bin directory or the provided work_dir
work_dir (str or None): path to the smoke test directory
"""
def test_title(purpose, test_name):
if not purpose:
return f"test: {test_name}: execute {test_name}"
match = re.search(r"test: ([^:]*): (.*)", purpose)
if match:
# The test title has all the expected parts
return purpose
match = re.search(r"test: (.*)", purpose)
if match:
reason = match.group(1)
return f"test: {test_name}: {reason}"
return f"test: {test_name}: {purpose}"
base_exe = os.path.basename(exe)
alternate = f"Use 'test_part' instead for {self.spec.name} to process {base_exe}."
warnings.warn(f"The 'run_test' method is deprecated. {alternate}")
extra = re.compile(r"[\s,\- ]")
details = (
[extra.sub("", options)]
if isinstance(options, str)
else [extra.sub("", os.path.basename(opt)) for opt in options]
)
details = "_".join([""] + details) if details else ""
test_name = f"test_{base_exe}{details}"
tty.info(test_title(purpose, test_name), format="g")
wdir = "." if work_dir is None else work_dir
with fsys.working_dir(wdir, create=True):
try:
runner = which(exe)
if runner is None and skip_missing:
self.tester.status(test_name, TestStatus.SKIPPED, f"{exe} is missing")
return
assert runner is not None, "Failed to find executable '{0}'".format(exe)
assert runner is not None, f"Failed to find executable '{exe}'"
self._run_test_helper(runner, options, expected, status, installed, purpose)
print("PASSED")
self.tester.status(test_name, TestStatus.PASSED, None)
return True
except BaseException as e:
except (AssertionError, BaseException) as e:
# print a summary of the error to the log file
# so that cdash and junit reporters know about it
exc_type, _, tb = sys.exc_info()
print("FAILED: {0}".format(e))
self.tester.status(test_name, TestStatus.FAILED, str(e))
import traceback
# remove the current call frame to exclude the extract_stack
@ -1991,7 +2009,7 @@ def run_test(
if exc_type is spack.util.executable.ProcessError:
out = io.StringIO()
spack.build_environment.write_log_summary(
out, "test", self.test_log_file, last=1
out, "test", self.tester.test_log_file, last=1
)
m = out.getvalue()
else:
@ -2007,28 +2025,27 @@ def run_test(
if spack.config.get("config:fail_fast", False):
raise TestFailure([(exc, m)])
else:
self.test_failures.append((exc, m))
self.tester.add_failure(exc, m)
return False
# TODO (post-34236): Remove this deprecated method when eliminate test,
# TODO (post-34236): run_test, etc.
def _run_test_helper(self, runner, options, expected, status, installed, purpose):
status = [status] if isinstance(status, int) else status
expected = [expected] if isinstance(expected, str) else expected
options = [options] if isinstance(options, str) else options
if purpose:
tty.msg(purpose)
else:
tty.debug("test: {0}: expect command status in {1}".format(runner.name, status))
if installed:
msg = "Executable '{0}' expected in prefix".format(runner.name)
msg += ", found in {0} instead".format(runner.path)
msg = f"Executable '{runner.name}' expected in prefix, "
msg += f"found in {runner.path} instead"
assert runner.path.startswith(self.spec.prefix), msg
tty.msg(f"Expecting return code in {status}")
try:
output = runner(*options, output=str.split, error=str.split)
assert 0 in status, "Expected {0} execution to fail".format(runner.name)
assert 0 in status, f"Expected {runner.name} execution to fail"
except ProcessError as err:
output = str(err)
match = re.search(r"exited with status ([0-9]+)", output)
@ -2037,8 +2054,8 @@ def _run_test_helper(self, runner, options, expected, status, installed, purpose
for check in expected:
cmd = " ".join([runner.name] + options)
msg = "Expected '{0}' to match output of `{1}`".format(check, cmd)
msg += "\n\nOutput: {0}".format(output)
msg = f"Expected '{check}' to match output of `{cmd}`"
msg += f"\n\nOutput: {output}"
assert re.search(check, output), msg
def unit_test_check(self):
@ -2068,21 +2085,23 @@ def build_log_path(self):
return self.install_log_path if self.spec.installed else self.log_path
@classmethod
def inject_flags(cls: Type, name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
def inject_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
"""
flag_handler that injects all flags through the compiler wrapper.
"""
return flags, None, None
@classmethod
def env_flags(cls: Type, name: str, flags: Iterable[str]):
def env_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
"""
flag_handler that adds all flags to canonical environment variables.
"""
return None, flags, None
@classmethod
def build_system_flags(cls: Type, name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
def build_system_flags(
cls: Type[Pb], name: str, flags: Iterable[str]
) -> FLAG_HANDLER_RETURN_TYPE:
"""
flag_handler that passes flags to the build system arguments. Any
package using `build_system_flags` must also implement
@ -2170,7 +2189,7 @@ def flag_handler(self) -> FLAG_HANDLER_TYPE:
return self._flag_handler
@flag_handler.setter
def flag_handler(self, var: FLAG_HANDLER_TYPE):
def flag_handler(self, var: FLAG_HANDLER_TYPE) -> None:
self._flag_handler = var
# The flag handler method is called for each of the allowed compiler flags.
@ -2417,165 +2436,6 @@ def rpath_args(self):
def builder(self):
return spack.builder.create(self)
@staticmethod
def run_test_callbacks(builder, method_names, callback_type="install"):
"""Tries to call all of the listed methods, returning immediately
if the list is None."""
if not builder.pkg.run_tests or method_names is None:
return
fail_fast = spack.config.get("config:fail_fast", False)
with builder.pkg._setup_test(verbose=False, externals=False) as logger:
# Report running each of the methods in the build log
print_test_message(logger, "Running {0}-time tests".format(callback_type), True)
builder.pkg.test_suite.current_test_spec = builder.pkg.spec
builder.pkg.test_suite.current_base_spec = builder.pkg.spec
if "test" in method_names:
_copy_cached_test_files(builder.pkg, builder.pkg.spec)
for name in method_names:
try:
fn = getattr(builder, name)
msg = "RUN-TESTS: {0}-time tests [{1}]".format(callback_type, name)
print_test_message(logger, msg, True)
fn()
except AttributeError as e:
msg = "RUN-TESTS: method not implemented [{0}]".format(name)
print_test_message(logger, msg, True)
builder.pkg.test_failures.append((e, msg))
if fail_fast:
break
# Raise any collected failures here
if builder.pkg.test_failures:
raise TestFailure(builder.pkg.test_failures)
def has_test_method(pkg):
    """Determine if the package defines its own stand-alone test method.

    Args:
        pkg (type): the package class being checked (``tty.die`` is invoked
            if the argument is not a class)

    Returns:
        (bool): ``True`` if the package overrides the default method; else
        ``False``
    """
    if not inspect.isclass(pkg):
        tty.die("{0}: is not a class, it is {1}".format(pkg, type(pkg)))

    # NOTE(review): after the class check above, ``pkg`` is always a class,
    # so the isinstance() clause below appears unreachable -- confirm before
    # relying on it.
    return (issubclass(pkg, PackageBase) and pkg.test != PackageBase.test) or (
        isinstance(pkg, PackageBase) and pkg.test.__func__ != PackageBase.test
    )
def print_test_message(logger, msg, verbose):
    """Log ``msg``, forcing it to echo to the terminal when ``verbose``."""
    if not verbose:
        tty.msg(msg)
        return
    with logger.force_echo():
        tty.msg(msg)
def _copy_cached_test_files(pkg, spec):
    """Copy any cached stand-alone test-related files."""

    # Stage the installed test sources cache (concrete specs only) into the
    # suite's test cache directory, unless already staged.
    if spec.concrete:
        src = spec.package.install_test_root
        dest = pkg.test_suite.current_test_cache_dir
        if os.path.isdir(src) and not os.path.exists(dest):
            fsys.install_tree(src, dest)

    # Stage the package repository's test data directory into the suite's
    # test data directory, unless already staged.
    src = Prefix(spec.package.package_dir).test
    dest = pkg.test_suite.current_test_data_dir
    if os.path.isdir(src) and not os.path.exists(dest):
        # We assume data dir is used read-only
        # maybe enforce this later
        shutil.copytree(src, dest)
def test_process(pkg, kwargs):
    """Run the stand-alone tests for ``pkg`` and the virtuals it provides.

    Args:
        pkg (PackageBase): installed package whose tests are to be run
        kwargs (dict): recognized keys are ``verbose`` (bool) and
            ``externals`` (bool), both defaulting to ``False``

    Raises:
        TestFailure: when one or more test failures were collected
    """
    verbose = kwargs.get("verbose", False)
    externals = kwargs.get("externals", False)

    with pkg._setup_test(verbose, externals) as logger:
        # External packages are only tested when explicitly requested.
        if pkg.spec.external and not externals:
            print_test_message(logger, "Skipped tests for external package", verbose)
            return

        if not pkg.spec.installed:
            print_test_message(logger, "Skipped not installed package", verbose)
            return

        # run test methods from the package and all virtuals it
        # provides virtuals have to be deduped by name
        v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))

        # hack for compilers that are not dependencies (yet)
        # TODO: this all eventually goes away
        c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
        if pkg.name in c_names:
            v_names.extend(["c", "cxx", "fortran"])
        if pkg.spec.satisfies("llvm+clang"):
            v_names.extend(["c", "cxx"])

        # Sort virtuals for a deterministic test order after the package itself.
        test_specs = [pkg.spec] + [spack.spec.Spec(v_name) for v_name in sorted(v_names)]

        ran_actual_test_function = False
        try:
            with fsys.working_dir(pkg.test_suite.test_dir_for_spec(pkg.spec)):
                for spec in test_specs:
                    pkg.test_suite.current_test_spec = spec

                    # Fail gracefully if a virtual has no package/tests
                    try:
                        spec_pkg = spec.package
                    except spack.repo.UnknownPackageError:
                        continue

                    _copy_cached_test_files(pkg, spec)

                    # grab the function for each method so we can call
                    # it with the package
                    test_fn = spec_pkg.__class__.test
                    if not isinstance(test_fn, types.FunctionType):
                        test_fn = test_fn.__func__

                    # Skip any test methods consisting solely of 'pass'
                    # since they do not contribute to package testing.
                    source = (inspect.getsource(test_fn)).splitlines()[1:]
                    lines = (ln.strip() for ln in source)
                    statements = [ln for ln in lines if not ln.startswith("#")]
                    if len(statements) > 0 and statements[0] == "pass":
                        continue

                    # Run the tests
                    ran_actual_test_function = True
                    context = logger.force_echo if verbose else nullcontext
                    with context():
                        test_fn(pkg)

            # If fail-fast was on, we error out above
            # If we collect errors, raise them in batch here
            if pkg.test_failures:
                raise TestFailure(pkg.test_failures)
        finally:
            # flag the package as having been tested (i.e., ran one or more
            # non-pass-only methods
            if ran_actual_test_function:
                fsys.touch(pkg.tested_file)
                # log one more test message to provide a completion timestamp
                # for CDash reporting
                tty.msg("Completed testing")
            else:
                print_test_message(logger, "No tests to run", verbose)
# Module-level aliases to the default flag-handling policies — presumably so
# package recipes can reference them without the class prefix (TODO confirm
# external users).
inject_flags = PackageBase.inject_flags
env_flags = PackageBase.env_flags
@ -2663,16 +2523,6 @@ def __init__(self, message, long_msg=None):
super(PackageError, self).__init__(message, long_msg)
class PackageVersionError(PackageError):
    """Raised when a version URL cannot automatically be determined."""

    def __init__(self, version):
        # Pair the short error with a hint telling the packager how to fix it.
        msg = "Cannot determine a URL automatically for version %s" % version
        long_msg = "Please provide a url for this version in the package.py file."
        super(PackageVersionError, self).__init__(msg, long_msg)
class NoURLError(PackageError):
"""Raised when someone tries to build a URL for a package with no URLs."""

View File

@ -1063,14 +1063,21 @@ def dump_provenance(self, spec, path):
"Repository %s does not contain package %s." % (self.namespace, spec.fullname)
)
# Install patch files needed by the package.
package_path = self.filename_for_package_name(spec.name)
if not os.path.exists(package_path):
# Spec has no files (e.g., package, patches) to copy
tty.debug(f"{spec.name} does not have a package to dump")
return
# Install patch files needed by the (concrete) package.
fs.mkdirp(path)
for patch in itertools.chain.from_iterable(spec.package.patches.values()):
if patch.path:
if os.path.exists(patch.path):
fs.install(patch.path, path)
else:
tty.warn("Patch file did not exist: %s" % patch.path)
if spec.concrete:
for patch in itertools.chain.from_iterable(spec.package.patches.values()):
if patch.path:
if os.path.exists(patch.path):
fs.install(patch.path, path)
else:
tty.warn("Patch file did not exist: %s" % patch.path)
# Install the package.py file itself.
fs.install(self.filename_for_package_name(spec.name), path)

View File

@ -133,8 +133,9 @@ def wrapper(instance, *args, **kwargs):
# Everything else is an error (the installation
# failed outside of the child process)
package["result"] = "error"
package["stdout"] = self.fetch_log(pkg)
package["message"] = str(exc) or "Unknown error"
package["stdout"] = self.fetch_log(pkg)
package["stdout"] += package["message"]
package["exception"] = traceback.format_exc()
raise

View File

@ -12,7 +12,7 @@
import socket
import time
import xml.sax.saxutils
from typing import Dict
from typing import Dict, Optional
from urllib.parse import urlencode
from urllib.request import HTTPHandler, Request, build_opener
@ -113,14 +113,14 @@ def report_build_name(self, pkg_name):
else self.base_buildname
)
def build_report_for_package(self, directory_name, package, duration):
def build_report_for_package(self, report_dir, package, duration):
if "stdout" not in package:
# Skip reporting on packages that did not generate any output.
# Skip reporting on packages that do not generate output.
return
self.current_package_name = package["name"]
self.buildname = self.report_build_name(self.current_package_name)
report_data = self.initialize_report(directory_name)
report_data = self.initialize_report(report_dir)
for phase in CDASH_PHASES:
report_data[phase] = {}
report_data[phase]["loglines"] = []
@ -215,7 +215,7 @@ def clean_log_event(event):
report_file_name = package["name"] + "_" + report_name
else:
report_file_name = report_name
phase_report = os.path.join(directory_name, report_file_name)
phase_report = os.path.join(report_dir, report_file_name)
with codecs.open(phase_report, "w", "utf-8") as f:
env = spack.tengine.make_environment()
@ -231,7 +231,7 @@ def clean_log_event(event):
f.write(t.render(report_data))
self.upload(phase_report)
def build_report(self, directory_name, specs):
def build_report(self, report_dir, specs):
# Do an initial scan to determine if we are generating reports for more
# than one package. When we're only reporting on a single package we
# do not explicitly include the package's name in the CDash build name.
@ -260,7 +260,7 @@ def build_report(self, directory_name, specs):
if "time" in spec:
duration = int(spec["time"])
for package in spec["packages"]:
self.build_report_for_package(directory_name, package, duration)
self.build_report_for_package(report_dir, package, duration)
self.finalize_report()
def extract_standalone_test_data(self, package, phases, report_data):
@ -273,13 +273,13 @@ def extract_standalone_test_data(self, package, phases, report_data):
testing["generator"] = self.generator
testing["parts"] = extract_test_parts(package["name"], package["stdout"].splitlines())
def report_test_data(self, directory_name, package, phases, report_data):
def report_test_data(self, report_dir, package, phases, report_data):
"""Generate and upload the test report(s) for the package."""
for phase in phases:
# Write the report.
report_name = phase.capitalize() + ".xml"
report_file_name = package["name"] + "_" + report_name
phase_report = os.path.join(directory_name, report_file_name)
report_file_name = "_".join([package["name"], package["id"], report_name])
phase_report = os.path.join(report_dir, report_file_name)
with codecs.open(phase_report, "w", "utf-8") as f:
env = spack.tengine.make_environment()
@ -297,7 +297,7 @@ def report_test_data(self, directory_name, package, phases, report_data):
tty.debug("Preparing to upload {0}".format(phase_report))
self.upload(phase_report)
def test_report_for_package(self, directory_name, package, duration):
def test_report_for_package(self, report_dir, package, duration):
if "stdout" not in package:
# Skip reporting on packages that did not generate any output.
tty.debug("Skipping report for {0}: No generated output".format(package["name"]))
@ -311,14 +311,14 @@ def test_report_for_package(self, directory_name, package, duration):
self.buildname = self.report_build_name(self.current_package_name)
self.starttime = self.endtime - duration
report_data = self.initialize_report(directory_name)
report_data = self.initialize_report(report_dir)
report_data["hostname"] = socket.gethostname()
phases = ["testing"]
self.extract_standalone_test_data(package, phases, report_data)
self.report_test_data(directory_name, package, phases, report_data)
self.report_test_data(report_dir, package, phases, report_data)
def test_report(self, directory_name, specs):
def test_report(self, report_dir, specs):
"""Generate reports for each package in each spec."""
tty.debug("Processing test report")
for spec in specs:
@ -326,21 +326,33 @@ def test_report(self, directory_name, specs):
if "time" in spec:
duration = int(spec["time"])
for package in spec["packages"]:
self.test_report_for_package(directory_name, package, duration)
self.test_report_for_package(report_dir, package, duration)
self.finalize_report()
def test_skipped_report(self, directory_name, spec, reason=None):
def test_skipped_report(
self, report_dir: str, spec: spack.spec.Spec, reason: Optional[str] = None
):
"""Explicitly report spec as being skipped (e.g., CI).
Examples are the installation failed or the package is known to have
broken tests.
Args:
report_dir: directory where the report is to be written
spec: spec being tested
reason: optional reason the test is being skipped
"""
output = "Skipped {0} package".format(spec.name)
if reason:
output += "\n{0}".format(reason)
package = {"name": spec.name, "id": spec.dag_hash(), "result": "skipped", "stdout": output}
self.test_report_for_package(directory_name, package, duration=0.0)
self.test_report_for_package(report_dir, package, duration=0.0)
def concretization_report(self, directory_name, msg):
def concretization_report(self, report_dir, msg):
self.buildname = self.base_buildname
report_data = self.initialize_report(directory_name)
report_data = self.initialize_report(report_dir)
report_data["update"] = {}
report_data["update"]["starttime"] = self.endtime
report_data["update"]["endtime"] = self.endtime
@ -350,7 +362,7 @@ def concretization_report(self, directory_name, msg):
env = spack.tengine.make_environment()
update_template = posixpath.join(self.template_dir, "Update.xml")
t = env.get_template(update_template)
output_filename = os.path.join(directory_name, "Update.xml")
output_filename = os.path.join(report_dir, "Update.xml")
with open(output_filename, "w") as f:
f.write(t.render(report_data))
# We don't have a current package when reporting on concretization
@ -360,9 +372,9 @@ def concretization_report(self, directory_name, msg):
self.success = False
self.finalize_report()
def initialize_report(self, directory_name):
if not os.path.exists(directory_name):
os.mkdir(directory_name)
def initialize_report(self, report_dir):
if not os.path.exists(report_dir):
os.mkdir(report_dir)
report_data = {}
report_data["buildname"] = self.buildname
report_data["buildstamp"] = self.buildstamp

View File

@ -9,17 +9,23 @@
import llnl.util.tty as tty
from spack.install_test import TestStatus
# The keys here represent the only recognized (ctest/cdash) status values
completed = {"failed": "Completed", "passed": "Completed", "notrun": "No tests to run"}
completed = {
"failed": "Completed",
"passed": "Completed",
"skipped": "Completed",
"notrun": "No tests to run",
}
log_regexp = re.compile(r"^==> \[([0-9:.\-]*)(?:, [0-9]*)?\] (.*)")
returns_regexp = re.compile(r"\[([0-9 ,]*)\]")
skip_msgs = ["Testing package", "Results for", "Detected the following"]
skip_msgs = ["Testing package", "Results for", "Detected the following", "Warning:"]
skip_regexps = [re.compile(r"{0}".format(msg)) for msg in skip_msgs]
status_values = ["FAILED", "PASSED", "NO-TESTS"]
status_regexps = [re.compile(r"^({0})".format(stat)) for stat in status_values]
status_regexps = [re.compile(r"^({0})".format(str(stat))) for stat in TestStatus]
def add_part_output(part, line):
@ -36,12 +42,14 @@ def elapsed(current, previous):
return diff.total_seconds()
# TODO (post-34236): Should remove with deprecated test methods since don't
# TODO (post-34236): have an XFAIL mechanism with the new test_part() approach.
def expected_failure(line):
    """Determine whether the log line flags an expected (nonzero) status.

    Args:
        line (str): a test log line (may be ``None`` or empty)

    Returns:
        (bool): ``True`` when a return-code list is present and ``0`` is
            not among the accepted codes; else ``False``
    """
    if not line:
        return False

    match = returns_regexp.search(line)
    # Check the captured code list (group(1)); group(0) would include the
    # surrounding brackets and so could falsely match a literal "0".
    xfail = "0" not in match.group(1) if match else False
    return xfail
@ -54,12 +62,12 @@ def new_part():
"name": None,
"loglines": [],
"output": None,
"status": "passed",
"status": None,
}
# TODO (post-34236): Remove this when remove deprecated methods
def part_name(source):
# TODO: Should be passed the package prefix and only remove it
elements = []
for e in source.replace("'", "").split(" "):
elements.append(os.path.basename(e) if os.sep in e else e)
@ -73,10 +81,14 @@ def process_part_end(part, curr_time, last_time):
stat = part["status"]
if stat in completed:
# TODO (post-34236): remove the expected failure mapping when
# TODO (post-34236): remove deprecated test methods.
if stat == "passed" and expected_failure(part["desc"]):
part["completed"] = "Expected to fail"
elif part["completed"] == "Unknown":
part["completed"] = completed[stat]
elif stat is None or stat == "unknown":
part["status"] = "passed"
part["output"] = "\n".join(part["loglines"])
@ -96,16 +108,16 @@ def status(line):
match = regex.search(line)
if match:
stat = match.group(0)
stat = "notrun" if stat == "NO-TESTS" else stat
stat = "notrun" if stat == "NO_TESTS" else stat
return stat.lower()
def extract_test_parts(default_name, outputs):
parts = []
part = {}
testdesc = ""
last_time = None
curr_time = None
for line in outputs:
line = line.strip()
if not line:
@ -115,12 +127,16 @@ def extract_test_parts(default_name, outputs):
if skip(line):
continue
# Skipped tests start with "Skipped" and end with "package"
# The spec was explicitly reported as skipped (e.g., installation
# failed, package known to have failing tests, won't test external
# package).
if line.startswith("Skipped") and line.endswith("package"):
stat = "skipped"
part = new_part()
part["command"] = "Not Applicable"
part["completed"] = line
part["completed"] = completed[stat]
part["elapsed"] = 0.0
part["loglines"].append(line)
part["name"] = default_name
part["status"] = "notrun"
parts.append(part)
@ -137,40 +153,53 @@ def extract_test_parts(default_name, outputs):
if msg.startswith("Installing"):
continue
# New command means the start of a new test part
if msg.startswith("'") and msg.endswith("'"):
# TODO (post-34236): Remove this check when remove run_test(),
# TODO (post-34236): etc. since no longer supporting expected
# TODO (post-34236): failures.
if msg.startswith("Expecting return code"):
if part:
part["desc"] += f"; {msg}"
continue
# Terminate without further parsing if no more test messages
if "Completed testing" in msg:
# Process last lingering part IF it didn't generate status
process_part_end(part, curr_time, last_time)
return parts
# New test parts start "test: <name>: <desc>".
if msg.startswith("test: "):
# Update the last part processed
process_part_end(part, curr_time, last_time)
part = new_part()
part["command"] = msg
part["name"] = part_name(msg)
desc = msg.split(":")
part["name"] = desc[1].strip()
part["desc"] = ":".join(desc[2:]).strip()
parts.append(part)
# Save off the optional test description if it was
# tty.debuged *prior to* the command and reset
if testdesc:
part["desc"] = testdesc
testdesc = ""
# There is no guarantee of a 1-to-1 mapping of a test part and
# a (single) command (or executable) since the introduction of
# PR 34236.
#
# Note that tests where the package does not save the output
# (e.g., output=str.split, error=str.split) will not have
# a command printed to the test log.
elif msg.startswith("'") and msg.endswith("'"):
if part:
if part["command"]:
part["command"] += "; " + msg.replace("'", "")
else:
part["command"] = msg.replace("'", "")
else:
part = new_part()
part["command"] = msg.replace("'", "")
else:
# Update the last part processed since a new log message
# means a non-test action
process_part_end(part, curr_time, last_time)
if testdesc:
# We had a test description but no command so treat
# as a new part (e.g., some import tests)
part = new_part()
part["name"] = "_".join(testdesc.split())
part["command"] = "unknown"
part["desc"] = testdesc
parts.append(part)
process_part_end(part, curr_time, curr_time)
# Assuming this is a description for the next test part
testdesc = msg
else:
tty.debug("Did not recognize test output '{0}'".format(line))
@ -197,12 +226,14 @@ def extract_test_parts(default_name, outputs):
# If no parts, create a skeleton to flag that the tests are not run
if not parts:
part = new_part()
stat = "notrun"
part["command"] = "Not Applicable"
stat = "failed" if outputs[0].startswith("Cannot open log") else "notrun"
part["command"] = "unknown"
part["completed"] = completed[stat]
part["elapsed"] = 0.0
part["name"] = default_name
part["status"] = stat
part["output"] = "\n".join(outputs)
parts.append(part)
return parts

View File

@ -1632,7 +1632,9 @@ def root(self):
@property
def package(self):
assert self.concrete, "Spec.package can only be called on concrete specs"
assert self.concrete, "{0}: Spec.package can only be called on concrete specs".format(
self.name
)
if not self._package:
self._package = spack.repo.path.get(self)
return self._package

View File

@ -21,7 +21,7 @@
(["wrong-variant-in-depends-on"], ["PKG-DIRECTIVES", "PKG-PROPERTIES"]),
# This package has a GitHub patch URL without full_index=1
(["invalid-github-patch-url"], ["PKG-DIRECTIVES", "PKG-PROPERTIES"]),
# This package has a stand-alone 'test' method in build-time callbacks
# This package has a stand-alone 'test*' method in build-time callbacks
(["fail-test-audit"], ["PKG-DIRECTIVES", "PKG-PROPERTIES"]),
# This package has no issues
(["mpileaks"], None),

View File

@ -7,6 +7,8 @@
import pytest
from llnl.util.filesystem import touch
import spack.paths
@ -125,6 +127,7 @@ def test_build_time_tests_are_executed_from_default_builder():
@pytest.mark.regression("34518")
@pytest.mark.usefixtures("builder_test_repository", "config", "working_env")
def test_monkey_patching_wrapped_pkg():
"""Confirm 'run_tests' is accessible through wrappers."""
s = spack.spec.Spec("old-style-autotools").concretized()
builder = spack.builder.create(s.package)
assert s.package.run_tests is False
@ -139,12 +142,29 @@ def test_monkey_patching_wrapped_pkg():
@pytest.mark.regression("34440")
@pytest.mark.usefixtures("builder_test_repository", "config", "working_env")
def test_monkey_patching_test_log_file():
"""Confirm 'test_log_file' is accessible through wrappers."""
s = spack.spec.Spec("old-style-autotools").concretized()
builder = spack.builder.create(s.package)
assert s.package.test_log_file is None
assert builder.pkg.test_log_file is None
assert builder.pkg_with_dispatcher.test_log_file is None
s.package.test_log_file = "/some/file"
assert builder.pkg.test_log_file == "/some/file"
assert builder.pkg_with_dispatcher.test_log_file == "/some/file"
s.package.tester.test_log_file = "/some/file"
assert builder.pkg.tester.test_log_file == "/some/file"
assert builder.pkg_with_dispatcher.tester.test_log_file == "/some/file"
# Windows context manager's __exit__ fails with ValueError ("I/O operation
# on closed file").
@pytest.mark.skipif(sys.platform == "win32", reason="Does not run on windows")
def test_install_time_test_callback(tmpdir, config, mock_packages, mock_stage):
    """Confirm able to run stand-alone test as a post-install callback."""
    s = spack.spec.Spec("py-test-callback").concretized()
    builder = spack.builder.create(s.package)
    # Enable testing so the post-install test callback actually fires.
    builder.pkg.run_tests = True
    # Point the tester at a pre-created log file under the temp directory.
    s.package.tester.test_log_file = tmpdir.join("install_test.log")
    touch(s.package.tester.test_log_file)

    # Execute every build phase, which includes the post-install callbacks.
    for phase_fn in builder:
        phase_fn.execute()

    # The callback's test output should have been written to the log.
    with open(s.package.tester.test_log_file, "r") as f:
        results = f.read().replace("\n", " ")
        assert "PyTestCallback test" in results

View File

@ -566,8 +566,7 @@ def test_ci_run_standalone_tests_not_installed_cdash(
ci.run_standalone_tests(**args)
out = capfd.readouterr()[0]
# CDash *and* log file output means log file ignored
assert "xml option is ignored" in out
assert "0 passed of 0" in out
assert "xml option is ignored with CDash" in out
# copy test results (though none)
artifacts_dir = tmp_path / "artifacts"
@ -595,9 +594,10 @@ def test_ci_skipped_report(tmpdir, mock_packages, config):
reason = "Testing skip"
handler.report_skipped(spec, tmpdir.strpath, reason=reason)
report = fs.join_path(tmpdir, "{0}_Testing.xml".format(pkg))
expected = "Skipped {0} package".format(pkg)
with open(report, "r") as f:
reports = [name for name in tmpdir.listdir() if str(name).endswith("Testing.xml")]
assert len(reports) == 1
expected = f"Skipped {pkg} package"
with open(reports[0], "r") as f:
have = [0, 0]
for line in f:
if expected in line:

View File

@ -701,6 +701,7 @@ def test_env_with_config(environment_from_manifest):
def test_with_config_bad_include(environment_from_manifest):
"""Confirm missing include paths raise expected exception and error."""
e = environment_from_manifest(
"""
spack:
@ -709,14 +710,10 @@ def test_with_config_bad_include(environment_from_manifest):
- no/such/file.yaml
"""
)
with pytest.raises(spack.config.ConfigFileError) as exc:
with pytest.raises(spack.config.ConfigFileError, match="2 missing include path"):
with e:
e.concretize()
err = str(exc)
assert "missing include" in err
assert "/no/such/directory" in err
assert os.path.join("no", "such", "file.yaml") in err
assert ev.active_environment() is None

View File

@ -1072,11 +1072,18 @@ def test_install_empty_env(
],
)
def test_installation_fail_tests(install_mockery, mock_fetch, name, method):
"""Confirm build-time tests with unknown methods fail."""
output = install("--test=root", "--no-cache", name, fail_on_error=False)
# Check that there is a single test failure reported
assert output.count("TestFailure: 1 test failed") == 1
# Check that the method appears twice: no attribute error and in message
assert output.count(method) == 2
assert output.count("method not implemented") == 1
assert output.count("TestFailure: 1 tests failed") == 1
# Check that the path to the test log file is also output
assert "See test log for details" in output
def test_install_use_buildcache(

View File

@ -41,7 +41,7 @@ def _module_files(module_type, *specs):
["rm", "doesnotexist"], # Try to remove a non existing module
["find", "mpileaks"], # Try to find a module with multiple matches
["find", "doesnotexist"], # Try to find a module with no matches
["find", "--unkown_args"], # Try to give an unknown argument
["find", "--unknown_args"], # Try to give an unknown argument
]
)
def failure_args(request):

View File

@ -85,7 +85,15 @@ def mock_pkg_git_repo(git, tmpdir_factory):
@pytest.fixture(scope="module")
def mock_pkg_names():
repo = spack.repo.path.get_repo("builtin.mock")
names = set(name for name in repo.all_package_names() if not name.startswith("pkg-"))
# Be sure to include virtual packages since packages with stand-alone
# tests may inherit additional tests from the virtuals they provide,
# such as packages that implement `mpi`.
names = set(
name
for name in repo.all_package_names(include_virtuals=True)
if not name.startswith("pkg-")
)
return names

View File

@ -16,6 +16,7 @@
import spack.package_base
import spack.paths
import spack.store
from spack.install_test import TestStatus
from spack.main import SpackCommand
install = SpackCommand("install")
@ -59,15 +60,14 @@ def test_test_dup_alias(
"""Ensure re-using an alias fails with suggestion to change."""
install("libdwarf")
# Run the tests with the alias once
out = spack_test("run", "--alias", "libdwarf", "libdwarf")
assert "Spack test libdwarf" in out
# Run the (no) tests with the alias once
spack_test("run", "--alias", "libdwarf", "libdwarf")
# Try again with the alias but don't let it fail on the error
with capfd.disabled():
out = spack_test("run", "--alias", "libdwarf", "libdwarf", fail_on_error=False)
assert "already exists" in out
assert "already exists" in out and "Try another alias" in out
def test_test_output(
@ -83,51 +83,39 @@ def test_test_output(
# Grab test stage directory contents
testdir = os.path.join(mock_test_stage, stage_files[0])
testdir_files = os.listdir(testdir)
testlogs = [name for name in testdir_files if str(name).endswith("out.txt")]
assert len(testlogs) == 1
# Grab the output from the test log
testlog = list(filter(lambda x: x.endswith("out.txt") and x != "results.txt", testdir_files))
outfile = os.path.join(testdir, testlog[0])
# Grab the output from the test log to confirm expected result
outfile = os.path.join(testdir, testlogs[0])
with open(outfile, "r") as f:
output = f.read()
assert "BEFORE TEST" in output
assert "true: expect command status in [" in output
assert "AFTER TEST" in output
assert "FAILED" not in output
assert "test_print" in output
assert "PASSED" in output
def test_test_output_on_error(
mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config, capfd, mock_test_stage
@pytest.mark.parametrize(
"pkg_name,failure", [("test-error", "exited with status 1"), ("test-fail", "not callable")]
)
def test_test_output_fails(
mock_packages,
mock_archive,
mock_fetch,
install_mockery_mutable_config,
mock_test_stage,
pkg_name,
failure,
):
install("test-error")
# capfd interferes with Spack's capturing
with capfd.disabled():
out = spack_test("run", "test-error", fail_on_error=False)
"""Confirm stand-alone test failure with expected outputs."""
install(pkg_name)
out = spack_test("run", pkg_name, fail_on_error=False)
# Confirm package-specific failure is in the output
assert failure in out
# Confirm standard failure tagging AND test log reference also output
assert "TestFailure" in out
assert "Command exited with status 1" in out
def test_test_output_on_failure(
mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config, capfd, mock_test_stage
):
install("test-fail")
with capfd.disabled():
out = spack_test("run", "test-fail", fail_on_error=False)
assert "Expected 'not in the output' to match output of `true`" in out
assert "TestFailure" in out
def test_show_log_on_error(
mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config, capfd, mock_test_stage
):
"""Make sure spack prints location of test log on failure."""
install("test-error")
with capfd.disabled():
out = spack_test("run", "test-error", fail_on_error=False)
assert "See test log" in out
assert mock_test_stage in out
assert "See test log for details" in out
@pytest.mark.usefixtures(
@ -136,11 +124,12 @@ def test_show_log_on_error(
@pytest.mark.parametrize(
"pkg_name,msgs",
[
("test-error", ["FAILED: Command exited", "TestFailure"]),
("test-fail", ["FAILED: Expected", "TestFailure"]),
("test-error", ["exited with status 1", "TestFailure"]),
("test-fail", ["not callable", "TestFailure"]),
],
)
def test_junit_output_with_failures(tmpdir, mock_test_stage, pkg_name, msgs):
"""Confirm stand-alone test failure expected outputs in JUnit reporting."""
install(pkg_name)
with tmpdir.as_cwd():
spack_test(
@ -173,6 +162,7 @@ def test_cdash_output_test_error(
mock_test_stage,
capfd,
):
"""Confirm stand-alone test error expected outputs in CDash reporting."""
install("test-error")
with tmpdir.as_cwd():
spack_test(
@ -183,12 +173,10 @@ def test_cdash_output_test_error(
fail_on_error=False,
)
report_dir = tmpdir.join("cdash_reports")
print(tmpdir.listdir())
assert report_dir in tmpdir.listdir()
report_file = report_dir.join("test-error_Testing.xml")
assert report_file in report_dir.listdir()
content = report_file.open().read()
assert "FAILED: Command exited with status 1" in content
reports = [name for name in report_dir.listdir() if str(name).endswith("Testing.xml")]
assert len(reports) == 1
content = reports[0].open().read()
assert "Command exited with status 1" in content
def test_cdash_upload_clean_test(
@ -203,10 +191,12 @@ def test_cdash_upload_clean_test(
with tmpdir.as_cwd():
spack_test("run", "--log-file=cdash_reports", "--log-format=cdash", "printing-package")
report_dir = tmpdir.join("cdash_reports")
assert report_dir in tmpdir.listdir()
report_file = report_dir.join("printing-package_Testing.xml")
assert report_file in report_dir.listdir()
content = report_file.open().read()
reports = [name for name in report_dir.listdir() if str(name).endswith("Testing.xml")]
assert len(reports) == 1
content = reports[0].open().read()
assert "passed" in content
assert "Running test_print" in content, "Expected first command output"
assert "second command" in content, "Expected second command output"
assert "</Test>" in content
assert "<Text>" not in content
@ -226,17 +216,19 @@ def test_test_help_cdash(mock_test_stage):
def test_test_list_all(mock_packages):
"""make sure `spack test list --all` returns all packages with tests"""
"""Confirm `spack test list --all` returns all packages with test methods"""
pkgs = spack_test("list", "--all").strip().split()
assert set(pkgs) == set(
[
"fail-test-audit",
"mpich",
"printing-package",
"py-extension1",
"py-extension2",
"py-test-callback",
"simple-standalone-test",
"test-error",
"test-fail",
"fail-test-audit",
]
)
@ -248,15 +240,6 @@ def test_test_list(mock_packages, mock_archive, mock_fetch, install_mockery_muta
assert pkg_with_tests in output
@pytest.mark.skipif(sys.platform == "win32", reason="Not supported on Windows (yet)")
def test_has_test_method_fails(capsys):
with pytest.raises(SystemExit):
spack.package_base.has_test_method("printing-package")
captured = capsys.readouterr()[1]
assert "is not a class" in captured
def test_read_old_results(mock_packages, mock_test_stage):
"""Take test data generated before the switch to full hash everywhere
and make sure we can still read it in"""
@ -276,7 +259,7 @@ def test_read_old_results(mock_packages, mock_test_stage):
# The results command should still print the old test results
results_output = spack_test("results")
assert "PASSED" in results_output
assert str(TestStatus.PASSED) in results_output
def test_test_results_none(mock_packages, mock_test_stage):
@ -291,15 +274,10 @@ def test_test_results_none(mock_packages, mock_test_stage):
@pytest.mark.parametrize(
"status,expected",
[
("FAILED", "1 failed"),
("NO-TESTS", "1 no-tests"),
("SKIPPED", "1 skipped"),
("PASSED", "1 passed"),
],
"status", [TestStatus.FAILED, TestStatus.NO_TESTS, TestStatus.SKIPPED, TestStatus.PASSED]
)
def test_test_results_status(mock_packages, mock_test_stage, status, expected):
def test_test_results_status(mock_packages, mock_test_stage, status):
"""Confirm 'spack test results' returns expected status."""
name = "trivial"
spec = spack.spec.Spec("trivial-smoke-test").concretized()
suite = spack.install_test.TestSuite([spec], name)
@ -313,11 +291,11 @@ def test_test_results_status(mock_packages, mock_test_stage, status, expected):
args.insert(1, opt)
results = spack_test(*args)
if opt == "--failed" and status != "FAILED":
assert status not in results
if opt == "--failed" and status != TestStatus.FAILED:
assert str(status) not in results
else:
assert status in results
assert expected in results
assert str(status) in results
assert "1 {0}".format(status.lower()) in results
@pytest.mark.regression("35337")

View File

@ -1,6 +1,7 @@
==> Testing package printing-package-1.0-hzgcoow
BEFORE TEST
==> [2022-02-28-20:21:46.510616] test: true: expect command status in [0]
==> [2022-02-28-20:21:46.510937] '/bin/true'
PASSED
AFTER TEST
==> [2022-12-06-20:21:46.550943] test: test_print: Test python print example.
==> [2022-12-06-20:21:46.553219] '/usr/tce/bin/python' '-c' 'print("Running test_print")'
Running test_print
==> [2022-12-06-20:21:46.721077] '/usr/tce/bin/python' '-c' 'print("Running test_print")'
PASSED: test_print
==> [2022-12-06-20:21:46.822608] Completed testing

View File

@ -34,7 +34,7 @@ class Legion(CMakePackage):
homepage = "https://legion.stanford.edu/"
git = "https://github.com/StanfordLegion/legion.git"
maintainers = ['pmccormick', 'streichler']
maintainers('pmccormick', 'streichler')
tags = ['e4s']
version('21.03.0', tag='legion-21.03.0')
version('stable', branch='stable')
@ -355,7 +355,7 @@ class Legion(CMakePackage):
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources([join_path('examples', 'local_function_tasks')])
cache_extra_test_sources(self, [join_path('examples', 'local_function_tasks')])
def run_local_function_tasks_test(self):
"""Run stand alone test: local_function_tasks"""

View File

@ -27,8 +27,7 @@ class Mfem(Package, CudaPackage, ROCmPackage):
homepage = 'http://www.mfem.org'
git = 'https://github.com/mfem/mfem.git'
maintainers = ['v-dobrev', 'tzanio', 'acfisher',
'goxberry', 'markcmiller86']
maintainers('v-dobrev', 'tzanio', 'acfisher', 'goxberry', 'markcmiller86')
test_requires_compiler = True
@ -815,8 +814,7 @@ class Mfem(Package, CudaPackage, ROCmPackage):
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources([self.examples_src_dir,
self.examples_data_dir])
cache_extra_test_sources(self, [self.examples_src_dir, self.examples_data_dir])
def test(self):
test_dir = join_path(

View File

@ -23,6 +23,7 @@
_spack_build_envfile,
_spack_build_logfile,
_spack_configure_argsfile,
spack_times_log,
)
from spack.spec import Spec
@ -243,7 +244,7 @@ def test_install_times(install_mockery, mock_fetch, mutable_mock_repo):
spec.package.do_install()
# Ensure dependency directory exists after the installation.
install_times = os.path.join(spec.package.prefix, ".spack", "install_times.json")
install_times = os.path.join(spec.package.prefix, ".spack", spack_times_log)
assert os.path.isfile(install_times)
# Ensure the phases are included
@ -252,7 +253,7 @@ def test_install_times(install_mockery, mock_fetch, mutable_mock_repo):
# The order should be maintained
phases = [x["name"] for x in times["phases"]]
assert phases == ["stage", "one", "two", "three", "install"]
assert phases == ["stage", "one", "two", "three", "install", "post-install"]
assert all(isinstance(x["seconds"], float) for x in times["phases"])

View File

@ -1384,3 +1384,32 @@ def test_single_external_implicit_install(install_mockery, explicit_args, is_exp
s.external_path = "/usr"
create_installer([(s, explicit_args)]).install()
assert spack.store.db.get_record(pkg).explicit == is_explicit
@pytest.mark.parametrize("run_tests", [True, False])
def test_print_install_test_log_skipped(install_mockery, mock_packages, capfd, run_tests):
"""Confirm printing of install log skipped if not run/no failures."""
name = "trivial-install-test-package"
s = spack.spec.Spec(name).concretized()
pkg = s.package
pkg.run_tests = run_tests
spack.installer.print_install_test_log(pkg)
out = capfd.readouterr()[0]
assert out == ""
def test_print_install_test_log_missing(
tmpdir, install_mockery, mock_packages, ensure_debug, capfd
):
"""Confirm expected error on attempt to print missing test log file."""
name = "trivial-install-test-package"
s = spack.spec.Spec(name).concretized()
pkg = s.package
pkg.run_tests = True
pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
pkg.tester.add_failure(AssertionError("test"), "test-failure")
spack.installer.print_install_test_log(pkg)
err = capfd.readouterr()[1]
assert "no test log file" in err

View File

@ -17,8 +17,11 @@
import llnl.util.filesystem as fs
import spack.install_test
import spack.package_base
import spack.repo
from spack.build_systems.generic import Package
from spack.installer import InstallError
@pytest.fixture(scope="module")
@ -117,14 +120,14 @@ def test_possible_dependencies_with_multiple_classes(mock_packages, mpileaks_pos
assert expected == spack.package_base.possible_dependencies(*pkgs)
def setup_install_test(source_paths, install_test_root):
def setup_install_test(source_paths, test_root):
"""
Set up the install test by creating sources and install test roots.
The convention used here is to create an empty file if the path name
ends with an extension otherwise, a directory is created.
"""
fs.mkdirp(install_test_root)
fs.mkdirp(test_root)
for path in source_paths:
if os.path.splitext(path)[1]:
fs.touchp(path)
@ -159,10 +162,11 @@ def test_cache_extra_sources(install_mockery, spec, sources, extras, expect):
"""Test the package's cache extra test sources helper function."""
s = spack.spec.Spec(spec).concretized()
s.package.spec.concretize()
source_path = s.package.stage.source_path
source_path = s.package.stage.source_path
srcs = [fs.join_path(source_path, src) for src in sources]
setup_install_test(srcs, s.package.install_test_root)
test_root = spack.install_test.install_test_root(s.package)
setup_install_test(srcs, test_root)
emsg_dir = "Expected {0} to be a directory"
emsg_file = "Expected {0} to be a file"
@ -173,10 +177,10 @@ def test_cache_extra_sources(install_mockery, spec, sources, extras, expect):
else:
assert os.path.isdir(src), emsg_dir.format(src)
s.package.cache_extra_test_sources(extras)
spack.install_test.cache_extra_test_sources(s.package, extras)
src_dests = [fs.join_path(s.package.install_test_root, src) for src in sources]
exp_dests = [fs.join_path(s.package.install_test_root, e) for e in expect]
src_dests = [fs.join_path(test_root, src) for src in sources]
exp_dests = [fs.join_path(test_root, e) for e in expect]
poss_dests = set(src_dests) | set(exp_dests)
msg = "Expected {0} to{1} exist"
@ -192,3 +196,146 @@ def test_cache_extra_sources(install_mockery, spec, sources, extras, expect):
# Perform a little cleanup
shutil.rmtree(os.path.dirname(source_path))
def test_cache_extra_sources_fails(install_mockery):
s = spack.spec.Spec("a").concretized()
s.package.spec.concretize()
with pytest.raises(InstallError) as exc_info:
spack.install_test.cache_extra_test_sources(s.package, ["/a/b", "no-such-file"])
errors = str(exc_info.value)
assert "'/a/b') must be relative" in errors
assert "'no-such-file') for the copy does not exist" in errors
def test_package_exes_and_libs():
with pytest.raises(spack.error.SpackError, match="defines both"):
class BadDetectablePackage(spack.package.Package):
executables = ["findme"]
libraries = ["libFindMe.a"]
def test_package_url_and_urls():
class URLsPackage(spack.package.Package):
url = "https://www.example.com/url-package-1.0.tgz"
urls = ["https://www.example.com/archive"]
s = spack.spec.Spec("a")
with pytest.raises(ValueError, match="defines both"):
URLsPackage(s)
def test_package_license():
class LicensedPackage(spack.package.Package):
extendees = None # currently a required attribute for is_extension()
license_files = None
s = spack.spec.Spec("a")
pkg = LicensedPackage(s)
assert pkg.global_license_file is None
pkg.license_files = ["license.txt"]
assert os.path.basename(pkg.global_license_file) == pkg.license_files[0]
class BaseTestPackage(Package):
extendees = None # currently a required attribute for is_extension()
def test_package_version_fails():
s = spack.spec.Spec("a")
pkg = BaseTestPackage(s)
with pytest.raises(ValueError, match="does not have a concrete version"):
pkg.version()
def test_package_tester_fails():
s = spack.spec.Spec("a")
pkg = BaseTestPackage(s)
with pytest.raises(ValueError, match="without concrete version"):
pkg.tester()
def test_package_fetcher_fails():
s = spack.spec.Spec("a")
pkg = BaseTestPackage(s)
with pytest.raises(ValueError, match="without concrete version"):
pkg.fetcher
def test_package_no_extendees():
s = spack.spec.Spec("a")
pkg = BaseTestPackage(s)
assert pkg.extendee_args is None
def test_package_test_no_compilers(mock_packages, monkeypatch, capfd):
def compilers(compiler, arch_spec):
return None
monkeypatch.setattr(spack.compilers, "compilers_for_spec", compilers)
s = spack.spec.Spec("a")
pkg = BaseTestPackage(s)
pkg.test_requires_compiler = True
pkg.do_test()
error = capfd.readouterr()[1]
assert "Skipping tests for package" in error
assert "test requires missing compiler" in error
# TODO (post-34236): Remove when remove deprecated run_test(), etc.
@pytest.mark.parametrize(
"msg,installed,purpose,expected",
[
("do-nothing", False, "test: echo", "do-nothing"),
("not installed", True, "test: echo not installed", "expected in prefix"),
],
)
def test_package_run_test_install(
install_mockery_mutable_config, mock_fetch, capfd, msg, installed, purpose, expected
):
"""Confirm expected outputs from run_test for installed/not installed exe."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
pkg.run_test(
"echo", msg, expected=[expected], installed=installed, purpose=purpose, work_dir="."
)
output = capfd.readouterr()[0]
assert expected in output
# TODO (post-34236): Remove when remove deprecated run_test(), etc.
@pytest.mark.parametrize(
"skip,failures,status",
[
(True, 0, str(spack.install_test.TestStatus.SKIPPED)),
(False, 1, str(spack.install_test.TestStatus.FAILED)),
],
)
def test_package_run_test_missing(
install_mockery_mutable_config, mock_fetch, capfd, skip, failures, status
):
"""Confirm expected results from run_test for missing exe when skip or not."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
pkg.run_test("no-possible-program", skip_missing=skip)
output = capfd.readouterr()[0]
assert len(pkg.tester.test_failures) == failures
assert status in output
# TODO (post-34236): Remove when remove deprecated run_test(), etc.
def test_package_run_test_fail_fast(install_mockery_mutable_config, mock_fetch):
"""Confirm expected exception when run_test with fail_fast enabled."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
with spack.config.override("config:fail_fast", True):
with pytest.raises(spack.install_test.TestFailure, match="Failed to find executable"):
pkg.run_test("no-possible-program")

View File

@ -312,14 +312,6 @@ def test_fetch_options(version_str, digest_end, extra_options):
assert fetcher.extra_options == extra_options
def test_has_test_method_fails(capsys):
with pytest.raises(SystemExit):
spack.package_base.has_test_method("printing-package")
captured = capsys.readouterr()[1]
assert "is not a class" in captured
def test_package_deprecated_version(mock_packages, mock_fetch, mock_stage):
spec = Spec("deprecated-versions")
pkg_cls = spack.repo.path.get_pkg_class(spec.name)

View File

@ -152,3 +152,18 @@ def test_repo_path_handles_package_removal(tmpdir, mock_packages):
with spack.repo.use_repositories(builder.root, override=False) as repos:
r = repos.repo_for_pkg("c")
assert r.namespace == "builtin.mock"
def test_repo_dump_virtuals(tmpdir, mutable_mock_repo, mock_packages, ensure_debug, capsys):
# Start with a package-less virtual
vspec = spack.spec.Spec("something")
mutable_mock_repo.dump_provenance(vspec, tmpdir)
captured = capsys.readouterr()[1]
assert "does not have a package" in captured
# Now with a virtual with a package
vspec = spack.spec.Spec("externalvirtual")
mutable_mock_repo.dump_provenance(vspec, tmpdir)
captured = capsys.readouterr()[1]
assert "Installing" in captured
assert "package.py" in os.listdir(tmpdir), "Expected the virtual's package to be copied"

View File

@ -2,6 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import llnl.util.filesystem as fs
@ -9,10 +11,12 @@
import spack.reporters.extract
import spack.spec
from spack.install_test import TestStatus
from spack.reporters import CDash, CDashConfiguration
# Use a path variable to appease Spack style line length checks
fake_install_prefix = fs.join_path(
os.sep,
"usr",
"spack",
"spack",
@ -28,17 +32,41 @@
)
def test_reporters_extract_basics():
# This test has a description, command, and status
fake_bin = fs.join_path(fake_install_prefix, "bin", "fake")
name = "test_no_status"
desc = "basic description"
status = TestStatus.PASSED
outputs = """
==> Testing package fake-1.0-abcdefg
==> [2022-02-15-18:44:21.250165] test: {0}: {1}
==> [2022-02-15-18:44:21.250200] '{2}'
{3}: {0}
""".format(
name, desc, fake_bin, status
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
assert len(parts) == 1
assert parts[0]["command"] == "{0}".format(fake_bin)
assert parts[0]["desc"] == desc
assert parts[0]["loglines"] == ["{0}: {1}".format(status, name)]
assert parts[0]["status"] == status.lower()
def test_reporters_extract_no_parts(capfd):
# This test ticks three boxes:
# 1) has Installing, which is skipped;
# 2) does not define any test parts;
# 3) has a status value without a part so generates a warning
status = TestStatus.NO_TESTS
outputs = """
==> Testing package fake-1.0-abcdefg
==> [2022-02-11-17:14:38.875259] Installing {0} to {1}
NO-TESTS
{2}
""".format(
fake_install_test_root, fake_test_cache
fake_install_test_root, fake_test_cache, status
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
@ -49,61 +77,67 @@ def test_reporters_extract_no_parts(capfd):
assert "No part to add status" in err
def test_reporters_extract_no_command():
# This test ticks 2 boxes:
# 1) has a test description with no command or status
# 2) has a test description, command, and status
fake_bin = fs.join_path(fake_install_prefix, "bin", "fake")
outputs = """
==> Testing package fake-1.0-abcdefg
==> [2022-02-15-18:44:21.250165] command with no status
==> [2022-02-15-18:44:21.250175] running test program
==> [2022-02-15-18:44:21.250200] '{0}'
PASSED
""".format(
fake_bin
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
assert len(parts) == 2
assert parts[0]["command"] == "unknown"
assert parts[1]["loglines"] == ["PASSED"]
assert parts[1]["elapsed"] == 0.0
def test_reporters_extract_missing_desc():
# This test parts with and without descriptions *and* a test part that has
# multiple commands
fake_bin = fs.join_path(fake_install_prefix, "bin", "importer")
names = ["test_fake_bin", "test_fake_util", "test_multiple_commands"]
descs = ["", "import fake util module", ""]
failed = TestStatus.FAILED
passed = TestStatus.PASSED
results = [passed, failed, passed]
outputs = """
==> Testing package fake-1.0-abcdefg
==> [2022-02-15-18:44:21.250165] '{0}' '-c' 'import fake.bin'
PASSED
==> [2022-02-15-18:44:21.250200] '{0}' '-c' 'import fake.util'
PASSED
==> [2022-02-15-18:44:21.250165] test: {0}: {1}
==> [2022-02-15-18:44:21.250170] '{5}' '-c' 'import fake.bin'
{2}: {0}
==> [2022-02-15-18:44:21.250185] test: {3}: {4}
==> [2022-02-15-18:44:21.250200] '{5}' '-c' 'import fake.util'
{6}: {3}
==> [2022-02-15-18:44:21.250205] test: {7}: {8}
==> [2022-02-15-18:44:21.250210] 'exe1 1'
==> [2022-02-15-18:44:21.250250] 'exe2 2'
{9}: {7}
""".format(
fake_bin
names[0],
descs[0],
results[0],
names[1],
descs[1],
fake_bin,
results[1],
names[2],
descs[2],
results[2],
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
assert len(parts) == 2
assert parts[0]["desc"] is None
assert parts[1]["desc"] is None
assert len(parts) == 3
for i, (name, desc, status) in enumerate(zip(names, descs, results)):
assert parts[i]["name"] == name
assert parts[i]["desc"] == desc
assert parts[i]["status"] == status.lower()
assert parts[2]["command"] == "exe1 1; exe2 2"
# TODO (post-34236): Remove this test when removing deprecated run_test(), etc.
def test_reporters_extract_xfail():
fake_bin = fs.join_path(fake_install_prefix, "bin", "fake-app")
outputs = """
==> Testing package fake-1.0-abcdefg
==> [2022-02-15-18:44:21.250165] Expecting return code in [3]
==> [2022-02-15-18:44:21.250165] test: test_fake: Checking fake imports
==> [2022-02-15-18:44:21.250175] Expecting return code in [3]
==> [2022-02-15-18:44:21.250200] '{0}'
PASSED
{1}
""".format(
fake_bin
fake_bin, str(TestStatus.PASSED)
).splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
assert len(parts) == 1
parts[0]["command"] == fake_bin
parts[0]["completed"] == "Expected to fail"
@ -123,6 +157,7 @@ def test_reporters_extract_skipped(state):
parts[0]["completed"] == expected
# TODO (post-34236): Remove this test when removing deprecated run_test(), etc.
def test_reporters_skip():
# This test ticks 3 boxes:
# 1) covers an as yet uncovered skip messages
@ -134,7 +169,7 @@ def test_reporters_skip():
==> Testing package fake-1.0-abcdefg
==> [2022-02-15-18:44:21.250165, 123456] Detected the following modules: fake1
==> {0}
==> [2022-02-15-18:44:21.250175, 123456] running fake program
==> [2022-02-15-18:44:21.250175, 123456] test: test_fake: running fake program
==> [2022-02-15-18:44:21.250200, 123456] '{1}'
INVALID
Results for test suite abcdefghijklmn
@ -150,6 +185,27 @@ def test_reporters_skip():
assert parts[0]["elapsed"] == 0.0
def test_reporters_skip_new():
outputs = """
==> [2023-04-06-15:55:13.094025] test: test_skip:
SKIPPED: test_skip: Package must be built with +python
==> [2023-04-06-15:55:13.540029] Completed testing
==> [2023-04-06-15:55:13.540275]
======================= SUMMARY: fake-1.0-abcdefg ========================
fake::test_skip .. SKIPPED
=========================== 1 skipped of 1 part ==========================
""".splitlines()
parts = spack.reporters.extract.extract_test_parts("fake", outputs)
assert len(parts) == 1
part = parts[0]
assert part["name"] == "test_skip"
assert part["status"] == "skipped"
assert part["completed"] == "Completed"
assert part["loglines"][0].startswith("SKIPPED:")
def test_reporters_report_for_package_no_stdout(tmpdir, monkeypatch, capfd):
class MockCDash(CDash):
def upload(*args, **kwargs):

View File

@ -2,13 +2,17 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import os
import sys
import pytest
from llnl.util.filesystem import join_path, mkdirp, touch
import spack.install_test
import spack.spec
from spack.util.executable import which
def _true(*args, **kwargs):
@ -28,7 +32,7 @@ def ensure_results(filename, expected):
assert have
def test_test_log_pathname(mock_packages, config):
def test_test_log_name(mock_packages, config):
"""Ensure test log path is reasonable."""
spec = spack.spec.Spec("libdwarf").concretized()
@ -87,7 +91,10 @@ def test_test_uninstalled(mock_packages, install_mockery, mock_test_stage):
@pytest.mark.parametrize(
"arguments,status,msg",
[({}, "SKIPPED", "Skipped"), ({"externals": True}, "NO-TESTS", "No tests")],
[
({}, spack.install_test.TestStatus.SKIPPED, "Skipped"),
({"externals": True}, spack.install_test.TestStatus.NO_TESTS, "No tests"),
],
)
def test_test_external(
mock_packages, install_mockery, mock_test_stage, monkeypatch, arguments, status, msg
@ -101,7 +108,7 @@ def test_test_external(
test_suite = spack.install_test.TestSuite([spec])
test_suite(**arguments)
ensure_results(test_suite.results_file, status)
ensure_results(test_suite.results_file, str(status))
if arguments:
ensure_results(test_suite.log_file_for_spec(spec), msg)
@ -181,3 +188,252 @@ def add_suite(package):
with pytest.raises(spack.install_test.TestSuiteNameError) as exc_info:
spack.install_test.get_test_suite(name)
assert "many suites named" in str(exc_info)
@pytest.mark.parametrize(
"virtuals,expected",
[(False, ["Mpich.test_mpich"]), (True, ["Mpi.test_hello", "Mpich.test_mpich"])],
)
def test_test_function_names(mock_packages, install_mockery, virtuals, expected):
"""Confirm test_function_names works as expected with/without virtuals."""
spec = spack.spec.Spec("mpich").concretized()
tests = spack.install_test.test_function_names(spec.package, add_virtuals=virtuals)
assert sorted(tests) == sorted(expected)
def test_test_functions_fails():
"""Confirm test_functions raises error if no package."""
with pytest.raises(ValueError, match="Expected a package"):
spack.install_test.test_functions(str)
def test_test_functions_pkgless(mock_packages, install_mockery, ensure_debug, capsys):
"""Confirm works for package providing a package-less virtual."""
spec = spack.spec.Spec("simple-standalone-test").concretized()
fns = spack.install_test.test_functions(spec.package, add_virtuals=True)
out = capsys.readouterr()
assert len(fns) == 1, "Expected only one test function"
assert "does not appear to have a package file" in out[1]
# TODO: This test should go away when compilers as dependencies is supported
def test_test_virtuals():
"""Confirm virtuals picks up non-unique, provided compilers."""
# This is an unrealistic case but it is set up to retrieve all possible
# virtual names in a single call.
def satisfies(spec):
return True
# Ensure spec will pick up the llvm+clang virtual compiler package names.
VirtualSpec = collections.namedtuple("VirtualSpec", ["name", "satisfies"])
vspec = VirtualSpec("llvm", satisfies)
# Ensure the package name is in the list that provides c, cxx, and fortran
# to pick up the three associated compilers and that virtuals provided will
# be deduped.
MyPackage = collections.namedtuple("MyPackage", ["name", "spec", "virtuals_provided"])
pkg = MyPackage("gcc", vspec, [vspec, vspec])
# This check assumes the method will not provide a unique set of compilers
v_names = spack.install_test.virtuals(pkg)
for name, number in [("c", 2), ("cxx", 2), ("fortran", 1), ("llvm", 1)]:
assert v_names.count(name) == number, "Expected {0} of '{1}'".format(number, name)
def test_package_copy_test_files_fails(mock_packages):
"""Confirm copy_test_files fails as expected without package or test_suite."""
vspec = spack.spec.Spec("something")
# Try without a package
with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
spack.install_test.copy_test_files(None, vspec)
assert "without a package" in str(exc_info)
# Try with a package without a test suite
MyPackage = collections.namedtuple("MyPackage", ["name", "spec", "test_suite"])
pkg = MyPackage("SomePackage", vspec, None)
with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
spack.install_test.copy_test_files(pkg, vspec)
assert "test suite is missing" in str(exc_info)
def test_package_copy_test_files_skips(mock_packages, ensure_debug, capsys):
"""Confirm copy_test_files errors as expected if no package class found."""
# Try with a non-concrete spec and package with a test suite
MockSuite = collections.namedtuple("TestSuite", ["specs"])
MyPackage = collections.namedtuple("MyPackage", ["name", "spec", "test_suite"])
vspec = spack.spec.Spec("something")
pkg = MyPackage("SomePackage", vspec, MockSuite([]))
spack.install_test.copy_test_files(pkg, vspec)
out = capsys.readouterr()[1]
assert "skipping test data copy" in out
assert "no package class found" in out
def test_process_test_parts(mock_packages):
"""Confirm process_test_parts fails as expected without package or test_suite."""
# Try without a package
with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
spack.install_test.process_test_parts(None, [])
assert "without a package" in str(exc_info)
# Try with a package without a test suite
MyPackage = collections.namedtuple("MyPackage", ["name", "test_suite"])
pkg = MyPackage("SomePackage", None)
with pytest.raises(spack.install_test.TestSuiteError) as exc_info:
spack.install_test.process_test_parts(pkg, [])
assert "test suite is missing" in str(exc_info)
def test_test_part_fail(tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage):
"""Confirm test_part with a ProcessError results in FAILED status."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
touch(pkg.tester.test_log_file)
name = "test_fail"
with spack.install_test.test_part(pkg, name, "fake ProcessError"):
raise spack.util.executable.ProcessError("Mock failure")
for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.FAILED
def test_test_part_pass(install_mockery_mutable_config, mock_fetch, mock_test_stage):
"""Confirm test_part that succeeds results in PASSED status."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
name = "test_echo"
msg = "nothing"
with spack.install_test.test_part(pkg, name, "echo"):
echo = which("echo")
echo(msg)
for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.PASSED
def test_test_part_skip(install_mockery_mutable_config, mock_fetch, mock_test_stage):
"""Confirm test_part that raises SkipTest results in test status SKIPPED."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
name = "test_skip"
with spack.install_test.test_part(pkg, name, "raise SkipTest"):
raise spack.install_test.SkipTest("Skipping the test")
for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.SKIPPED
def test_test_part_missing_exe_fail_fast(
tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage
):
"""Confirm test_part with fail fast enabled raises exception."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
touch(pkg.tester.test_log_file)
name = "test_fail_fast"
with spack.config.override("config:fail_fast", True):
with pytest.raises(spack.install_test.TestFailure, match="object is not callable"):
with spack.install_test.test_part(pkg, name, "fail fast"):
missing = which("no-possible-program")
missing()
test_parts = pkg.tester.test_parts
assert len(test_parts) == 1
for part_name, status in test_parts.items():
assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.FAILED
def test_test_part_missing_exe(
tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage
):
"""Confirm test_part with missing executable fails."""
s = spack.spec.Spec("trivial-smoke-test").concretized()
pkg = s.package
pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
touch(pkg.tester.test_log_file)
name = "test_missing_exe"
with spack.install_test.test_part(pkg, name, "missing exe"):
missing = which("no-possible-program")
missing()
test_parts = pkg.tester.test_parts
assert len(test_parts) == 1
for part_name, status in test_parts.items():
assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.FAILED
def test_check_special_outputs(tmpdir):
"""This test covers two related helper methods"""
contents = """CREATE TABLE packages (
name varchar(80) primary key,
has_code integer,
url varchar(160));
INSERT INTO packages VALUES('sqlite',1,'https://www.sqlite.org');
INSERT INTO packages VALUES('readline',1,'https://tiswww.case.edu/php/chet/readline/rltop.html');
INSERT INTO packages VALUES('xsdk',0,'http://xsdk.info');
COMMIT;
"""
filename = tmpdir.join("special.txt")
with open(filename, "w") as f:
f.write(contents)
expected = spack.install_test.get_escaped_text_output(filename)
spack.install_test.check_outputs(expected, contents)
# Let's also cover case where something expected is NOT in the output
expected.append("should not find me")
with pytest.raises(RuntimeError, match="Expected"):
spack.install_test.check_outputs(expected, contents)
def test_find_required_file(tmpdir):
filename = "myexe"
dirs = ["a", "b"]
for d in dirs:
path = tmpdir.join(d)
mkdirp(path)
touch(join_path(path, filename))
path = join_path(tmpdir.join("c"), "d")
mkdirp(path)
touch(join_path(path, filename))
# First just find a single path
results = spack.install_test.find_required_file(
tmpdir.join("c"), filename, expected=1, recursive=True
)
assert isinstance(results, str)
# Ensure none file if do not recursively search that directory
with pytest.raises(spack.install_test.SkipTest, match="Expected 1"):
spack.install_test.find_required_file(
tmpdir.join("c"), filename, expected=1, recursive=False
)
# Now make sure we get all of the files
results = spack.install_test.find_required_file(tmpdir, filename, expected=3, recursive=True)
assert isinstance(results, list) and len(results) == 3
def test_packagetest_fails(mock_packages):
MyPackage = collections.namedtuple("MyPackage", ["spec"])
s = spack.spec.Spec("a")
pkg = MyPackage(s)
with pytest.raises(ValueError, match="require a concrete package"):
spack.install_test.PackageTest(pkg)

View File

@ -337,15 +337,15 @@ def test_remove_complex_package_logic_filtered():
("grads", "rrlmwml3f2frdnqavmro3ias66h5b2ce"),
("llvm", "nufffum5dabmaf4l5tpfcblnbfjknvd3"),
# has @when("@4.1.0") and raw unicode literals
("mfem", "tiiv7uq7v2xtv24vdij5ptcv76dpazrw"),
("mfem@4.0.0", "tiiv7uq7v2xtv24vdij5ptcv76dpazrw"),
("mfem@4.1.0", "gxastq64to74qt4he4knpyjfdhh5auel"),
("mfem", "qtneutm6khd6epd2rhyuv2y6zavsxbed"),
("mfem@4.0.0", "qtneutm6khd6epd2rhyuv2y6zavsxbed"),
("mfem@4.1.0", "uit2ydzhra3b2mlvnq262qlrqqmuwq3d"),
# has @when("@1.5.0:")
("py-torch", "qs7djgqn7dy7r3ps4g7hv2pjvjk4qkhd"),
("py-torch@1.0", "qs7djgqn7dy7r3ps4g7hv2pjvjk4qkhd"),
("py-torch@1.6", "p4ine4hc6f2ik2f2wyuwieslqbozll5w"),
# has a print with multiple arguments
("legion", "zdpawm4avw3fllxcutvmqb5c3bj5twqt"),
("legion", "sffy6vz3dusxnxeetofoomlaieukygoj"),
# has nested `with when()` blocks and loops
("trilinos", "vqrgscjrla4hi7bllink7v6v6dwxgc2p"),
],

View File

@ -14,6 +14,7 @@
import spack.store
import spack.util.file_permissions as fp
import spack.util.spack_json as sjson
from spack.package_base import spack_times_log
def compute_hash(path: str, block_size: int = 1048576) -> str:
@ -161,6 +162,10 @@ def check_spec_manifest(spec):
if path == manifest_file:
continue
# Do not check the install times log file.
if entry == spack_times_log:
continue
data = manifest.pop(path, {})
results += check_entry(path, data)

View File

@ -14,8 +14,8 @@ class FailTestAudit(MakefilePackage):
version("1.0", md5="0123456789abcdef0123456789abcdef")
version("2.0", md5="abcdef0123456789abcdef0123456789")
build_time_test_callbacks = ["test"]
build_time_test_callbacks = ["test_build_callbacks"]
def test(self):
print("test: test-install-callbacks")
print("PASSED")
def test_build_callbacks(self):
"""test build time test callbacks"""
print("test-build-callbacks")

View File

@ -0,0 +1,16 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Mpi(Package):
"""Virtual package for the Message Passing Interface."""
homepage = "https://www.mpi-forum.org/"
virtual = True
def test_hello(self):
print("Hello there!")

View File

@ -28,3 +28,6 @@ class Mpich(Package):
def install(self, spec, prefix):
touch(prefix.mpich)
def test_mpich(self):
print("Testing mpich")

View File

@ -26,7 +26,8 @@ def install(self, spec, prefix):
print("AFTER INSTALL")
def test(self):
print("BEFORE TEST")
self.run_test("true") # run /bin/true
print("AFTER TEST")
def test_print(self):
"""Test print example."""
print("Running test_print")
print("And a second command")

View File

@ -0,0 +1,29 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.pkg.builtin.mock.python as mp
from spack.package import *
class PyTestCallback(mp.Python):
"""A package for testing stand-alone test methods as a callback."""
homepage = "http://www.example.com"
url = "http://www.example.com/test-callback-1.0.tar.gz"
# TODO (post-34236): "test" -> "test_callback" once remove "test" support
install_time_test_callbacks = ["test"]
version("1.0", "00000000000000000000000000000110")
version("2.0", "00000000000000000000000000000120")
def install(self, spec, prefix):
mkdirp(prefix.bin)
# TODO (post-34236): "test" -> "test_callback" once remove "test" support
def test(self):
super(PyTestCallback, self).test()
print("PyTestCallback test")

View File

@ -14,6 +14,9 @@ class SimpleStandaloneTest(Package):
version("1.0", md5="0123456789abcdef0123456789abcdef")
def test(self):
msg = "simple stand-alone test"
self.run_test("echo", [msg], expected=[msg], purpose="test: running {0}".format(msg))
provides("standalone-test")
def test_echo(self):
"""simple stand-alone test"""
echo = which("echo")
echo("testing echo", output=str.split, error=str.split)

View File

@ -17,5 +17,7 @@ class TestError(Package):
def install(self, spec, prefix):
mkdirp(prefix.bin)
def test(self):
self.run_test("false")
def test_false(self):
"""TestError test"""
false = which("false")
false()

View File

@ -17,5 +17,7 @@ class TestFail(Package):
def install(self, spec, prefix):
mkdirp(prefix.bin)
def test(self):
self.run_test("true", expected=["not in the output"])
def test_fails(self):
"""trigger test failure"""
unknown = which("unknown-program")
unknown()