WIP -- wait for 18205 to continue

This commit is contained in:
Gregory Becker
2020-10-21 18:37:21 -07:00
parent 1452020f22
commit afe1fd89b9
19 changed files with 253 additions and 179 deletions

View File

@@ -175,7 +175,7 @@ In the ``perl`` package, we can see:
@run_after('build')
@on_package_attributes(run_tests=True)
def test(self):
def build_test(self):
make('test')
As you can guess, this runs ``make test`` *after* building the package,
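For orientation, a hypothetical package using this hook under the new name might look like the sketch below (the package name, URL, and checksum are placeholders, not part of this commit):

from spack import *


class HypotheticalMakePkg(MakefilePackage):
    """Illustrative only: runs `make test` after the build phase when
    tests are requested (e.g. `spack install --test=root`)."""

    homepage = "https://example.com"
    url = "https://example.com/pkg-1.0.tar.gz"
    version('1.0', sha256='0' * 64)    # placeholder checksum

    @run_after('build')
    @on_package_attributes(run_tests=True)
    def build_test(self):
        make('test')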

View File

@@ -56,7 +56,7 @@ overridden like so:
.. code-block:: python
def test(self):
def build_test(self):
scons('check')
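In a full SConsPackage this override sits directly on the package class; a minimal sketch (the class name is hypothetical, and SConsPackage's default build-time test callback machinery is assumed):

class HypotheticalSconsPkg(SConsPackage):
    """Illustrative only: override the default build-time test callback."""

    def build_test(self):
        scons('check')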

View File

@@ -324,7 +324,8 @@ class log_output(object):
work within test frameworks like nose and pytest.
"""
def __init__(self, file_like=None, echo=False, debug=0, buffer=False):
def __init__(self, file_like=None, output=None, error=None,
echo=False, debug=0, buffer=False):
"""Create a new output log context manager.
Args:
@@ -349,13 +350,15 @@ def __init__(self, file_like=None, echo=False, debug=0, buffer=False):
"""
self.file_like = file_like
self.output = output or sys.stdout
self.error = error or sys.stderr
self.echo = echo
self.debug = debug
self.buffer = buffer
self._active = False # used to prevent re-entry
def __call__(self, file_like=None, echo=None, debug=None, buffer=None):
def __call__(self, file_like=None, output=None, error=None,
echo=None, debug=None, buffer=None):
"""This behaves the same as init. It allows a logger to be reused.
Arguments are the same as for ``__init__()``. Args here take
@@ -376,6 +379,10 @@ def __call__(self, file_like=None, echo=None, debug=None, buffer=None):
"""
if file_like is not None:
self.file_like = file_like
if output is not None:
self.output = output
if error is not None:
self.error = error
if echo is not None:
self.echo = echo
if debug is not None:
@@ -434,8 +441,8 @@ def __enter__(self):
self.process = fork_context.Process(
target=_writer_daemon,
args=(
input_stream, read_fd, write_fd, self.echo, self.log_file,
child_pipe
input_stream, read_fd, write_fd, self.echo, self.output,
self.log_file, child_pipe
)
)
self.process.daemon = True # must set before start()
@@ -448,43 +455,54 @@ def __enter__(self):
# Flush immediately before redirecting so that anything buffered
# goes to the original stream
sys.stdout.flush()
sys.stderr.flush()
self.output.flush()
self.error.flush()
# Now do the actual output redirection.
self.use_fds = _file_descriptors_work(sys.stdout, sys.stderr)
self.use_fds = _file_descriptors_work(self.output, self.error)
if self.use_fds:
# We try first to use OS-level file descriptors, as this
# redirects output for subprocesses and system calls.
# Save old stdout and stderr file descriptors
self._saved_stdout = os.dup(sys.stdout.fileno())
self._saved_stderr = os.dup(sys.stderr.fileno())
self._saved_output = os.dup(self.output.fileno())
self._saved_error = os.dup(self.error.fileno())
# redirect to the pipe we created above
os.dup2(write_fd, sys.stdout.fileno())
os.dup2(write_fd, sys.stderr.fileno())
os.dup2(write_fd, self.output.fileno())
os.dup2(write_fd, self.error.fileno())
os.close(write_fd)
else:
# Handle I/O the Python way. This won't redirect lower-level
# output, but it's the best we can do, and the caller
# shouldn't expect any better, since *they* have apparently
# redirected I/O the Python way.
# Save old stdout and stderr file objects
self._saved_stdout = sys.stdout
self._saved_stderr = sys.stderr
self._saved_output = self.output
self._saved_error = self.error
# create a file object for the pipe; redirect to it.
pipe_fd_out = os.fdopen(write_fd, 'w')
sys.stdout = pipe_fd_out
sys.stderr = pipe_fd_out
self.output = pipe_fd_out
self.error = pipe_fd_out
# Unbuffer stdout and stderr at the Python level
if not self.buffer:
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
self.output = Unbuffered(self.output)
self.error = Unbuffered(self.error)
# Force color and debug settings now that we have redirected.
tty.color.set_color_when(forced_color)
@@ -499,20 +517,29 @@ def __enter__(self):
def __exit__(self, exc_type, exc_val, exc_tb):
# Flush any buffered output to the logger daemon.
sys.stdout.flush()
sys.stderr.flush()
self.output.flush()
self.error.flush()
# restore previous output settings, either the low-level way or
# the python way
if self.use_fds:
os.dup2(self._saved_stdout, sys.stdout.fileno())
os.close(self._saved_stdout)
os.dup2(self._saved_output, self.output.fileno())
os.close(self._saved_output)
os.dup2(self._saved_stderr, sys.stderr.fileno())
os.close(self._saved_stderr)
os.dup2(self._saved_error, self.error.fileno())
os.close(self._saved_error)
else:
sys.stdout = self._saved_stdout
sys.stderr = self._saved_stderr
self.output = self._saved_output
self.error = self._saved_error
# print log contents in parent if needed.
if self.write_log_in_parent:
@@ -546,16 +573,17 @@ def force_echo(self):
# output. We use these control characters rather than, say, a
# separate pipe, because they're in-band and assured to appear
# exactly before and after the text we want to echo.
sys.stdout.write(xon)
sys.stdout.flush()
self.output.write(xon)
self.output.flush()
try:
yield
finally:
sys.stdout.write(xoff)
sys.stdout.flush()
self.output.write(xoff)
self.output.flush()
def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
def _writer_daemon(stdin, read_fd, write_fd, echo, echo_stream, log_file,
control_pipe):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both
@@ -598,6 +626,7 @@ def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
immediately closed by the writer daemon)
echo (bool): initial echo setting -- controlled by user and
preserved across multiple writer daemons
echo_stream (stream): stream to which echoed output is written
log_file (file-like): file to log all output
control_pipe (Pipe): multiprocessing pipe on which to send control
information to the parent
@@ -652,8 +681,8 @@ def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
# Echo to stdout if requested or forced.
if echo or force_echo:
sys.stdout.write(line)
sys.stdout.flush()
echo_stream.write(line)
echo_stream.flush()
# Stripped output to log file.
log_file.write(_strip(line))
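Taken together, the new `output` and `error` parameters let callers choose where echoed output goes instead of always inheriting `sys.stdout`/`sys.stderr`. A hedged usage sketch, mirroring the call this commit adds to `Executable.__call__` (the echoed command is only illustrative):

import subprocess
import sys

from six import StringIO
from llnl.util.tty.log import log_output

captured = StringIO()
# Output produced inside the context is logged into `captured`, while
# echoing goes to the explicitly supplied output/error streams.
with log_output(captured, output=sys.stdout, error=sys.stderr, echo=True):
    subprocess.call(['echo', 'hello'])

result = captured.getvalue()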

View File

@@ -365,6 +365,7 @@ def _proper_compiler_style(cspec, aspec):
compilers = spack.compilers.compilers_for_spec(
cspec, arch_spec=aspec
)
# If the spec passed as argument is concrete, we want to check
# that the versions match exactly
if (cspec.concrete and compilers and
@@ -454,7 +455,7 @@ def concretize_compiler_flags(self, spec):
# continue. `return True` here to force concretization to keep
# running.
return True
raise Exception
compiler_match = lambda other: (
spec.compiler == other.compiler and
spec.architecture == other.architecture)

View File

@@ -1133,7 +1133,7 @@ def build_process():
# Spawn a daemon that reads from a pipe and redirects
# everything to log_path
with log_output(pkg.log_path, echo, True) as logger:
with log_output(pkg.log_path, echo=echo, debug=True) as logger:
for phase_name, phase_attr in zip(
pkg.phases, pkg._InstallPhase_phases):
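The switch to keyword arguments matters because the widened `log_output` signature now places `output` and `error` before `echo`, so the old positional call `log_output(pkg.log_path, echo, True)` would bind `echo` to `output` and `True` to `error`. A small sketch of the safe form (the log path and echo flag are placeholders standing in for `pkg.log_path` and the real setting):

from llnl.util.tty.log import log_output

echo = True
log_path = 'spack-build-out.txt'   # placeholder for pkg.log_path

# Passing echo/debug by keyword keeps them from mis-binding to the new
# output/error parameters.
with log_output(log_path, echo=echo, debug=True) as logger:
    print('phase output is redirected and logged here')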

View File

@@ -1720,15 +1720,72 @@ def test_process():
# maybe enforce this later
shutil.copytree(data_source, data_dir)
# Get all methods named test_* from package
d = spec_pkg.__class__.__dict__
test_fns = [
fn for name, fn in d.items()
if (name == 'test' or name.startswith('test_'))
and hasattr(fn, '__call__')
]
# grab the function for each method so we can call
# it with this package in place of its `self`
# object
test_fn = spec_pkg.__class__.test
if not isinstance(test_fn, types.FunctionType):
test_fn = test_fn.__func__
test_fns = list(map(
lambda x: x.__func__ if not isinstance(
x, types.FunctionType) else x,
test_fns))
# Run the tests
test_fn(self)
for fn in test_fns:
# Run the tests
print('TEST: %s' %
(fn.__doc__ or fn.__name__))
try:
fn(self)
print('PASSED')
except BaseException as e:
# print a summary of the error to the log file
# so that cdash and junit reporters know about it
exc_type, _, tb = sys.exc_info()
print('FAILED: {0}'.format(e))
# construct combined stacktrace of processes
import traceback
stack = traceback.extract_stack()[:-1]
stack += traceback.extract_tb(tb)
# Package files have a line added at import time,
# so they are effectively one-indexed. For other files
# we subtract 1 from the lineno for zero-indexing.
for i, entry in enumerate(stack):
filename, lineno, function, text = entry
if not spack.paths.is_package_file(filename):
lineno = lineno - 1
stack[i] = (filename, lineno, function, text)
# Format the stack to print and print it
out = traceback.format_list(stack)
for line in out:
print(line.rstrip('\n'))
if exc_type is spack.util.executable.ProcessError:
out = six.StringIO()
spack.build_environment.write_log_summary(
out, 'test', self.test_log_file, last=1)
m = out.getvalue()
else:
# Get context from combined stack
m = '\n'.join(
spack.build_environment.get_package_context(
stack)
)
exc = e # e is deleted after this block
# If we fail fast, raise another error
if spack.config.get('config:fail_fast', False):
raise TestFailure([(exc, m)])
else:
self.test_failures.append((exc, m))
# If fail-fast was on, we error out above
# If we collect errors, raise them in batch here
@@ -1742,9 +1799,6 @@ def test_process():
spack.build_environment.fork(
self, test_process, dirty=dirty, fake=False, context='test')
def test(self):
pass
def run_test(self, exe, options=[], expected=[], status=0,
installed=False, purpose='', skip_missing=False,
work_dir=None):
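The discovery loop above is plain class-dict inspection; a standalone sketch of the same mechanism (no Spack types involved, Python 3 shown, where class-dict entries are already plain functions so the `__func__` unwrapping is a no-op):

class Demo(object):

    def test(self):
        print('base test')

    def test_extra(self):
        print('extra test')

    def helper(self):
        print('not collected')


d = Demo.__dict__
test_fns = [
    fn for name, fn in d.items()
    if (name == 'test' or name.startswith('test_')) and hasattr(fn, '__call__')
]

obj = Demo()
for fn in test_fns:
    print('TEST: %s' % (fn.__doc__ or fn.__name__))
    fn(obj)    # unbound function called with an explicit instance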

View File

@@ -72,12 +72,12 @@ def __init__(self, args):
tty.verbose("Using CDash auth token from environment")
self.authtoken = os.environ.get('SPACK_CDASH_AUTH_TOKEN')
packages = []
if getattr(args, 'spec', ''):
packages = args.spec
elif getattr(args, 'specs', ''):
packages = args.specs
else:
packages = []
elif getattr(args, 'specfiles', ''):
for file in args.specfiles:
with open(file, 'r') as f:
s = spack.spec.Spec.from_yaml(f)

View File

@@ -118,7 +118,7 @@ def test_install_dirty_flag(arguments, expected):
assert args.dirty == expected
def test_package_output(tmpdir, capsys, install_mockery, mock_fetch):
def test_package_output(tmpdir, install_mockery, mock_fetch):
"""Ensure output printed from pkgs is captured by output redirection."""
# we can't use output capture here because it interferes with Spack's
# logging. TODO: see whether we can get multiple log_outputs to work
@@ -697,18 +697,16 @@ def test_install_only_dependencies_of_all_in_env(
assert os.path.exists(dep.prefix)
def test_install_help_does_not_show_cdash_options(capsys):
def test_install_help_does_not_show_cdash_options():
"""Make sure `spack install --help` does not describe CDash arguments"""
with pytest.raises(SystemExit):
install('--help')
captured = capsys.readouterr()
assert 'CDash URL' not in captured.out
output = install('--help')
assert 'CDash URL' not in output
def test_install_help_cdash(capsys):
def test_install_help_cdash():
"""Make sure `spack install --help-cdash` describes CDash arguments"""
install_cmd = SpackCommand('install')
out = install_cmd('--help-cdash')
out = install('--help-cdash')
assert 'CDash URL' in out

View File

@@ -58,7 +58,7 @@ def test_test_output(mock_test_stage, mock_packages, mock_archive, mock_fetch,
with open(outfile, 'r') as f:
output = f.read()
assert "BEFORE TEST" in output
assert "true: expect command status in [" in output
assert "RUNNING TEST" in output
assert "AFTER TEST" in output
assert "FAILED" not in output
@@ -84,7 +84,7 @@ def test_test_output_on_failure(
with capfd.disabled():
out = spack_test('run', 'test-fail', fail_on_error=False)
assert "Expected 'not in the output' to match output of `true`" in out
assert "assert False" in out
assert "TestFailure" in out
@@ -106,8 +106,8 @@ def test_show_log_on_error(
'install_mockery_mutable_config'
)
@pytest.mark.parametrize('pkg_name,msgs', [
('test-error', ['FAILED: Command exited', 'TestFailure']),
('test-fail', ['FAILED: Expected', 'TestFailure'])
('test-error', ['FAILED:', 'Command exited', 'TestFailure']),
('test-fail', ['FAILED:', 'assert False', 'TestFailure'])
])
def test_junit_output_with_failures(tmpdir, mock_test_stage, pkg_name, msgs):
install(pkg_name)
@@ -143,7 +143,6 @@ def test_cdash_output_test_error(
'--log-file=cdash_reports',
'test-error')
report_dir = tmpdir.join('cdash_reports')
print(tmpdir.listdir())
assert report_dir in tmpdir.listdir()
report_file = report_dir.join('test-error_Test.xml')
assert report_file in report_dir.listdir()

View File

@@ -263,12 +263,12 @@ def concretize_multi_provider(self):
('dealii', 'develop'),
('xsdk', '0.4.0'),
])
def concretize_difficult_packages(self, a, b):
def concretize_difficult_packages(self, spec, version):
"""Test a couple of large packages that are often broken due
to current limitations in the concretizer"""
s = Spec(a + '@' + b)
s = Spec(spec + '@' + version)
s.concretize()
assert s[a].version == ver(b)
assert s[spec].version == ver(version)
def test_concretize_two_virtuals(self):

View File

@@ -7,7 +7,7 @@
import re
import shlex
import subprocess
from six import string_types, text_type
from six import string_types, text_type, StringIO
import llnl.util.tty as tty
@@ -93,17 +93,11 @@ def __call__(self, *args, **kwargs):
* python streams, e.g. open Python file objects, or ``os.devnull``
* filenames, which will be automatically opened for writing
* ``str``, as in the Python string type. If you set these to ``str``,
output and error will be written to pipes and returned as a string.
If both ``output`` and ``error`` are set to ``str``, then one string
is returned containing output concatenated with error. Not valid
for ``input``
* ``str.split``, as in the ``split`` method of the Python string type.
Behaves the same as ``str``, except that value is also written to
``stdout`` or ``stderr``.
By default, the subprocess inherits the parent's file descriptors.
Returns:
(str) The interleaved output and error
"""
# Environment
env_arg = kwargs.get('env', None)
@@ -126,17 +120,20 @@ def __call__(self, *args, **kwargs):
ignore_errors = (ignore_errors, )
input = kwargs.pop('input', None)
output = kwargs.pop('output', None)
error = kwargs.pop('error', None)
output = kwargs.pop('output', sys.stdout)
error = kwargs.pop('error', sys.stderr)
if input is str:
raise ValueError('Cannot use `str` as input stream.')
if output is str:
output = os.devnull
if error is str:
error = os.devnull
def streamify(arg, mode):
if isinstance(arg, string_types):
return open(arg, mode), True
elif arg in (str, str.split):
return subprocess.PIPE, False
else:
return arg, False
@@ -161,37 +158,45 @@ def streamify(arg, mode):
tty.debug(cmd_line)
try:
proc = subprocess.Popen(
cmd,
stdin=istream,
stderr=estream,
stdout=ostream,
env=env)
out, err = proc.communicate()
output_string = StringIO()
# Determine whether any of our streams are StringIO
# We cannot call `Popen` directly with a StringIO object
output_use_stringIO = False
if not hasattr(ostream, 'fileno'):
output_use_stringIO = True
ostream_stringIO = ostream
ostream = subprocess.PIPE
error_use_stringIO = False
if not hasattr(estream, 'fileno'):
error_use_stringIO = True
estream_stringIO = estream
estream = subprocess.PIPE
result = None
if output in (str, str.split) or error in (str, str.split):
result = ''
if output in (str, str.split):
outstr = text_type(out.decode('utf-8'))
result += outstr
if output is str.split:
sys.stdout.write(outstr)
if error in (str, str.split):
errstr = text_type(err.decode('utf-8'))
result += errstr
if error is str.split:
sys.stderr.write(errstr)
try:
with tty.log.log_output(
output_string, output=ostream, error=estream, echo=True):
proc = subprocess.Popen(
cmd,
stdin=istream,
stderr=estream,
stdout=ostream,
env=env)
out, err = proc.communicate()
if output_use_stringIO:
ostream_stringIO.write(out)
if error_use_stringIO:
estream_stringIO.write(err)
result = output_string.getvalue()
rc = self.returncode = proc.returncode
if fail_on_error and rc != 0 and (rc not in ignore_errors):
long_msg = cmd_line
if result:
# If the output is not captured in the result, it will have
# been stored either in the specified files (e.g. if
# 'output' specifies a file) or written to the parent's
# stdout/stderr (e.g. if 'output' is not specified)
if output == os.devnull or error == os.devnull:
# If the output is not being printed anywhere, include it
# in the error message. Otherwise, don't pollute the error
# message.
long_msg += '\n' + result
raise ProcessError('Command exited with status %d:' %
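The StringIO handling above exists because `subprocess.Popen` needs real file descriptors: a stream without a usable `fileno()` is swapped for `PIPE` and filled in after the process exits. A standalone sketch of that idea (using a slightly more defensive check than `hasattr(stream, 'fileno')`, since `io.StringIO` defines `fileno()` but raises when it is called):

import subprocess

from six import StringIO


def has_real_fileno(stream):
    # True only if `stream` is backed by an OS-level file descriptor.
    try:
        stream.fileno()
        return True
    except Exception:
        return False


buf = StringIO()    # in-memory target with no real descriptor
stdout_arg = buf if has_real_fileno(buf) else subprocess.PIPE

proc = subprocess.Popen(['echo', 'hello'], stdout=stdout_arg)
out, _ = proc.communicate()

if stdout_arg is subprocess.PIPE:
    # Copy the captured bytes into the in-memory buffer afterwards.
    buf.write(out.decode('utf-8'))

print(buf.getvalue())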

View File

@@ -130,7 +130,7 @@ def sign(cls, key, file, output, clearsign=False):
@classmethod
def verify(cls, signature, file, suppress_warnings=False):
if suppress_warnings:
cls.gpg()('--verify', signature, file, error=str)
cls.gpg()('--verify', signature, file, error=os.devnull)
else:
cls.gpg()('--verify', signature, file)

View File

@@ -25,7 +25,7 @@ def install(self, spec, prefix):
print("AFTER INSTALL")
def test(self):
def test_true(self):
print("BEFORE TEST")
self.run_test('true') # run /bin/true
which('echo')('RUNNING TEST') # run an executable
print("AFTER TEST")

View File

@@ -18,4 +18,4 @@ def install(self, spec, prefix):
mkdirp(prefix.bin)
def test(self):
self.run_test('false')
which('false')()

View File

@@ -18,4 +18,4 @@ def install(self, spec, prefix):
mkdirp(prefix.bin)
def test(self):
self.run_test('true', expected=['not in the output'])
assert False

View File

@@ -41,14 +41,18 @@ def configure_args(self):
'--with-repmgr-ssl=no',
]
def test(self):
"""Perform smoke tests on the installed package binaries."""
exes = [
def test_version_arg(self):
"""Test executables run and respond to -V argument"""
cmds = [
'db_checkpoint', 'db_deadlock', 'db_dump', 'db_load',
'db_printlog', 'db_stat', 'db_upgrade', 'db_verify'
]
for exe in exes:
reason = 'test version of {0} is {1}'.format(exe,
self.spec.version)
self.run_test(exe, ['-V'], [self.spec.version.string],
installed=True, purpose=reason, skip_missing=True)
for cmd in cmds:
exe = which(cmd)
if not exe:
# not all versions are guaranteed to provide every executable
continue
assert self.prefix in exe.path
output = exe('-V')
assert self.spec.version.string in output

View File

@@ -129,31 +129,13 @@ def flag_handler(self, name, flags):
flags.append('-fcommon')
return (flags, None, None)
def _test_check_versions(self):
spec_vers = str(self.spec.version)
def test_check_versions(self):
"""Check that executables run and respond to "--version" argument."""
cmds = ['ar', 'c++filt', 'coffdump', 'dlltool', 'elfedit', 'gprof',
'ld', 'nm', 'objdump', 'ranlib', 'readelf', 'size', 'strings']
checks = {
'ar': spec_vers,
'c++filt': spec_vers,
'coffdump': spec_vers,
'dlltool': spec_vers,
'elfedit': spec_vers,
'gprof': spec_vers,
'ld': spec_vers,
'nm': spec_vers,
'objdump': spec_vers,
'ranlib': spec_vers,
'readelf': spec_vers,
'size': spec_vers,
'strings': spec_vers,
}
for exe in checks:
expected = checks[exe]
reason = 'test: ensuring version of {0} is {1}' \
.format(exe, expected)
self.run_test(exe, '--version', expected, installed=True,
purpose=reason, skip_missing=True)
def test(self):
self._test_check_versions()
for cmd in cmds:
exe = which(cmd, required=True)
assert self.prefix in exe.path
output = exe('--version')
assert str(self.spec.version) in output

View File

@@ -29,13 +29,13 @@ def setup_build_tests(self):
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources(self.extra_install_tests)
def _run_smoke_tests(self):
"""Build and run the added smoke (install) test."""
def test_link_and_run(self):
"""Check ability to link and run with libsigsegv."""
data_dir = self.test_suite.current_test_data_dir
prog = 'smoke_test'
src = data_dir.join('{0}.c'.format(prog))
options = [
compiler_options = [
'-I{0}'.format(self.prefix.include),
src,
'-o',
@@ -43,16 +43,16 @@ def _run_smoke_tests(self):
'-L{0}'.format(self.prefix.lib),
'-lsigsegv',
'{0}{1}'.format(self.compiler.cc_rpath_arg, self.prefix.lib)]
reason = 'test: checking ability to link to the library'
self.run_test('cc', options, [], installed=False, purpose=reason)
which('cc', required=True)(*compiler_options)
# Now run the program and confirm the output matches expectations
expected = get_escaped_text_output(data_dir.join('smoke_test.out'))
reason = 'test: checking ability to use the library'
self.run_test(prog, [], expected, purpose=reason)
with open(data_dir.join('smoke_test.out'), 'r') as f:
expected = f.read()
output = which(prog)(output=str, error=str)
assert expected in output
def _run_build_tests(self):
"""Run selected build tests."""
def test_libsigsegv_unit_tests(self):
"""Run selected sigsegv tests from package unit tests"""
passed = 'Test passed'
checks = {
'sigsegv1': [passed],
@@ -62,14 +62,12 @@ def _run_build_tests(self):
'stackoverflow2': ['recursion', 'overflow', 'violation', passed],
}
for exe, expected in checks.items():
reason = 'test: checking {0} output'.format(exe)
self.run_test(exe, [], expected, installed=True, purpose=reason,
skip_missing=True)
def test(self):
# Run the simple built-in smoke test
self._run_smoke_tests()
# Run test programs pulled from the build
self._run_build_tests()
for cmd, expected in checks.items():
exe = which(cmd)
if not exe:
# The executable may be missing if the package was installed before
# we knew to capture these files from the build system
continue
output = exe(output=str, error=str)
for e in expected:
assert e in output

View File

@@ -1110,21 +1110,25 @@ def remove_files_from_view(self, view, merge_map):
else:
os.remove(dst)
def test(self):
# do not use self.command because we are also testing the run env
exe = self.command.name
def test_fail(self):
which('false')()
self.run_test('/bin/false')
def test_expected_fail(self):
"""Ensure the test suite is properly catching failed exes"""
false = which('false')
false(fail_on_error=False)
assert false.returncode == 1
self.run_test('/usr/bin/false')
def test_hello_world(self):
"""Test that a python hello world program works"""
exe = which(self.command.name)
self.run_test('/usr/bin/true', status=1)
assert self.prefix in exe.path
output = exe('-c', 'print("hello world!")', output=str, error=str)
assert 'hello world!' in output
# test hello world
self.run_test(exe, options=['-c', 'print("hello world!")'],
expected=['hello world!'])
# check that the executable comes from the spec prefix
# also checks imports work
self.run_test(exe, options=['-c', 'import sys; print(sys.executable)'],
expected=[self.spec.prefix])
def test_import(self):
"""Test that python can import builtin modules"""
exe = which(self.command.name)
output = exe('-c', 'import sys; print(sys.executable)', output=str, error=str)
assert self.spec.prefix in output