build env: simplify handling of parallel jobs (#11524)
This PR implements several refactors requested in #11373, specifically:

- Config scopes are used to handle builtin defaults, command line overrides and package overrides (`parallel=False`)
- The `Package.make_jobs` attribute has been removed; `make_jobs` remains as a module-scope variable in the build environment
- The use of the `-j` argument has been rationalized across commands
- Move the `-j`/`--jobs` argument into `spack.cmd.common.arguments`
- Add unit tests to check that setting parallel jobs works as expected
- Add a new test to ensure that the build job setting is isolated to each build
- Fix packages that used `Package.make_jobs` (i.e. `bazel`)
This commit is contained in:
parent 1bd4521f72 · commit c291866b9a
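For context on the first bullet: with `build_jobs` seeded in the builtin defaults scope, `spack.config.get('config:build_jobs')` always returns a value, and `-j` simply writes a higher-precedence value into the `command_line` scope. Below is a minimal, self-contained sketch of that precedence model; the `Scope` class and `get_config` helper are illustrative stand-ins, not Spack's actual API:

```python
import multiprocessing

class Scope(object):
    """Stand-in for a configuration scope: a named {key: value} store."""
    def __init__(self, name, data):
        self.name = name
        self.data = data

def get_config(scopes, key):
    """Return the value from the highest-precedence scope defining key."""
    for scope in reversed(scopes):  # later scopes take precedence
        if key in scope.data:
            return scope.data[key]
    return None

scopes = [
    Scope('_builtin', {'config:build_jobs': multiprocessing.cpu_count()}),
    Scope('user', {}),          # nothing set by the user
    Scope('command_line', {}),  # populated when -j is given
]

assert get_config(scopes, 'config:build_jobs') == multiprocessing.cpu_count()

# Passing `-j 4` on the command line effectively does this:
scopes[-1].data['config:build_jobs'] = 4
assert get_config(scopes, 'config:build_jobs') == 4
```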
@@ -404,12 +404,9 @@ def set_build_environment_variables(pkg, env, dirty):
 def _set_variables_for_single_module(pkg, module):
     """Helper function to set module variables for single module."""
     # number of jobs spack will build with.
-    jobs = spack.config.get('config:build_jobs') or multiprocessing.cpu_count()
-    if not pkg.parallel:
-        jobs = 1
-    elif pkg.make_jobs:
-        jobs = pkg.make_jobs
+    jobs = spack.config.get('config:build_jobs') if pkg.parallel else 1
+    assert jobs is not None, "no default set for config:build_jobs"
 
     m = module
     m.make_jobs = jobs
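Because the config system now guarantees a default, the per-package chain of fallbacks collapses to one expression. A minimal sketch of the simplified logic, with `configured_build_jobs` standing in for `spack.config.get('config:build_jobs')`:

```python
def compute_make_jobs(parallel, configured_build_jobs):
    # configured_build_jobs stands in for spack.config.get('config:build_jobs'),
    # which the _builtin defaults scope guarantees is never None.
    jobs = configured_build_jobs if parallel else 1
    assert jobs is not None, "no default set for config:build_jobs"
    return jobs

assert compute_make_jobs(parallel=True, configured_build_jobs=8) == 8
assert compute_make_jobs(parallel=False, configured_build_jobs=8) == 1
```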
@@ -15,9 +15,7 @@
 
 
 def setup_parser(subparser):
-    subparser.add_argument(
-        '-j', '--jobs', action='store', type=int,
-        help="explicitly set number of make jobs (default: #cpus)")
+    arguments.add_common_arguments(subparser, ['jobs'])
     subparser.add_argument(
         '--keep-prefix', action='store_true', dest='keep_prefix',
         help="don't remove the install prefix if installation fails")
@@ -38,7 +36,6 @@ def bootstrap(parser, args, **kwargs):
         'keep_prefix': args.keep_prefix,
         'keep_stage': args.keep_stage,
         'install_deps': 'dependencies',
-        'make_jobs': args.jobs,
         'verbose': args.verbose,
         'dirty': args.dirty
     })
@@ -72,6 +72,35 @@ def _specs(self, **kwargs):
     return sorted(specs.values())
 
 
+class SetParallelJobs(argparse.Action):
+    """Sets the correct value for parallel build jobs.
+
+    The value is set in the command line configuration scope so that
+    it can be retrieved using the spack.config API.
+    """
+    def __call__(self, parser, namespace, jobs, option_string):
+        # Jobs is a single integer, type conversion is already applied
+        # see https://docs.python.org/3/library/argparse.html#action-classes
+        if jobs < 1:
+            msg = 'invalid value for argument "{0}" '\
+                  '[expected a positive integer, got "{1}"]'
+            raise ValueError(msg.format(option_string, jobs))
+
+        spack.config.set('config:build_jobs', jobs, scope='command_line')
+
+        setattr(namespace, 'jobs', jobs)
+
+    @property
+    def default(self):
+        # This default is coded as a property so that look-up
+        # of this value is done only on demand
+        return spack.config.get('config:build_jobs')
+
+    @default.setter
+    def default(self, value):
+        pass
+
+
 _arguments['constraint'] = Args(
     'constraint', nargs=argparse.REMAINDER, action=ConstraintAction,
     help='constraint to select a subset of installed packages')
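The action validates and propagates `-j` at parse time, which is what lets the per-command checks be deleted below. A self-contained sketch of the same pattern, with a plain dict standing in for the command-line config scope (`StoreJobs` and `CONFIG` are illustrative names, not Spack's):

```python
import argparse

CONFIG = {}  # stand-in for the command-line configuration scope

class StoreJobs(argparse.Action):
    """Validate -j and mirror it into a shared config store at parse time."""
    def __call__(self, parser, namespace, jobs, option_string=None):
        if jobs < 1:
            raise ValueError(
                'invalid value for argument "{0}" '
                '[expected a positive integer, got "{1}"]'.format(
                    option_string, jobs))
        CONFIG['build_jobs'] = jobs          # mirror into the config store
        setattr(namespace, self.dest, jobs)  # and into the argparse namespace

parser = argparse.ArgumentParser()
parser.add_argument('-j', '--jobs', action=StoreJobs, type=int, dest='jobs')

args = parser.parse_args(['-j', '4'])
assert args.jobs == 4 and CONFIG['build_jobs'] == 4
```

The no-op `default` setter in the real `SetParallelJobs` exists because `argparse.Action.__init__` assigns `self.default = default`; swallowing that write keeps `default` resolving through the property, so the value is read from the live configuration on demand rather than frozen when the parser is built.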
@@ -111,17 +140,13 @@ def _specs(self, **kwargs):
     '-L', '--very-long', action='store_true',
     help='show full dependency hashes as well as versions')
 
-_arguments['jobs'] = Args(
-    '-j', '--jobs', action='store', type=int, dest='jobs',
-    help="explicitely set number of make jobs. default is #cpus")
-
 _arguments['tags'] = Args(
     '-t', '--tags', action='append',
     help='filter a package query by tags')
 
 _arguments['jobs'] = Args(
-    '-j', '--jobs', action='store', type=int, dest="jobs",
-    help="explicitly set number of make jobs, default is #cpus.")
+    '-j', '--jobs', action=SetParallelJobs, type=int, dest='jobs',
+    help='explicitly set number of parallel jobs')
 
 _arguments['install_status'] = Args(
     '-I', '--install-status', action='store_true', default=False,
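With the flag defined once in `_arguments`, every command gets an identical `-j`/`--jobs` option via `add_common_arguments`. A rough, self-contained sketch of that registry pattern; the `Args` helper here approximates, rather than reproduces, the one in `spack.cmd.common.arguments`:

```python
import argparse

class Args(object):
    """Captures add_argument() positional flags and keyword arguments."""
    def __init__(self, *flags, **kwargs):
        self.flags, self.kwargs = flags, kwargs

_arguments = {
    'jobs': Args('-j', '--jobs', type=int, dest='jobs',
                 help='explicitly set number of parallel jobs'),
}

def add_common_arguments(parser, names):
    for name in names:
        args = _arguments[name]
        parser.add_argument(*args.flags, **args.kwargs)

# Each command's setup_parser can now just say:
subparser = argparse.ArgumentParser(prog='install')
add_common_arguments(subparser, ['jobs'])
assert subparser.parse_args(['-j', '8']).jobs == 8
```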
@@ -50,10 +50,6 @@ def diy(self, args):
     if not args.spec:
         tty.die("spack diy requires a package spec argument.")
 
-    if args.jobs is not None:
-        if args.jobs <= 0:
-            tty.die("the -j option must be a positive integer")
-
     specs = spack.cmd.parse_specs(args.spec)
     if len(specs) > 1:
         tty.die("spack diy only takes one spec.")
@@ -35,7 +35,6 @@ def update_kwargs_from_args(args, kwargs):
         'keep_stage': args.keep_stage,
         'restage': not args.dont_restage,
         'install_source': args.install_source,
-        'make_jobs': args.jobs,
         'verbose': args.verbose,
         'fake': args.fake,
         'dirty': args.dirty,
@@ -232,10 +231,6 @@ def install(parser, args, **kwargs):
     else:
         tty.die("install requires a package argument or a spack.yaml file")
 
-    if args.jobs is not None:
-        if args.jobs <= 0:
-            tty.die("The -j option must be a positive integer!")
-
     if args.no_checksum:
         spack.config.set('config:checksum', False, scope='command_line')
 
@@ -396,9 +396,6 @@ class PackageBase(with_metaclass(PackageMeta, PackageViewMixin, object)):
     #: By default we build in parallel. Subclasses can override this.
     parallel = True
 
-    #: # jobs to use for parallel make. If set, overrides default of ncpus.
-    make_jobs = None
-
     #: By default do not run tests within package's install()
     run_tests = False
 
@@ -1369,8 +1366,6 @@ def do_install(self, **kwargs):
             skip_patch (bool): Skip patch stage of build if True.
             verbose (bool): Display verbose build output (by default,
                 suppresses it)
-            make_jobs (int): Number of make jobs to use for install. Default
-                is ncpus
             fake (bool): Don't really build; install fake stub files instead.
             explicit (bool): True if package was explicitly installed, False
                 if package was implicitly installed (as a dependency).
@@ -1393,7 +1388,6 @@ def do_install(self, **kwargs):
         install_deps = kwargs.get('install_deps', True)
         skip_patch = kwargs.get('skip_patch', False)
         verbose = kwargs.get('verbose', False)
-        make_jobs = kwargs.get('make_jobs', None)
         fake = kwargs.get('fake', False)
         explicit = kwargs.get('explicit', False)
         tests = kwargs.get('tests', False)
@@ -1469,9 +1463,6 @@ def do_install(self, **kwargs):
         self.run_tests = (tests is True or
                           tests and self.name in tests)
 
-        # Set parallelism before starting build.
-        self.make_jobs = make_jobs
-
         # Then install the package itself.
         def build_process():
             """This implements the process forked for each build.
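Since `Package.make_jobs` is gone, build code now reads `make_jobs` as a module-level global that `_set_variables_for_single_module` injects into each package's module. A self-contained sketch of that injection, using a throwaway module in place of a real package module:

```python
import types

# Stand-in for a package's module; inject make_jobs into it the way
# _set_variables_for_single_module does for real package modules.
pkg_module = types.ModuleType('mock_pkg')
pkg_module.make_jobs = 8

source = (
    "def build_command():\n"
    "    # 'make_jobs' resolves as a module-level global here\n"
    "    return ['make', '-j{0}'.format(make_jobs)]\n"
)
# Defining the function with the module's __dict__ as globals mirrors how
# package code sees the injected variable.
exec(source, pkg_module.__dict__)

assert pkg_module.build_command() == ['make', '-j8']
```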
@@ -279,3 +279,20 @@ def normpaths(paths):
 
     finally:
         delattr(dep_pkg, 'libs')
+
+
+def test_parallel_false_is_not_propagating(config, mock_packages):
+    class AttributeHolder(object):
+        pass
+
+    # Package A has parallel = False and depends on B which instead
+    # can be built in parallel
+    s = spack.spec.Spec('a foobar=bar')
+    s.concretize()
+
+    for spec in s.traverse():
+        expected_jobs = spack.config.get('config:build_jobs') \
+            if spec.package.parallel else 1
+        m = AttributeHolder()
+        spack.build_environment._set_variables_for_single_module(spec.package, m)
+        assert m.make_jobs == expected_jobs
lib/spack/spack/test/cmd/common/arguments.py (new file, +40)
@@ -0,0 +1,40 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+import argparse
+import multiprocessing
+
+import pytest
+
+import spack.cmd.common.arguments as arguments
+import spack.config
+
+
+@pytest.fixture()
+def parser():
+    p = argparse.ArgumentParser()
+    arguments.add_common_arguments(p, ['jobs'])
+    yield p
+    # Cleanup the command line scope if it was set during tests
+    if 'command_line' in spack.config.config.scopes:
+        spack.config.config.remove_scope('command_line')
+
+
+@pytest.mark.parametrize('cli_args,expected', [
+    (['-j', '24'], 24),
+    ([], multiprocessing.cpu_count())
+])
+def test_setting_parallel_jobs(parser, cli_args, expected):
+    namespace = parser.parse_args(cli_args)
+    assert namespace.jobs == expected
+    assert spack.config.get('config:build_jobs') == expected
+
+
+def test_negative_integers_not_allowed_for_parallel_jobs(parser):
+    with pytest.raises(ValueError) as exc_info:
+        parser.parse_args(['-j', '-2'])
+
+    assert 'expected a positive integer' in str(exc_info.value)
@@ -288,7 +288,11 @@ def config(configuration_dir):
 
     real_configuration = spack.config.config
 
-    test_scopes = [
+    defaults = spack.config.InternalConfigScope(
+        '_builtin', spack.config.config_defaults
+    )
+    test_scopes = [defaults]
+    test_scopes += [
         spack.config.ConfigScope(name, str(configuration_dir.join(name)))
         for name in ['site', 'system', 'user']]
     test_scopes.append(spack.config.InternalConfigScope('command_line'))
@@ -32,6 +32,8 @@ class A(AutotoolsPackage):
 
     depends_on('b', when='foobar=bar')
 
+    parallel = False
+
     def with_or_without_fee(self, activated):
         if not activated:
             return '--no-fee'
@@ -3,6 +3,8 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 
+import inspect
+
 from spack import *
 from multiprocessing import cpu_count
 from spack.util.environment import env_flag
@@ -90,8 +92,9 @@ def __call__(self, *args, **kwargs):
         return super(BazelExecutable, self).__call__(*args, **kwargs)
 
     jobs = cpu_count()
+    dependent_module = inspect.getmodule(dependent_spec.package)
     if not dependent_spec.package.parallel:
         jobs = 1
-    elif dependent_spec.package.make_jobs:
-        jobs = dependent_spec.package.make_jobs
+    elif dependent_module.make_jobs:
+        jobs = dependent_module.make_jobs
     module.bazel = BazelExecutable('bazel', 'build', jobs)
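`bazel` previously read `Package.make_jobs`; it now recovers the dependent package's module with `inspect.getmodule` and reads the module-scope value instead. A self-contained sketch of that lookup (the module and class names here are made up):

```python
import inspect
import sys
import types

# Build a throwaway module holding a class, then inject make_jobs the way
# the build environment does for real package modules.
mod = types.ModuleType('mock_pkg_module')
sys.modules['mock_pkg_module'] = mod  # getmodule resolves via sys.modules
exec("class MockPackage(object):\n    parallel = True\n", mod.__dict__)
mod.make_jobs = 4

pkg = mod.MockPackage()
dependent_module = inspect.getmodule(pkg)  # follows pkg.__module__
assert dependent_module is mod
assert dependent_module.make_jobs == 4
```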