move determine_number_of_jobs into spack.util.cpus, use it in concretize (#37620)

Harmen Stoppels 2023-09-07 13:16:51 +02:00 committed by GitHub
parent 4429e17db0
commit 7bd95f6ad3
10 changed files with 86 additions and 49 deletions
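In short: determine_number_of_jobs moves from spack.build_environment to spack.util.cpus, its parameters become keyword-only and take a Configuration object instead of separate command_line/config_default overrides, and separate concretization now uses the helper so its process count is also capped by the CPUs actually available. A minimal sketch of the relocated call, assuming a Spack checkout where these modules are importable:

import spack.util.cpus

# Packages that cannot build in parallel always get a single job.
serial_jobs = spack.util.cpus.determine_number_of_jobs(parallel=False)  # -> 1

# Parallel builds consult the global configuration (spack.config.CONFIG):
# a command-line config:build_jobs value wins outright; otherwise the
# configured value (falling back to 16) is capped at cpus_available().
parallel_jobs = spack.util.cpus.determine_number_of_jobs(parallel=True)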

View File

@@ -15,9 +15,9 @@
 from llnl.util import tty
-import spack.build_environment
 import spack.environment
 import spack.tengine
+import spack.util.cpus
 import spack.util.executable
 from spack.environment import depfile
@@ -137,7 +137,7 @@ def _install_with_depfile(self) -> None:
             "-C",
             str(self.environment_root()),
             "-j",
-            str(spack.build_environment.determine_number_of_jobs(parallel=True)),
+            str(spack.util.cpus.determine_number_of_jobs(parallel=True)),
             **kwargs,
         )

View File

@@ -68,7 +68,7 @@
 from spack.error import NoHeadersError, NoLibrariesError
 from spack.install_test import spack_install_test_log
 from spack.installer import InstallError
-from spack.util.cpus import cpus_available
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.environment import (
     SYSTEM_DIRS,
     EnvironmentModifications,
@@ -537,39 +537,6 @@ def update_compiler_args_for_dep(dep):
     env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))
 
 
-def determine_number_of_jobs(
-    parallel=False, command_line=None, config_default=None, max_cpus=None
-):
-    """
-    Packages that require sequential builds need 1 job. Otherwise we use the
-    number of jobs set on the command line. If not set, then we use the config
-    defaults (which is usually set through the builtin config scope), but we
-    cap to the number of CPUs available to avoid oversubscription.
-
-    Parameters:
-        parallel (bool or None): true when package supports parallel builds
-        command_line (int or None): command line override
-        config_default (int or None): config default number of jobs
-        max_cpus (int or None): maximum number of CPUs available. When None, this
-            value is automatically determined.
-    """
-    if not parallel:
-        return 1
-
-    if command_line is None and "command_line" in spack.config.scopes():
-        command_line = spack.config.get("config:build_jobs", scope="command_line")
-
-    if command_line is not None:
-        return command_line
-
-    max_cpus = max_cpus or cpus_available()
-
-    # in some rare cases _builtin config may not be set, so default to max 16
-    config_default = config_default or spack.config.get("config:build_jobs", 16)
-
-    return min(max_cpus, config_default)
-
-
 def set_module_variables_for_package(pkg):
     """Populate the Python module of a package with some useful global names.
 
     This makes things easier for package writers.
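Note the signature change that goes with the move: the function removed above accepted parallel positionally plus explicit command_line/config_default overrides, while its replacement in spack.util.cpus (added later in this commit) is keyword-only and reads overrides from a Configuration object. That is why the builder hunk below rewrites determine_number_of_jobs(parallel) as determine_number_of_jobs(parallel=parallel). A sketch of the new calling convention, using the same scope names as the updated tests:

import spack.config
from spack.util.cpus import determine_number_of_jobs

# Rough equivalent of the removed call style
#   determine_number_of_jobs(parallel=True, command_line=10, config_default=1, max_cpus=1)
# the command-line override now travels inside a Configuration object.
cfg = spack.config.Configuration(
    spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
    spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}}),
)
jobs = determine_number_of_jobs(parallel=True, max_cpus=1, config=cfg)  # -> 10

# Positional calls such as determine_number_of_jobs(True) now raise TypeError,
# because everything after the bare * in the new signature is keyword-only.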

View File

@@ -10,9 +10,10 @@
 import llnl.util.tty as tty
 import spack.builder
-from spack.build_environment import SPACK_NO_PARALLEL_MAKE, determine_number_of_jobs
+from spack.build_environment import SPACK_NO_PARALLEL_MAKE
 from spack.directives import build_system, extends, maintainers
 from spack.package_base import PackageBase
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.environment import env_flag
 from spack.util.executable import Executable, ProcessError
@@ -92,7 +93,7 @@ def install(self, pkg, spec, prefix):
             "--copy",
             "-i",
             "-j",
-            str(determine_number_of_jobs(parallel)),
+            str(determine_number_of_jobs(parallel=parallel)),
             "--",
             os.getcwd(),
         ]

View File

@@ -1504,7 +1504,7 @@ def _concretize_separately(self, tests=False):
         start = time.time()
         max_processes = min(
             len(arguments),  # Number of specs
-            spack.config.get("config:build_jobs"),  # Cap on build jobs
+            spack.util.cpus.determine_number_of_jobs(parallel=True),
         )
 
         # TODO: revisit this print as soon as darwin is parallel too
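The practical effect of this hunk: the number of concretization worker processes used to be bounded only by config:build_jobs, and is now also bounded by the CPUs available, since the helper caps at cpus_available() unless a command-line override is set. A small illustration with a hypothetical spec list (the variable name arguments mirrors the surrounding code):

import spack.util.cpus

arguments = [("pkg%d" % i,) for i in range(32)]  # stand-in for the real per-spec arguments
max_processes = min(
    len(arguments),  # number of specs to concretize
    spack.util.cpus.determine_number_of_jobs(parallel=True),  # capped at cpus_available()
)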

View File

@@ -96,6 +96,7 @@
     on_package_attributes,
 )
 from spack.spec import InvalidSpecDetected, Spec
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.executable import *
 from spack.variant import (
     any_combination_of,

View File

@@ -16,8 +16,9 @@
 import spack.package_base
 import spack.spec
 import spack.util.spack_yaml as syaml
-from spack.build_environment import _static_to_shared_library, determine_number_of_jobs, dso_suffix
+from spack.build_environment import _static_to_shared_library, dso_suffix
 from spack.paths import build_env_path
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.environment import EnvironmentModifications
 from spack.util.executable import Executable
 from spack.util.path import Path, convert_to_platform_path
@@ -442,7 +443,7 @@ def test_parallel_false_is_not_propagating(default_mock_concretization):
     spack.build_environment.set_module_variables_for_package(s["b"].package)
     assert s["b"].package.module.make_jobs == spack.build_environment.determine_number_of_jobs(
-        s["b"].package.parallel
+        parallel=s["b"].package.parallel
     )
@@ -474,28 +475,62 @@ def test_setting_dtags_based_on_config(config_setting, expected_flag, config, mo
 def test_build_jobs_sequential_is_sequential():
     assert (
-        determine_number_of_jobs(parallel=False, command_line=8, config_default=8, max_cpus=8) == 1
+        determine_number_of_jobs(
+            parallel=False,
+            max_cpus=8,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 8}}),
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 8}}),
+            ),
+        )
+        == 1
     )
 
 
 def test_build_jobs_command_line_overrides():
     assert (
-        determine_number_of_jobs(parallel=True, command_line=10, config_default=1, max_cpus=1)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=1,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}}),
+            ),
+        )
         == 10
     )
     assert (
-        determine_number_of_jobs(parallel=True, command_line=10, config_default=100, max_cpus=100)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=100,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}}),
+            ),
+        )
         == 10
     )
 
 
 def test_build_jobs_defaults():
     assert (
-        determine_number_of_jobs(parallel=True, command_line=None, config_default=1, max_cpus=10)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=10,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}})
+            ),
+        )
        == 1
     )
     assert (
-        determine_number_of_jobs(parallel=True, command_line=None, config_default=100, max_cpus=10)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=10,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}})
+            ),
+        )
         == 10
     )

View File

@@ -5,6 +5,9 @@
 import multiprocessing
 import os
+from typing import Optional
+
+import spack.config
 
 
 def cpus_available():
@@ -18,3 +21,36 @@ def cpus_available():
         return len(os.sched_getaffinity(0))  # novermin
     except Exception:
         return multiprocessing.cpu_count()
+
+
+def determine_number_of_jobs(
+    *,
+    parallel: bool = False,
+    max_cpus: int = cpus_available(),
+    config: Optional["spack.config.Configuration"] = None,
+) -> int:
+    """
+    Packages that require sequential builds need 1 job. Otherwise we use the
+    number of jobs set on the command line. If not set, then we use the config
+    defaults (which is usually set through the builtin config scope), but we
+    cap to the number of CPUs available to avoid oversubscription.
+
+    Parameters:
+        parallel: true when package supports parallel builds
+        max_cpus: maximum number of CPUs to use (defaults to cpus_available())
+        config: configuration object (defaults to global config)
+    """
+    if not parallel:
+        return 1
+
+    cfg = config or spack.config.CONFIG
+
+    # Command line overrides all
+    try:
+        command_line = cfg.get("config:build_jobs", default=None, scope="command_line")
+        if command_line is not None:
+            return command_line
+    except ValueError:
+        pass
+
+    return min(max_cpus, cfg.get("config:build_jobs", 16))
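One detail worth spelling out: the try/except exists because asking a Configuration for a scope it does not contain raises ValueError, which is exactly the situation for configurations built without a "command_line" scope, as in test_build_jobs_defaults above. A short sketch of that path, with hypothetical scope contents:

import spack.config
from spack.util.cpus import determine_number_of_jobs

# No "command_line" scope at all: the scoped lookup raises ValueError, the
# except branch is taken, and the result is min(max_cpus, config:build_jobs)
# from the merged configuration.
defaults_only = spack.config.Configuration(
    spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}})
)
assert determine_number_of_jobs(parallel=True, max_cpus=10, config=defaults_only) == 10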

View File

@@ -6,7 +6,6 @@
 import os
 import re
 
-from spack.build_environment import MakeExecutable, determine_number_of_jobs
 from spack.package import *

View File

@@ -3,7 +3,6 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 
-from spack.build_environment import MakeExecutable, determine_number_of_jobs
 from spack.package import *
 from spack.util.executable import which_string

View File

@@ -4,7 +4,6 @@
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 
 import sys
 
-from spack.build_environment import MakeExecutable, determine_number_of_jobs
 from spack.package import *
 from spack.util.executable import which_string