spack buildcache push: parallel in general (#45682)

Make spack buildcache push for the non-OCI case also parallel, and make --update-index more efficient
Harmen Stoppels 2024-08-14 17:19:45 +02:00 committed by GitHub
parent 94961ffe0a
commit 29b50527a6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 837 additions and 807 deletions
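The push path this commit moves toward looks roughly like the sketch below, pieced together from the call sites in the diff that follows. The wrapper name push_specs_in_parallel is made up for illustration; the bindist entry points and keyword names are the ones that appear in the changed code.

import spack.binary_distribution as bindist

def push_specs_in_parallel(specs, push_url, signing_key=None, force=False, update_index=False):
    # default_push_context() supplies a scratch directory plus a concurrent
    # executor, so the OCI and the plain-mirror paths share the same machinery.
    with bindist.default_push_context() as (tmpdir, executor):
        skipped, upload_errors = bindist._push(
            specs,
            out_url=push_url,
            force=force,
            update_index=update_index,
            signing_key=signing_key,
            tmpdir=tmpdir,
            executor=executor,
        )
    return skipped, upload_errors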

File diff suppressed because it is too large

View File

@ -38,6 +38,7 @@
import spack.paths
import spack.repo
import spack.spec
import spack.stage
import spack.util.git
import spack.util.gpg as gpg_util
import spack.util.spack_yaml as syaml
@ -1370,15 +1371,6 @@ def can_verify_binaries():
return len(gpg_util.public_keys()) >= 1
def _push_to_build_cache(spec: spack.spec.Spec, sign_binaries: bool, mirror_url: str) -> None:
"""Unchecked version of the public API, for easier mocking"""
bindist.push_or_raise(
spec,
spack.mirror.Mirror.from_url(mirror_url).push_url,
bindist.PushOptions(force=True, unsigned=not sign_binaries),
)
def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool:
"""Push one or more binary packages to the mirror.
@ -1389,20 +1381,13 @@ def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: b
sign_binaries: If True, spack will attempt to sign binary package before pushing.
"""
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
signing_key = bindist.select_signing_key() if sign_binaries else None
try:
_push_to_build_cache(spec, sign_binaries, mirror_url)
bindist.push_or_raise([spec], out_url=mirror_url, signing_key=signing_key)
return True
except bindist.PushToBuildCacheError as e:
tty.error(str(e))
tty.error(f"Problem writing to {mirror_url}: {e}")
return False
except Exception as e:
# TODO (zackgalbreath): write an adapter for boto3 exceptions so we can catch a specific
# exception instead of parsing str(e)...
msg = str(e)
if any(x in msg for x in ["Access Denied", "InvalidAccessKeyId"]):
tty.error(f"Permission problem writing to {mirror_url}: {msg}")
return False
raise
def remove_other_mirrors(mirrors_to_keep, scope=None):
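Reassembled from the right-hand side of the hunk above, the new push_to_build_cache body reads roughly as follows; treat it as a sketch rather than the verbatim file contents.

import llnl.util.tty as tty

import spack.binary_distribution as bindist

def push_to_build_cache(spec, mirror_url: str, sign_binaries: bool) -> bool:
    tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
    signing_key = bindist.select_signing_key() if sign_binaries else None
    try:
        # Errors now surface as PushToBuildCacheError instead of being
        # pattern-matched out of boto3 exception strings.
        bindist.push_or_raise([spec], out_url=mirror_url, signing_key=signing_key)
        return True
    except bindist.PushToBuildCacheError as e:
        tty.error(f"Problem writing to {mirror_url}: {e}")
        return False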

View File

@ -3,16 +3,13 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import concurrent.futures
import copy
import glob
import hashlib
import json
import os
import shutil
import sys
import tempfile
from typing import Dict, List, Optional, Tuple
from typing import List, Tuple
import llnl.util.tty as tty
from llnl.string import plural
@ -24,7 +21,6 @@
import spack.deptypes as dt
import spack.environment as ev
import spack.error
import spack.hash_types as ht
import spack.mirror
import spack.oci.oci
import spack.oci.opener
@ -41,22 +37,7 @@
from spack import traverse
from spack.cmd import display_specs
from spack.cmd.common import arguments
from spack.oci.image import (
from spack.oci.image import ImageReference
Digest,
ImageReference,
default_config,
default_index_tag,
default_manifest,
default_tag,
tag_is_spec,
)
from spack.oci.oci import (
copy_missing_layers_with_retry,
get_manifest_and_config_with_retry,
list_tags,
upload_blob_with_retry,
upload_manifest_with_retry,
)
from spack.spec import Spec, save_dependency_specfiles
description = "create, download and install binary packages"
@ -340,13 +321,6 @@ def _format_spec(spec: Spec) -> str:
return spec.cformat("{name}{@version}{/hash:7}")
def _progress(i: int, total: int):
if total > 1:
digits = len(str(total))
return f"[{i+1:{digits}}/{total}] "
return ""
def _skip_no_redistribute_for_public(specs):
remaining_specs = list()
removed_specs = list()
@ -372,7 +346,7 @@ class PackagesAreNotInstalledError(spack.error.SpackError):
def __init__(self, specs: List[Spec]):
super().__init__(
"Cannot push non-installed packages",
", ".join(elide_list(list(_format_spec(s) for s in specs), 5)),
", ".join(elide_list([_format_spec(s) for s in specs], 5)),
)
@ -380,10 +354,6 @@ class PackageNotInstalledError(spack.error.SpackError):
"""Raised when a spec is not installed but picked to be packaged.""" """Raised when a spec is not installed but picked to be packaged."""
class MissingLayerError(spack.error.SpackError):
"""Raised when a required layer for a dependency is missing in an OCI registry."""
def _specs_to_be_packaged(
requested: List[Spec], things_to_install: str, build_deps: bool
) -> List[Spec]:
@ -394,7 +364,7 @@ def _specs_to_be_packaged(
deptype = dt.ALL
else:
deptype = dt.RUN | dt.LINK | dt.TEST
return [
specs = [
s
for s in traverse.traverse_nodes(
requested,
@ -405,6 +375,8 @@ def _specs_to_be_packaged(
)
if not s.external
]
specs.reverse()
return specs
def push_fn(args):
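_specs_to_be_packaged now reverses its topologically ordered result before returning it, so dependencies come before the packages that need them. A small illustration of that ordering, reusing the traversal call that appears elsewhere in this diff (the function name is made up):

from spack import traverse

def leaves_first(requested):
    # order="topo" yields roots before leaves; reversing the list puts every
    # package ahead of anything that depends on it when pushed in order.
    specs = [
        s
        for s in traverse.traverse_nodes(requested, order="topo", deptype=("link", "run"), root=True)
        if not s.external
    ]
    specs.reverse()
    return specs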
@ -445,6 +417,10 @@ def push_fn(args):
"Code signing is currently not supported for OCI images. " "Code signing is currently not supported for OCI images. "
"Use --unsigned to silence this warning." "Use --unsigned to silence this warning."
) )
unsigned = True
# Select a signing key, or None if unsigned.
signing_key = None if unsigned else (args.key or bindist.select_signing_key())
specs = _specs_to_be_packaged(
roots,
@ -471,13 +447,10 @@ def push_fn(args):
(s, PackageNotInstalledError("package not installed")) for s in not_installed
)
# TODO: move into bindist.push_or_raise
with bindist.default_push_context() as (tmpdir, executor):
if target_image:
base_image = ImageReference.from_string(args.base_image) if args.base_image else None
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
skipped, base_images, checksums, upload_errors = _push_oci(
skipped, base_images, checksums, upload_errors = bindist._push_oci(
target_image=target_image,
base_image=base_image,
installed_specs_with_deps=specs,
@ -495,46 +468,28 @@ def push_fn(args):
tagged_image = target_image.with_tag(args.tag)
# _push_oci may not populate base_images if binaries were already in the registry
for spec in roots:
_update_base_images(
bindist._oci_update_base_images(
base_image=base_image,
target_image=target_image,
spec=spec,
base_image_cache=base_images,
)
_put_manifest(base_images, checksums, tagged_image, tmpdir, None, None, *roots)
bindist._oci_put_manifest(
base_images, checksums, tagged_image, tmpdir, None, None, *roots
)
tty.info(f"Tagged {tagged_image}") tty.info(f"Tagged {tagged_image}")
else: else:
skipped = [] skipped, upload_errors = bindist._push(
specs,
for i, spec in enumerate(specs): out_url=push_url,
try: force=args.force,
bindist.push_or_raise( update_index=args.update_index,
spec, signing_key=signing_key,
push_url, tmpdir=tmpdir,
bindist.PushOptions( executor=executor,
force=args.force, )
unsigned=unsigned, failed.extend(upload_errors)
key=args.key,
regenerate_index=args.update_index,
),
)
msg = f"{_progress(i, len(specs))}Pushed {_format_spec(spec)}"
if len(specs) == 1:
msg += f" to {push_url}"
tty.info(msg)
except bindist.NoOverwriteException:
skipped.append(_format_spec(spec))
# Catch any other exception unless the fail fast option is set
except Exception as e:
if args.fail_fast or isinstance(
e, (bindist.PickKeyException, bindist.NoKeyException)
):
raise
failed.append((spec, e))
if skipped: if skipped:
if len(specs) == 1: if len(specs) == 1:
@ -567,409 +522,12 @@ def push_fn(args):
),
)
# Update the index if requested
# Update the OCI index if requested
# TODO: remove update index logic out of bindist; should be once after all specs are pushed
# not once per spec.
if target_image and len(skipped) < len(specs) and args.update_index:
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
_update_index_oci(target_image, tmpdir, executor)
bindist._oci_update_index(target_image, tmpdir, executor)
def _get_spack_binary_blob(image_ref: ImageReference) -> Optional[spack.oci.oci.Blob]:
"""Get the spack tarball layer digests and size if it exists"""
try:
manifest, config = get_manifest_and_config_with_retry(image_ref)
return spack.oci.oci.Blob(
compressed_digest=Digest.from_string(manifest["layers"][-1]["digest"]),
uncompressed_digest=Digest.from_string(config["rootfs"]["diff_ids"][-1]),
size=manifest["layers"][-1]["size"],
)
except Exception:
return None
def _push_single_spack_binary_blob(image_ref: ImageReference, spec: spack.spec.Spec, tmpdir: str):
filename = os.path.join(tmpdir, f"{spec.dag_hash()}.tar.gz")
# Create an oci.image.layer aka tarball of the package
compressed_tarfile_checksum, tarfile_checksum = spack.oci.oci.create_tarball(spec, filename)
blob = spack.oci.oci.Blob(
Digest.from_sha256(compressed_tarfile_checksum),
Digest.from_sha256(tarfile_checksum),
os.path.getsize(filename),
)
# Upload the blob
upload_blob_with_retry(image_ref, file=filename, digest=blob.compressed_digest)
# delete the file
os.unlink(filename)
return blob
def _retrieve_env_dict_from_config(config: dict) -> dict:
"""Retrieve the environment variables from the image config file.
Sets a default value for PATH if it is not present.
Args:
config (dict): The image config file.
Returns:
dict: The environment variables.
"""
env = {"PATH": "/bin:/usr/bin"}
if "Env" in config.get("config", {}):
for entry in config["config"]["Env"]:
key, value = entry.split("=", 1)
env[key] = value
return env
def _archspec_to_gooarch(spec: spack.spec.Spec) -> str:
name = spec.target.family.name
name_map = {"aarch64": "arm64", "x86_64": "amd64"}
return name_map.get(name, name)
def _put_manifest(
base_images: Dict[str, Tuple[dict, dict]],
checksums: Dict[str, spack.oci.oci.Blob],
image_ref: ImageReference,
tmpdir: str,
extra_config: Optional[dict],
annotations: Optional[dict],
*specs: spack.spec.Spec,
):
architecture = _archspec_to_gooarch(specs[0])
expected_blobs: List[Spec] = [
s
for s in traverse.traverse_nodes(specs, order="topo", deptype=("link", "run"), root=True)
if not s.external
]
expected_blobs.reverse()
base_manifest, base_config = base_images[architecture]
env = _retrieve_env_dict_from_config(base_config)
# If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
# This is because Singularity / Apptainer is very strict about not mixing them.
base_manifest_mediaType = base_manifest.get(
"mediaType", "application/vnd.oci.image.manifest.v1+json"
)
use_docker_format = (
base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
)
spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
# Create an oci.image.config file
config = copy.deepcopy(base_config)
# Add the diff ids of the blobs
for s in expected_blobs:
# If a layer for a dependency has gone missing (due to removed manifest in the registry, a
# failed push, or a local forced uninstall), we cannot create a runnable container image.
# If an OCI registry is only used for storage, this is not a hard error, but for now we
# raise an exception unconditionally, until someone requests a more lenient behavior.
checksum = checksums.get(s.dag_hash())
if not checksum:
raise MissingLayerError(f"missing layer for {_format_spec(s)}")
config["rootfs"]["diff_ids"].append(str(checksum.uncompressed_digest))
# Set the environment variables
config["config"]["Env"] = [f"{k}={v}" for k, v in env.items()]
if extra_config:
# From the OCI v1.0 spec:
# > Any extra fields in the Image JSON struct are considered implementation
# > specific and MUST be ignored by any implementations which are unable to
# > interpret them.
config.update(extra_config)
config_file = os.path.join(tmpdir, f"{specs[0].dag_hash()}.config.json")
with open(config_file, "w") as f:
json.dump(config, f, separators=(",", ":"))
config_file_checksum = Digest.from_sha256(
spack.util.crypto.checksum(hashlib.sha256, config_file)
)
# Upload the config file
upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
manifest = {
"mediaType": base_manifest_mediaType,
"schemaVersion": 2,
"config": {
"mediaType": base_manifest["config"]["mediaType"],
"digest": str(config_file_checksum),
"size": os.path.getsize(config_file),
},
"layers": [
*(layer for layer in base_manifest["layers"]),
*(
{
"mediaType": (
"application/vnd.docker.image.rootfs.diff.tar.gzip"
if use_docker_format
else "application/vnd.oci.image.layer.v1.tar+gzip"
),
"digest": str(checksums[s.dag_hash()].compressed_digest),
"size": checksums[s.dag_hash()].size,
}
for s in expected_blobs
),
],
}
if not use_docker_format and annotations:
manifest["annotations"] = annotations
# Finally upload the manifest
upload_manifest_with_retry(image_ref, manifest=manifest)
# delete the config file
os.unlink(config_file)
def _update_base_images(
*,
base_image: Optional[ImageReference],
target_image: ImageReference,
spec: spack.spec.Spec,
base_image_cache: Dict[str, Tuple[dict, dict]],
):
"""For a given spec and base image, copy the missing layers of the base image with matching
arch to the registry of the target image. If no base image is specified, create a dummy
manifest and config file."""
architecture = _archspec_to_gooarch(spec)
if architecture in base_image_cache:
return
if base_image is None:
base_image_cache[architecture] = (
default_manifest(),
default_config(architecture, "linux"),
)
else:
base_image_cache[architecture] = copy_missing_layers_with_retry(
base_image, target_image, architecture
)
def _push_oci(
*,
target_image: ImageReference,
base_image: Optional[ImageReference],
installed_specs_with_deps: List[Spec],
tmpdir: str,
executor: concurrent.futures.Executor,
force: bool = False,
) -> Tuple[
List[str],
Dict[str, Tuple[dict, dict]],
Dict[str, spack.oci.oci.Blob],
List[Tuple[Spec, BaseException]],
]:
"""Push specs to an OCI registry
Args:
image_ref: The target OCI image
base_image: Optional base image, which will be copied to the target registry.
installed_specs_with_deps: The installed specs to push, excluding externals,
including deps, ordered from roots to leaves.
force: Whether to overwrite existing layers and manifests in the buildcache.
Returns:
A tuple consisting of the list of skipped specs already in the build cache,
a dictionary mapping architectures to base image manifests and configs,
a dictionary mapping each spec's dag hash to a blob,
and a list of tuples of specs with errors of failed uploads.
"""
# Reverse the order
installed_specs_with_deps = list(reversed(installed_specs_with_deps))
# Spec dag hash -> blob
checksums: Dict[str, spack.oci.oci.Blob] = {}
# arch -> (manifest, config)
base_images: Dict[str, Tuple[dict, dict]] = {}
# Specs not uploaded because they already exist
skipped = []
if not force:
tty.info("Checking for existing specs in the buildcache")
blobs_to_upload = []
tags_to_check = (target_image.with_tag(default_tag(s)) for s in installed_specs_with_deps)
available_blobs = executor.map(_get_spack_binary_blob, tags_to_check)
for spec, maybe_blob in zip(installed_specs_with_deps, available_blobs):
if maybe_blob is not None:
checksums[spec.dag_hash()] = maybe_blob
skipped.append(_format_spec(spec))
else:
blobs_to_upload.append(spec)
else:
blobs_to_upload = installed_specs_with_deps
if not blobs_to_upload:
return skipped, base_images, checksums, []
tty.info(
f"{len(blobs_to_upload)} specs need to be pushed to "
f"{target_image.domain}/{target_image.name}"
)
# Upload blobs
blob_futures = [
executor.submit(_push_single_spack_binary_blob, target_image, spec, tmpdir)
for spec in blobs_to_upload
]
concurrent.futures.wait(blob_futures)
manifests_to_upload: List[Spec] = []
errors: List[Tuple[Spec, BaseException]] = []
# And update the spec to blob mapping for successful uploads
for spec, blob_future in zip(blobs_to_upload, blob_futures):
error = blob_future.exception()
if error is None:
manifests_to_upload.append(spec)
checksums[spec.dag_hash()] = blob_future.result()
else:
errors.append((spec, error))
# Copy base images if necessary
for spec in manifests_to_upload:
_update_base_images(
base_image=base_image,
target_image=target_image,
spec=spec,
base_image_cache=base_images,
)
def extra_config(spec: Spec):
spec_dict = spec.to_dict(hash=ht.dag_hash)
spec_dict["buildcache_layout_version"] = bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION
spec_dict["binary_cache_checksum"] = {
"hash_algorithm": "sha256",
"hash": checksums[spec.dag_hash()].compressed_digest.digest,
}
return spec_dict
# Upload manifests
tty.info("Uploading manifests")
manifest_futures = [
executor.submit(
_put_manifest,
base_images,
checksums,
target_image.with_tag(default_tag(spec)),
tmpdir,
extra_config(spec),
{"org.opencontainers.image.description": spec.format()},
spec,
)
for spec in manifests_to_upload
]
concurrent.futures.wait(manifest_futures)
# Print the image names of the top-level specs
for spec, manifest_future in zip(manifests_to_upload, manifest_futures):
error = manifest_future.exception()
if error is None:
tty.info(f"Pushed {_format_spec(spec)} to {target_image.with_tag(default_tag(spec))}")
else:
errors.append((spec, error))
return skipped, base_images, checksums, errors
def _config_from_tag(image_ref_and_tag: Tuple[ImageReference, str]) -> Optional[dict]:
image_ref, tag = image_ref_and_tag
# Don't allow recursion here, since Spack itself always uploads
# vnd.oci.image.manifest.v1+json, not vnd.oci.image.index.v1+json
_, config = get_manifest_and_config_with_retry(image_ref.with_tag(tag), tag, recurse=0)
# Do very basic validation: if "spec" is a key in the config, it
# must be a Spec object too.
return config if "spec" in config else None
def _update_index_oci(
image_ref: ImageReference, tmpdir: str, pool: concurrent.futures.Executor
) -> None:
tags = list_tags(image_ref)
# Fetch all image config files in parallel
spec_dicts = pool.map(_config_from_tag, ((image_ref, tag) for tag in tags if tag_is_spec(tag)))
# Populate the database
db_root_dir = os.path.join(tmpdir, "db_root")
db = bindist.BuildCacheDatabase(db_root_dir)
for spec_dict in spec_dicts:
spec = Spec.from_dict(spec_dict)
db.add(spec, directory_layout=None)
db.mark(spec, "in_buildcache", True)
# Create the index.json file
index_json_path = os.path.join(tmpdir, "index.json")
with open(index_json_path, "w") as f:
db._write_to_file(f)
# Create an empty config.json file
empty_config_json_path = os.path.join(tmpdir, "config.json")
with open(empty_config_json_path, "wb") as f:
f.write(b"{}")
# Upload the index.json file
index_shasum = Digest.from_sha256(spack.util.crypto.checksum(hashlib.sha256, index_json_path))
upload_blob_with_retry(image_ref, file=index_json_path, digest=index_shasum)
# Upload the config.json file
empty_config_digest = Digest.from_sha256(
spack.util.crypto.checksum(hashlib.sha256, empty_config_json_path)
)
upload_blob_with_retry(image_ref, file=empty_config_json_path, digest=empty_config_digest)
# Push a manifest file that references the index.json file as a layer
# Notice that we push this as if it is an image, which it of course is not.
# When the ORAS spec becomes official, we can use that instead of a fake image.
# For now we just use the OCI image spec, so that we don't run into issues with
# automatic garbage collection of blobs that are not referenced by any image manifest.
oci_manifest = {
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"schemaVersion": 2,
# Config is just an empty {} file for now, and irrelevant
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"digest": str(empty_config_digest),
"size": os.path.getsize(empty_config_json_path),
},
# The buildcache index is the only layer, and is not a tarball, we lie here.
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"digest": str(index_shasum),
"size": os.path.getsize(index_json_path),
}
],
}
upload_manifest_with_retry(image_ref.with_tag(default_index_tag), oci_manifest)
def install_fn(args):
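The OCI helpers deleted above are not gone; the call sites elsewhere in this diff indicate they moved into spack.binary_distribution under _oci_* names. A rough mapping, inferred only from those call sites:

import spack.binary_distribution as bindist

bindist._push_oci                # was spack.cmd.buildcache._push_oci
bindist._oci_put_manifest        # was _put_manifest
bindist._oci_update_base_images  # was _update_base_images
bindist._oci_update_index        # was _update_index_oci
bindist._oci_push_pkg_blob       # was _push_single_spack_binary_blob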
@ -1251,13 +809,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
_update_index_oci(image_ref, tmpdir, executor)
bindist._oci_update_index(image_ref, tmpdir, executor)
return
# Otherwise, assume a normal mirror.
url = mirror.push_url
bindist.generate_package_index(url_util.join(url, bindist.build_cache_relative_path()))
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
bindist.generate_package_index(url, tmpdir)
if update_keys:
keys_url = url_util.join(
@ -1265,7 +824,8 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
)
try:
bindist.generate_key_index(keys_url)
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
bindist.generate_key_index(keys_url, tmpdir)
except bindist.CannotListKeys as e:
# Do not error out if listing keys went wrong. This usually means that the _gpg path
# does not exist. TODO: distinguish between this and other errors.
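Both index generators now take a scratch directory, and generate_package_index receives the mirror URL itself rather than the pre-joined build_cache path (the test further down expects the warning to mention {url}/{BUILD_CACHE_RELATIVE_PATH}, so the join happens inside). A minimal sketch under those assumptions, with a made-up wrapper name:

import tempfile

import spack.binary_distribution as bindist
import spack.stage

def regenerate_indices(mirror_push_url: str, keys_url: str) -> None:
    # Package index: pass the mirror root; build_cache/ is joined internally now.
    with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
        bindist.generate_package_index(mirror_push_url, tmpdir)

    # Key index: same pattern, separate scratch directory.
    with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
        bindist.generate_key_index(keys_url, tmpdir)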

View File

@ -5,10 +5,12 @@
import argparse
import os
import tempfile
import spack.binary_distribution
import spack.mirror
import spack.paths
import spack.stage
import spack.util.gpg
import spack.util.url
from spack.cmd.common import arguments
@ -115,6 +117,7 @@ def setup_parser(subparser):
help="URL of the mirror where keys will be published", help="URL of the mirror where keys will be published",
) )
publish.add_argument( publish.add_argument(
"--update-index",
"--rebuild-index", "--rebuild-index",
action="store_true", action="store_true",
default=False, default=False,
@ -220,9 +223,10 @@ def gpg_publish(args):
elif args.mirror_url:
mirror = spack.mirror.Mirror(args.mirror_url, args.mirror_url)
spack.binary_distribution.push_keys(
mirror, keys=args.keys, regenerate_index=args.rebuild_index
)
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
spack.binary_distribution.push_keys(
mirror, keys=args.keys, tmpdir=tmpdir, update_index=args.update_index
)
def gpg(parser, args):
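push_keys now wants a scratch directory and an update_index flag in place of regenerate_index. A minimal sketch of publishing keys to a mirror, assuming only the keyword names visible in this diff (the wrapper function is hypothetical):

import tempfile

import spack.binary_distribution as bindist
import spack.mirror
import spack.stage

def publish_keys(mirror_url: str, fingerprints, update_index: bool = False) -> None:
    mirror = spack.mirror.Mirror(mirror_url, mirror_url)
    with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
        bindist.push_keys(mirror, keys=fingerprints, tmpdir=tmpdir, update_index=update_index)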

View File

@ -23,9 +23,6 @@ def post_install(spec, explicit):
# Push the package to all autopush mirrors
for mirror in spack.mirror.MirrorCollection(binary=True, autopush=True).values():
bindist.push_or_raise(
spec,
mirror.push_url,
bindist.PushOptions(force=True, regenerate_index=False, unsigned=not mirror.signed),
)
signing_key = bindist.select_signing_key() if mirror.signed else None
bindist.push_or_raise([spec], out_url=mirror.push_url, signing_key=signing_key, force=True)
tty.msg(f"{spec.name}: Pushed to build cache: '{mirror.name}'")

View File

@ -6,7 +6,6 @@
import hashlib
import json
import os
import time
import urllib.error
import urllib.parse
import urllib.request
@ -43,11 +42,6 @@ def create_tarball(spec: spack.spec.Spec, tarfile_path):
return spack.binary_distribution._do_create_tarball(tarfile_path, spec.prefix, buildinfo)
def _log_upload_progress(digest: Digest, size: int, elapsed: float):
elapsed = max(elapsed, 0.001) # guard against division by zero
tty.info(f"Uploaded {digest} ({elapsed:.2f}s, {size / elapsed / 1024 / 1024:.2f} MB/s)")
def with_query_param(url: str, param: str, value: str) -> str:
"""Add a query parameter to a URL
@ -141,8 +135,6 @@ def upload_blob(
if not force and blob_exists(ref, digest, _urlopen):
return False
start = time.time()
with open(file, "rb") as f: with open(file, "rb") as f:
file_size = os.fstat(f.fileno()).st_size file_size = os.fstat(f.fileno()).st_size
@ -167,7 +159,6 @@ def upload_blob(
# Created the blob in one go.
if response.status == 201:
_log_upload_progress(digest, file_size, time.time() - start)
return True
# Otherwise, do another PUT request.
@ -191,8 +182,6 @@ def upload_blob(
spack.oci.opener.ensure_status(request, response, 201)
# print elapsed time and # MB/s
_log_upload_progress(digest, file_size, time.time() - start)
return True

View File

@ -337,7 +337,7 @@ def test_relative_rpaths_install_nondefault(mirror_dir):
buildcache_cmd("install", "-uf", cspec.name) buildcache_cmd("install", "-uf", cspec.name)
def test_push_and_fetch_keys(mock_gnupghome): def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
testpath = str(mock_gnupghome) testpath = str(mock_gnupghome)
mirror = os.path.join(testpath, "mirror") mirror = os.path.join(testpath, "mirror")
@ -357,7 +357,7 @@ def test_push_and_fetch_keys(mock_gnupghome):
assert len(keys) == 1
fpr = keys[0]
bindist.push_keys(mirror, keys=[fpr], regenerate_index=True)
bindist.push_keys(mirror, keys=[fpr], tmpdir=str(tmp_path), update_index=True)
# dir 2: import the key from the mirror, and confirm that its fingerprint
# matches the one created above
@ -464,7 +464,7 @@ def test_generate_index_missing(monkeypatch, tmpdir, mutable_config):
assert "libelf" not in cache_list assert "libelf" not in cache_list
def test_generate_key_index_failure(monkeypatch): def test_generate_key_index_failure(monkeypatch, tmp_path):
def list_url(url, recursive=False): def list_url(url, recursive=False):
if "fails-listing" in url: if "fails-listing" in url:
raise Exception("Couldn't list the directory") raise Exception("Couldn't list the directory")
@ -477,13 +477,13 @@ def push_to_url(*args, **kwargs):
monkeypatch.setattr(web_util, "push_to_url", push_to_url) monkeypatch.setattr(web_util, "push_to_url", push_to_url)
with pytest.raises(CannotListKeys, match="Encountered problem listing keys"): with pytest.raises(CannotListKeys, match="Encountered problem listing keys"):
bindist.generate_key_index("s3://non-existent/fails-listing") bindist.generate_key_index("s3://non-existent/fails-listing", str(tmp_path))
with pytest.raises(GenerateIndexError, match="problem pushing .* Couldn't upload"): with pytest.raises(GenerateIndexError, match="problem pushing .* Couldn't upload"):
bindist.generate_key_index("s3://non-existent/fails-uploading") bindist.generate_key_index("s3://non-existent/fails-uploading", str(tmp_path))
def test_generate_package_index_failure(monkeypatch, capfd): def test_generate_package_index_failure(monkeypatch, tmp_path, capfd):
def mock_list_url(url, recursive=False): def mock_list_url(url, recursive=False):
raise Exception("Some HTTP error") raise Exception("Some HTTP error")
@ -492,15 +492,16 @@ def mock_list_url(url, recursive=False):
test_url = "file:///fake/keys/dir" test_url = "file:///fake/keys/dir"
with pytest.raises(GenerateIndexError, match="Unable to generate package index"): with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
bindist.generate_package_index(test_url) bindist.generate_package_index(test_url, str(tmp_path))
assert ( assert (
f"Warning: Encountered problem listing packages at {test_url}: Some HTTP error" "Warning: Encountered problem listing packages at "
f"{test_url}/{bindist.BUILD_CACHE_RELATIVE_PATH}: Some HTTP error"
in capfd.readouterr().err in capfd.readouterr().err
) )
def test_generate_indices_exception(monkeypatch, capfd):
def test_generate_indices_exception(monkeypatch, tmp_path, capfd):
def mock_list_url(url, recursive=False):
raise Exception("Test Exception handling")
@ -509,10 +510,10 @@ def mock_list_url(url, recursive=False):
url = "file:///fake/keys/dir" url = "file:///fake/keys/dir"
with pytest.raises(GenerateIndexError, match=f"Encountered problem listing keys at {url}"): with pytest.raises(GenerateIndexError, match=f"Encountered problem listing keys at {url}"):
bindist.generate_key_index(url) bindist.generate_key_index(url, str(tmp_path))
with pytest.raises(GenerateIndexError, match="Unable to generate package index"): with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
bindist.generate_package_index(url) bindist.generate_package_index(url, str(tmp_path))
assert f"Encountered problem listing packages at {url}" in capfd.readouterr().err assert f"Encountered problem listing packages at {url}" in capfd.readouterr().err

View File

@ -13,34 +13,34 @@
import spack.spec
import spack.util.url
install = spack.main.SpackCommand("install")
pytestmark = pytest.mark.not_on_windows("does not run on windows")
def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmpdir):
with tmpdir.as_cwd():
spec = spack.spec.Spec("trivial-install-test-package").concretized()
install(str(spec))
# Runs fine the first time, throws the second time
out_url = spack.util.url.path_to_file_url(str(tmpdir))
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
with pytest.raises(bd.NoOverwriteException):
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
# Should work fine with force=True
bd.push_or_raise(spec, out_url, bd.PushOptions(force=True, unsigned=True))
# Remove the tarball and try again.
# This must *also* throw, because of the existing .spec.json file
os.remove(
os.path.join(
bd.build_cache_prefix("."),
bd.tarball_directory_name(spec),
bd.tarball_name(spec, ".spack"),
)
)
with pytest.raises(bd.NoOverwriteException):
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmp_path):
spec = spack.spec.Spec("trivial-install-test-package").concretized()
spec.package.do_install(fake=True)
specs = [spec]
# Runs fine the first time, second time it's a no-op
out_url = spack.util.url.path_to_file_url(str(tmp_path))
skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert not skipped
skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert skipped == specs
# Should work fine with force=True
skipped = bd.push_or_raise(specs, out_url, signing_key=None, force=True)
assert not skipped
# Remove the tarball, which should cause push to push.
os.remove(
tmp_path
/ bd.BUILD_CACHE_RELATIVE_PATH
/ bd.tarball_directory_name(spec)
/ bd.tarball_name(spec, ".spack")
)
skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert not skipped

View File

@ -286,7 +286,7 @@ def _fail(self, args):
def test_ci_create_buildcache(tmpdir, working_env, config, mock_packages, monkeypatch):
"""Test that create_buildcache returns a list of objects with the correct
keys and types."""
monkeypatch.setattr(spack.ci, "_push_to_build_cache", lambda a, b, c: True)
monkeypatch.setattr(ci, "push_to_build_cache", lambda a, b, c: True)
results = ci.create_buildcache(
None, destination_mirror_urls=["file:///fake-url-one", "file:///fake-url-two"]

View File

@ -384,11 +384,14 @@ def test_correct_specs_are_pushed(
packages_to_push = []
def fake_push(node, push_url, options):
assert isinstance(node, Spec)
packages_to_push.append(node.name)
def fake_push(specs, *args, **kwargs):
assert all(isinstance(s, Spec) for s in specs)
packages_to_push.extend(s.name for s in specs)
skipped = []
errors = []
return skipped, errors
monkeypatch.setattr(spack.binary_distribution, "push_or_raise", fake_push)
monkeypatch.setattr(spack.binary_distribution, "_push", fake_push)
buildcache_create_args = ["create", "--unsigned"]

View File

@ -797,7 +797,7 @@ def test_ci_rebuild_mock_failure_to_push(
def mock_success(*args, **kwargs):
return 0
monkeypatch.setattr(spack.ci, "process_command", mock_success)
monkeypatch.setattr(ci, "process_command", mock_success)
# Mock failure to push to the build cache
def mock_push_or_raise(*args, **kwargs):
@ -1256,15 +1256,15 @@ def test_push_to_build_cache(
def test_push_to_build_cache_exceptions(monkeypatch, tmp_path, capsys):
def _push_to_build_cache(spec, sign_binaries, mirror_url):
raise Exception("Error: Access Denied")
monkeypatch.setattr(spack.ci, "_push_to_build_cache", _push_to_build_cache)
def push_or_raise(*args, **kwargs):
raise spack.binary_distribution.PushToBuildCacheError("Error: Access Denied")
monkeypatch.setattr(spack.binary_distribution, "push_or_raise", push_or_raise)
# Input doesn't matter, as we are faking exceptional output
url = tmp_path.as_uri()
ci.push_to_build_cache(None, url, None)
assert f"Permission problem writing to {url}" in capsys.readouterr().err
assert f"Problem writing to {url}: Error: Access Denied" in capsys.readouterr().err
@pytest.mark.parametrize("match_behavior", ["first", "merge"])

View File

@ -612,9 +612,7 @@ def test_install_from_binary_with_missing_patch_succeeds(
# Push it to a binary cache
build_cache = tmp_path / "my_build_cache"
binary_distribution.push_or_raise(
s,
build_cache.as_uri(),
binary_distribution.PushOptions(unsigned=True, regenerate_index=True),
[s], out_url=build_cache.as_uri(), signing_key=None, force=False
)
# Now re-install it.

View File

@ -15,6 +15,7 @@
import pytest
import spack.binary_distribution
import spack.cmd.buildcache
import spack.database
import spack.environment as ev
@ -294,8 +295,8 @@ def test_uploading_with_base_image_in_docker_image_manifest_v2_format(
def test_best_effort_upload(mutable_database: spack.database.Database, monkeypatch):
"""Failure to upload a blob or manifest should not prevent others from being uploaded"""
_push_blob = spack.cmd.buildcache._push_single_spack_binary_blob
_push_blob = spack.binary_distribution._oci_push_pkg_blob
_push_manifest = spack.cmd.buildcache._put_manifest
_push_manifest = spack.binary_distribution._oci_put_manifest
def push_blob(image_ref, spec, tmpdir):
# fail to upload the blob of mpich
@ -311,8 +312,8 @@ def put_manifest(base_images, checksums, image_ref, tmpdir, extra_config, annota
base_images, checksums, image_ref, tmpdir, extra_config, annotations, *specs
)
monkeypatch.setattr(spack.cmd.buildcache, "_push_single_spack_binary_blob", push_blob)
monkeypatch.setattr(spack.binary_distribution, "_oci_push_pkg_blob", push_blob)
monkeypatch.setattr(spack.cmd.buildcache, "_put_manifest", put_manifest)
monkeypatch.setattr(spack.binary_distribution, "_oci_put_manifest", put_manifest)
registry = InMemoryOCIRegistry("example.com")
with oci_servers(registry):

View File

@ -7,6 +7,7 @@
import functools
import os
import re
from typing import List
import llnl.util.filesystem import llnl.util.filesystem
@ -124,8 +125,8 @@ def gnupghome_override(dir):
SOCKET_DIR, GNUPGHOME = _SOCKET_DIR, _GNUPGHOME
def _parse_secret_keys_output(output):
def _parse_secret_keys_output(output: str) -> List[str]:
keys = []
keys: List[str] = []
found_sec = False
for line in output.split("\n"):
if found_sec:
@ -195,9 +196,10 @@ def create(**kwargs):
@_autoinit
def signing_keys(*args):
def signing_keys(*args) -> List[str]:
"""Return the keys that can be used to sign binaries."""
output = GPG("--list-secret-keys", "--with-colons", "--fingerprint", *args, output=str)
assert GPG
output: str = GPG("--list-secret-keys", "--with-colons", "--fingerprint", *args, output=str)
return _parse_secret_keys_output(output)

View File

@ -1272,7 +1272,7 @@ _spack_gpg_export() {
_spack_gpg_publish() {
if $list_options
then
SPACK_COMPREPLY="-h --help -d --directory -m --mirror-name --mirror-url --rebuild-index"
SPACK_COMPREPLY="-h --help -d --directory -m --mirror-name --mirror-url --update-index --rebuild-index"
else
_keys
fi

View File

@ -1908,7 +1908,7 @@ complete -c spack -n '__fish_spack_using_command gpg export' -l secret -f -a sec
complete -c spack -n '__fish_spack_using_command gpg export' -l secret -d 'export secret keys'
# spack gpg publish
set -g __fish_spack_optspecs_spack_gpg_publish h/help d/directory= m/mirror-name= mirror-url= rebuild-index
set -g __fish_spack_optspecs_spack_gpg_publish h/help d/directory= m/mirror-name= mirror-url= update-index
complete -c spack -n '__fish_spack_using_command_pos_remainder 0 gpg publish' -f -a '(__fish_spack_gpg_keys)'
complete -c spack -n '__fish_spack_using_command gpg publish' -s h -l help -f -a help
complete -c spack -n '__fish_spack_using_command gpg publish' -s h -l help -d 'show this help message and exit'
@ -1918,8 +1918,8 @@ complete -c spack -n '__fish_spack_using_command gpg publish' -s m -l mirror-nam
complete -c spack -n '__fish_spack_using_command gpg publish' -s m -l mirror-name -r -d 'name of the mirror where keys will be published'
complete -c spack -n '__fish_spack_using_command gpg publish' -l mirror-url -r -f -a mirror_url
complete -c spack -n '__fish_spack_using_command gpg publish' -l mirror-url -r -d 'URL of the mirror where keys will be published'
complete -c spack -n '__fish_spack_using_command gpg publish' -l rebuild-index -f -a rebuild_index
complete -c spack -n '__fish_spack_using_command gpg publish' -l update-index -l rebuild-index -f -a update_index
complete -c spack -n '__fish_spack_using_command gpg publish' -l rebuild-index -d 'regenerate buildcache key index after publishing key(s)'
complete -c spack -n '__fish_spack_using_command gpg publish' -l update-index -l rebuild-index -d 'regenerate buildcache key index after publishing key(s)'
# spack graph
set -g __fish_spack_optspecs_spack_graph h/help a/ascii d/dot s/static c/color i/installed deptype= set -g __fish_spack_optspecs_spack_graph h/help a/ascii d/dot s/static c/color i/installed deptype=