https://github.com/jupyterhub/the-littlest-jupyterhub.git
.github/integration-test.py
@@ -1,19 +1,68 @@
 #!/usr/bin/env python3
 import argparse
+from shutil import which
 import subprocess
+import time
 import os
 
 
+def container_runtime():
+    runtimes = ["docker", "podman"]
+    for runtime in runtimes:
+        if which(runtime):
+            return runtime
+    raise RuntimeError(f"No container runtime found, tried: {' '.join(runtimes)}")
+
+
+def container_check_output(*args, **kwargs):
+    cmd = [container_runtime()] + list(*args)
+    print(f"Running {cmd} {kwargs}")
+    return subprocess.check_output(cmd, **kwargs)
+
+
+def container_run(*args, **kwargs):
+    cmd = [container_runtime()] + list(*args)
+    print(f"Running {cmd} {kwargs}")
+    return subprocess.run(cmd, **kwargs)
+
+
 def build_systemd_image(image_name, source_path, build_args=None):
     """
     Build docker image with systemd at source_path.
 
     Built image is tagged with image_name
     """
-    cmd = ["docker", "build", f"-t={image_name}", source_path]
+    cmd = ["build", f"-t={image_name}", source_path]
     if build_args:
         cmd.extend([f"--build-arg={ba}" for ba in build_args])
-    subprocess.check_call(cmd)
+    container_check_output(cmd)
 
 
+def check_container_ready(container_name, timeout=60):
+    """
+    Check if container is ready to run tests
+    """
+    now = time.time()
+    while True:
+        try:
+            out = container_check_output(["exec", "-t", container_name, "id"])
+            print(out.decode())
+            return
+        except subprocess.CalledProcessError as e:
+            print(e)
+        try:
+            out = container_check_output(["inspect", container_name])
+            print(out.decode())
+        except subprocess.CalledProcessError as e:
+            print(e)
+        try:
+            out = container_check_output(["logs", container_name])
+            print(out.decode())
+        except subprocess.CalledProcessError as e:
+            print(e)
+        if time.time() - now > timeout:
+            raise RuntimeError(f"Container {container_name} hasn't started")
+        time.sleep(5)
+
+
 def run_systemd_image(image_name, container_name, bootstrap_pip_spec):
@@ -25,10 +74,8 @@ def run_systemd_image(image_name, container_name, bootstrap_pip_spec):
     Container named container_name will be started.
     """
     cmd = [
-        "docker",
         "run",
         "--privileged",
-        "--mount=type=bind,source=/sys/fs/cgroup,target=/sys/fs/cgroup",
         "--detach",
         f"--name={container_name}",
         # A bit less than 1GB to ensure TLJH runs on 1GB VMs.
@@ -42,7 +89,7 @@ def run_systemd_image(image_name, container_name, bootstrap_pip_spec):
 
     cmd.append(image_name)
 
-    subprocess.check_call(cmd)
+    container_check_output(cmd)
 
 
 def stop_container(container_name):
@@ -50,21 +97,20 @@ def stop_container(container_name):
     Stop & remove docker container if it exists.
     """
     try:
-        subprocess.check_output(
-            ["docker", "inspect", container_name], stderr=subprocess.STDOUT
-        )
+        container_check_output(["inspect", container_name], stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError:
         # No such container exists, nothing to do
         return
-    subprocess.check_call(["docker", "rm", "-f", container_name])
+    container_check_output(["rm", "-f", container_name])
 
 
 def run_container_command(container_name, cmd):
     """
     Run cmd in a running container with a bash shell
     """
-    proc = subprocess.run(
-        ["docker", "exec", "-t", container_name, "/bin/bash", "-c", cmd], check=True
+    proc = container_run(
+        ["exec", "-t", container_name, "/bin/bash", "-c", cmd],
+        check=True,
     )
 
 
@@ -72,7 +118,7 @@ def copy_to_container(container_name, src_path, dest_path):
     """
     Copy files from src_path to dest_path inside container_name
     """
-    subprocess.check_call(["docker", "cp", src_path, f"{container_name}:{dest_path}"])
+    container_check_output(["cp", src_path, f"{container_name}:{dest_path}"])
 
 
 def run_test(
@@ -84,6 +130,8 @@ def run_test(
     stop_container(test_name)
     run_systemd_image(image_name, test_name, bootstrap_pip_spec)
 
+    check_container_ready(test_name)
+
     source_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
 
     copy_to_container(test_name, os.path.join(source_path, "bootstrap/."), "/srv/src")
@@ -93,7 +141,7 @@ def run_test(
 
     # These logs can be very relevant to debug a container startup failure
     print(f"--- Start of logs from the container: {test_name}")
-    print(subprocess.check_output(["docker", "logs", test_name]).decode())
+    print(container_check_output(["logs", test_name]).decode())
     print(f"--- End of logs from the container: {test_name}")
 
     # Install TLJH from the default branch first to test upgrades
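Taken together, the hunks above replace every hard-coded `docker` invocation in the integration-test harness with small wrappers that detect whichever container CLI is on PATH at call time. A minimal sketch of that pattern (illustration only, not part of the diff; assumes either docker or podman is installed):

    # Illustration only: resolve the container CLI the way the new helpers do.
    # Raises if neither docker nor podman is installed.
    from shutil import which
    import subprocess

    def container_runtime():
        for runtime in ("docker", "podman"):
            if which(runtime):
                return runtime
        raise RuntimeError("No container runtime found")

    # The wrappers prepend the detected CLI, so a call such as
    # container_check_output(["logs", "some-container"]) ends up running
    # `docker logs some-container` or `podman logs some-container`.
    print(subprocess.check_output([container_runtime(), "--version"]).decode())

The same lookup backs `build`, `run`, `exec`, `cp`, `inspect`, `rm`, and `logs`, which is why the literal "docker" argument disappears from every command list in this file.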
@@ -1,5 +1,7 @@
 pydata-sphinx-theme
-sphinx>=4
+# Sphix 6.0.0 breaks pydata-sphinx-theme
+# See pydata/pydata-sphinx-theme#1094
+sphinx<6
 sphinx_copybutton
 sphinx-autobuild
 sphinxext-opengraph
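The pin above is the entire change; as a quick sanity check, a docs environment can confirm which Sphinx it actually resolved to using only the standard library (illustration only, not part of the diff):

    # Illustration only: verify that the new `sphinx<6` pin took effect.
    from importlib.metadata import version

    sphinx_version = version("sphinx")
    assert int(sphinx_version.split(".")[0]) < 6, sphinx_version
    print(f"sphinx {sphinx_version} satisfies the <6 pin")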
@@ -35,6 +35,7 @@ def setgroup(group):
     gid = grp.getgrnam(group).gr_gid
     uid = pwd.getpwnam("nobody").pw_uid
     os.setgid(gid)
+    os.setgroups([])
     os.setuid(uid)
     os.environ["HOME"] = "/tmp/test-home-%i-%i" % (uid, gid)
 
@@ -45,6 +46,10 @@ def test_groups_exist(group):
     grp.getgrnam(group)
 
 
+def debug_uid_gid():
+    return subprocess.check_output("id").decode()
+
+
 def permissions_test(group, path, *, readable=None, writable=None, dirs_only=False):
     """Run a permissions test on all files in a path path"""
     # start a subprocess and become nobody:group in the process
@@ -88,18 +93,22 @@ def permissions_test(group, path, *, readable=None, writable=None, dirs_only=Fal
         # check if the path should be writable
         if writable is not None:
            if access(path, os.W_OK) != writable:
+                stat = os.stat(path)
+                info = pool.submit(debug_uid_gid).result()
                 failures.append(
-                    "{} {} should {}be writable by {}".format(
-                        stat_str, path, "" if writable else "not ", group
+                    "{} {} should {}be writable by {} [{}]".format(
+                        stat_str, path, "" if writable else "not ", group, info
                     )
                 )
 
         # check if the path should be readable
         if readable is not None:
             if access(path, os.R_OK) != readable:
+                stat = os.stat(path)
+                info = pool.submit(debug_uid_gid).result()
                 failures.append(
-                    "{} {} should {}be readable by {}".format(
-                        stat_str, path, "" if readable else "not ", group
+                    "{} {} should {}be readable by {} [{}]".format(
+                        stat_str, path, "" if readable else "not ", group, info
                     )
                 )
     # verify that we actually tested some files
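The two additions to this test file work together: `os.setgroups([])` makes the privilege drop in `setgroup()` complete (without it the worker keeps root's supplementary groups and can pass permission checks it should fail), and `debug_uid_gid()` is submitted to the same pool so that failure messages report the identity the checks actually ran under. A minimal sketch of that pattern, assuming a single-worker process pool and the conventional nobody uid/gid of 65534 (both assumptions for illustration, not taken from the diff; needs root to run):

    # Illustration only: drop privileges once in a single worker process, then
    # route every later check through that same worker.
    import os
    import subprocess
    from concurrent.futures import ProcessPoolExecutor

    NOBODY = 65534  # assumed nobody/nogroup id on the test image

    def become_nobody():
        os.setgid(NOBODY)
        os.setgroups([])   # drop supplementary groups inherited from root
        os.setuid(NOBODY)  # must come last; a non-root process cannot setgid/setgroups

    if __name__ == "__main__":
        pool = ProcessPoolExecutor(1)  # a single worker keeps the dropped identity
        pool.submit(become_nobody).result()
        # Equivalent of debug_uid_gid(): report who the checks really run as.
        print(pool.submit(subprocess.check_output, "id").result().decode())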