Merge pull request #916 from consideRatio/pr/refactor-tests

maint: refactor tests, fix upgrade tests (now correctly failing)
Min RK
2023-06-09 12:43:48 +02:00
committed by GitHub
15 changed files with 476 additions and 600 deletions

View File

@@ -1,154 +1,161 @@
#!/usr/bin/env python3
import argparse
import functools
import os
import subprocess
import time
from shutil import which
GIT_REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
TEST_IMAGE_NAME = "test-systemd"
def container_runtime():
@functools.lru_cache()
def _get_container_runtime_cli():
runtimes = ["docker", "podman"]
for runtime in runtimes:
if which(runtime):
return runtime
raise RuntimeError(f"No container runtime found, tried: {' '.join(runtimes)}")
raise RuntimeError(f"No container runtime CLI found, tried: {' '.join(runtimes)}")
def container_check_output(*args, **kwargs):
cmd = [container_runtime()] + list(*args)
print(f"Running {cmd} {kwargs}")
return subprocess.check_output(cmd, **kwargs)
def _cli(args, log_failure=True):
cmd = [_get_container_runtime_cli(), *args]
try:
return subprocess.check_output(cmd, text=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
if log_failure:
print(f"{cmd} failed!", flush=True)
raise
def container_run(*args, **kwargs):
cmd = [container_runtime()] + list(*args)
print(f"Running {cmd} {kwargs}")
return subprocess.run(cmd, **kwargs)
def build_systemd_image(image_name, source_path, build_args=None):
def _await_container_startup(container_name, timeout=60):
"""
Build docker image with systemd at source_path.
Built image is tagged with image_name
Await container to become ready, as checked by attempting to run a basic
command (id) inside it.
"""
cmd = ["build", f"-t={image_name}", source_path]
if build_args:
cmd.extend([f"--build-arg={ba}" for ba in build_args])
container_check_output(cmd)
def check_container_ready(container_name, timeout=60):
"""
Check if container is ready to run tests
"""
now = time.time()
start = time.time()
while True:
try:
out = container_check_output(["exec", "-t", container_name, "id"])
print(out.decode())
_cli(["exec", "-t", container_name, "id"], log_failure=False)
return
except subprocess.CalledProcessError as e:
print(e)
try:
out = container_check_output(["inspect", container_name])
print(out.decode())
except subprocess.CalledProcessError as e:
print(e)
try:
out = container_check_output(["logs", container_name])
print(out.decode())
except subprocess.CalledProcessError as e:
print(e)
if time.time() - now > timeout:
raise RuntimeError(f"Container {container_name} hasn't started")
time.sleep(5)
except subprocess.CalledProcessError:
if time.time() - start > timeout:
inspect = ""
logs = ""
try:
inspect = _cli(["inspect", container_name], log_failure=False)
except subprocess.CalledProcessError as e:
inspect = e.output
try:
logs = _cli(["logs", container_name], log_failure=False)
except subprocess.CalledProcessError as e:
logs = e.output
raise RuntimeError(
f"Container {container_name} failed to start! Debugging info follows...\n\n"
f"> docker inspect {container_name}\n"
"----------------------------------------\n"
f"{inspect}\n"
f"> docker logs {container_name}\n"
"----------------------------------------\n"
f"{logs}\n"
)
time.sleep(1)
def run_systemd_image(image_name, container_name, bootstrap_pip_spec):
def build_image(build_args=None):
"""
Run docker image with systemd
Build Dockerfile with systemd in the integration-tests folder to run tests
from.
"""
cmd = [
_get_container_runtime_cli(),
"build",
f"--tag={TEST_IMAGE_NAME}",
"integration-tests",
]
if build_args:
cmd.extend([f"--build-arg={ba}" for ba in build_args])
Image named image_name should be built with build_systemd_image.
subprocess.run(cmd, check=True, text=True)
Container named container_name will be started.
def start_container(container_name, bootstrap_pip_spec):
"""
Starts a container based on an image expected to start systemd.
"""
cmd = [
"run",
"--privileged",
"--rm",
"--detach",
"--privileged",
f"--name={container_name}",
# A bit less than 1GB to ensure TLJH runs on 1GB VMs.
# If this is changed, all docs references to the required memory must be changed too.
"--memory=900m",
]
if bootstrap_pip_spec:
cmd.append("-e")
cmd.append(f"TLJH_BOOTSTRAP_PIP_SPEC={bootstrap_pip_spec}")
cmd.append(f"--env=TLJH_BOOTSTRAP_PIP_SPEC={bootstrap_pip_spec}")
else:
cmd.append("--env=TLJH_BOOTSTRAP_DEV=yes")
cmd.append("--env=TLJH_BOOTSTRAP_PIP_SPEC=/srv/src")
cmd.append(TEST_IMAGE_NAME)
cmd.append(image_name)
container_check_output(cmd)
return _cli(cmd)
def stop_container(container_name):
"""
Stop & remove docker container if it exists.
Stop and remove docker container if it exists.
"""
try:
container_check_output(["inspect", container_name], stderr=subprocess.STDOUT)
return _cli(["rm", "--force", container_name], log_failure=False)
except subprocess.CalledProcessError:
# No such container exists, nothing to do
return
container_check_output(["rm", "-f", container_name])
pass
def run_container_command(container_name, cmd):
def run_command(container_name, command):
"""
Run cmd in a running container with a bash shell
Run a bash command in a running container and error if it fails
"""
proc = container_run(
["exec", "-t", container_name, "/bin/bash", "-c", cmd],
check=True,
)
cmd = [
_get_container_runtime_cli(),
"exec",
"-t",
container_name,
"/bin/bash",
"-c",
command,
]
print(f"\nRunning: {cmd}\n----------------------------------------", flush=True)
subprocess.run(cmd, check=True, text=True)
def copy_to_container(container_name, src_path, dest_path):
"""
Copy files from src_path to dest_path inside container_name
Copy files from a path on the local file system to a destination in a
running container
"""
container_check_output(["cp", src_path, f"{container_name}:{dest_path}"])
_cli(["cp", src_path, f"{container_name}:{dest_path}"])
def run_test(
image_name,
test_name,
container_name,
bootstrap_pip_spec,
test_files,
upgrade_from,
installer_args,
):
"""
Starts a new container based on image_name, runs the bootstrap script to
setup tljh with installer_args, and runs test_name.
(Re-)starts a named container with the given (systemd based) image, then runs
the bootstrap script inside it to set up tljh with installer_args.
Thereafter, source files are copied to the container and the tests are run.
"""
stop_container(test_name)
run_systemd_image(image_name, test_name, bootstrap_pip_spec)
check_container_ready(test_name)
source_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
copy_to_container(test_name, os.path.join(source_path, "bootstrap/."), "/srv/src")
copy_to_container(
test_name, os.path.join(source_path, "integration-tests/"), "/srv/src"
)
# These logs can be very relevant to debug a container startup failure
print(f"--- Start of logs from the container: {test_name}")
print(container_check_output(["logs", test_name]).decode())
print(f"--- End of logs from the container: {test_name}")
stop_container(container_name)
start_container(container_name, bootstrap_pip_spec)
_await_container_startup(container_name)
copy_to_container(container_name, GIT_REPO_PATH, "/srv/src")
# To test upgrades, we run a bootstrap.py script two times instead of one,
# where the initial run first installs some older version.
@@ -156,56 +163,37 @@ def run_test(
# We want to support testing a PR by upgrading from "main", "latest" (latest
# released version), and from a previous major-like version.
#
# FIXME: We currently always rely on the main branch's bootstrap.py script.
#        Realistically, we should run previous versions of the bootstrap
#        script, which also install previous versions of TLJH.
#
# 2023-04-15 Erik observed that https://tljh.jupyter.org/bootstrap.py
#            references the master (now main) branch, which wasn't obvious;
#            it could just as well have been the latest released version.
#
if upgrade_from:
run_container_command(
test_name,
f"curl -L https://tljh.jupyter.org/bootstrap.py | python3 - --version={upgrade_from}",
)
run_container_command(test_name, f"python3 /srv/src/bootstrap.py {installer_args}")
command = f"python3 /srv/src/bootstrap/bootstrap.py --version={upgrade_from}"
run_command(container_name, command)
command = f"python3 /srv/src/bootstrap/bootstrap.py {' '.join(installer_args)}"
run_command(container_name, command)
# Install packages from integration-tests/requirements.txt into the hub
# environment's pip, where the bootstrap script installed the other packages
run_container_command(
test_name,
"/opt/tljh/hub/bin/python3 -m pip install -r /srv/src/integration-tests/requirements.txt",
)
command = "/opt/tljh/hub/bin/python3 -m pip install -r /srv/src/integration-tests/requirements.txt"
run_command(container_name, command)
# show environment
run_container_command(
test_name,
"/opt/tljh/hub/bin/python3 -m pip freeze",
)
command = "/opt/tljh/hub/bin/python3 -m pip freeze"
run_command(container_name, command)
run_container_command(
test_name,
# We abort pytest after two failures as a compromise between avoiding a
# flood of logs and still learning whether multiple tests would fail.
"/opt/tljh/hub/bin/python3 -m pytest --verbose --maxfail=2 --color=yes --durations=10 --capture=no {}".format(
" ".join(
[os.path.join("/srv/src/integration-tests/", f) for f in test_files]
)
),
)
# run tests
test_files = " ".join([f"/srv/src/integration-tests/{f}" for f in test_files])
command = f"/opt/tljh/hub/bin/python3 -m pytest {test_files}"
run_command(container_name, command)
def show_logs(container_name):
"""
Print logs from inside container to stdout
Print jupyterhub and traefik status and logs from both.
tljh logs ref: https://tljh.jupyter.org/en/latest/troubleshooting/logs.html
"""
run_container_command(container_name, "journalctl --no-pager")
run_container_command(
container_name, "systemctl --no-pager status jupyterhub traefik"
)
run_command(container_name, "systemctl --no-pager status jupyterhub traefik")
run_command(container_name, "journalctl --no-pager -u jupyterhub")
run_command(container_name, "journalctl --no-pager -u traefik")
def main():
@@ -213,18 +201,14 @@ def main():
subparsers = argparser.add_subparsers(dest="action")
build_image_parser = subparsers.add_parser("build-image")
build_image_parser.add_argument(
"--build-arg",
action="append",
dest="build_args",
)
stop_container_parser = subparsers.add_parser("stop-container")
stop_container_parser.add_argument("container_name")
build_image_parser.add_argument("--build-arg", action="append", dest="build_args")
start_container_parser = subparsers.add_parser("start-container")
start_container_parser.add_argument("container_name")
stop_container_parser = subparsers.add_parser("stop-container")
stop_container_parser.add_argument("container_name")
run_parser = subparsers.add_parser("run")
run_parser.add_argument("container_name")
run_parser.add_argument("command")
@@ -235,12 +219,10 @@ def main():
copy_parser.add_argument("dest")
run_test_parser = subparsers.add_parser("run-test")
run_test_parser.add_argument("--installer-args", default="")
run_test_parser.add_argument("--installer-args", action="append")
run_test_parser.add_argument("--upgrade-from", default="")
run_test_parser.add_argument(
"--bootstrap-pip-spec", nargs="?", default="", type=str
)
run_test_parser.add_argument("test_name")
run_test_parser.add_argument("--bootstrap-pip-spec", default="/srv/src")
run_test_parser.add_argument("container_name")
run_test_parser.add_argument("test_files", nargs="+")
show_logs_parser = subparsers.add_parser("show-logs")
@@ -248,12 +230,19 @@ def main():
args = argparser.parse_args()
image_name = "tljh-systemd"
if args.action == "run-test":
if args.action == "build-image":
build_image(args.build_args)
elif args.action == "start-container":
start_container(args.container_name, args.bootstrap_pip_spec)
elif args.action == "stop-container":
stop_container(args.container_name)
elif args.action == "run":
run_command(args.container_name, args.command)
elif args.action == "copy":
copy_to_container(args.container_name, args.src, args.dest)
elif args.action == "run-test":
run_test(
image_name,
args.test_name,
args.container_name,
args.bootstrap_pip_spec,
args.test_files,
args.upgrade_from,
@@ -261,16 +250,6 @@ def main():
)
elif args.action == "show-logs":
show_logs(args.container_name)
elif args.action == "run":
run_container_command(args.container_name, args.command)
elif args.action == "copy":
copy_to_container(args.container_name, args.src, args.dest)
elif args.action == "start-container":
run_systemd_image(image_name, args.container_name, args.bootstrap_pip_spec)
elif args.action == "stop-container":
stop_container(args.container_name)
elif args.action == "build-image":
build_systemd_image(image_name, "integration-tests", args.build_args)
if __name__ == "__main__":
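
The refactored script remains a plain argparse CLI: each subcommand (build-image, start-container, stop-container, run, copy, run-test, show-logs) dispatches to one of the helper functions above. A minimal, self-contained sketch of that dispatch pattern, with only two subcommands and stand-in bodies (illustrative only, not the actual script):

import argparse


def build_image(build_args):
    # stand-in for the real build_image(), which invokes docker/podman build
    print(f"would build the test image with --build-arg values: {build_args}")


def run_command(container_name, command):
    # stand-in for the real run_command(), which execs bash -c in the container
    print(f"would run {command!r} inside container {container_name!r}")


def main():
    argparser = argparse.ArgumentParser()
    subparsers = argparser.add_subparsers(dest="action", required=True)

    build_image_parser = subparsers.add_parser("build-image")
    build_image_parser.add_argument("--build-arg", action="append", dest="build_args")

    run_parser = subparsers.add_parser("run")
    run_parser.add_argument("container_name")
    run_parser.add_argument("command")

    args = argparser.parse_args()

    # dispatch on the selected subcommand
    if args.action == "build-image":
        build_image(args.build_args)
    elif args.action == "run":
        run_command(args.container_name, args.command)


if __name__ == "__main__":
    main()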

View File

@@ -59,17 +59,20 @@ jobs:
with:
python-version: "3.10"
- name: Install pytest
run: python3 -m pip install pytest
# FIXME: The test_bootstrap.py script has duplicated logic to build and
#        start images and run things in them. This makes tests slower and
#        adds code to maintain. Let's try to remove it.
#
# - bootstrap.py's failure detections, put in unit tests?
# - bootstrap.py's --show-progress-page test, include as integration test?
#
- name: Install integration-tests/requirements.txt for test_bootstrap.py
run: pip install -r integration-tests/requirements.txt
# We abort pytest after two failures as a compromise between avoiding a
# flood of logs and still learning whether multiple tests would fail.
- name: Run bootstrap tests (Runs in/Builds ${{ matrix.distro_image }} derived image)
run: |
pytest --verbose --maxfail=2 --color=yes --durations=10 --capture=no \
integration-tests/test_bootstrap.py
timeout-minutes: 20
pytest integration-tests/test_bootstrap.py
timeout-minutes: 10
env:
# integration-tests/test_bootstrap.py will build and start containers
# based on this environment variable. This is similar to how
@@ -77,64 +80,45 @@ jobs:
# setting the base image via a Dockerfile ARG.
BASE_IMAGE: ${{ matrix.distro_image }}
# We build a docker image within which we will work
- name: Build systemd image (Builds ${{ matrix.distro_image }} derived image)
- name: Build systemd image, derived from ${{ matrix.distro_image }}
run: |
.github/integration-test.py build-image \
--build-arg "BASE_IMAGE=${{ matrix.distro_image }}"
# FIXME: Make the logic below easier to follow.
# - In short, setting BOOTSTRAP_PIP_SPEC here specifies the location from
#   which the tljh python package should be installed. In this
# GitHub Workflow's test job, we provide a remote reference to itself as
# found on GitHub - this could be the HEAD of a PR branch or the default
# branch on merge.
#
# Overview of how this logic influences the end result.
# - integration-test.yaml:
# Runs integration-test.py by passing --bootstrap-pip-spec flag with a
# reference to the pull request on GitHub.
# - integration-test.py:
#   Starts a pre-built systemd container, setting the
# TLJH_BOOTSTRAP_PIP_SPEC based on its passed --bootstrap-pip-spec value.
# - systemd container:
# Runs bootstrap.py
# - bootstrap.py
# Makes use of TLJH_BOOTSTRAP_PIP_SPEC environment variable to install
# the tljh package from a given location, which could be a local git
# clone of this repo where setup.py resides, or a reference to some
# GitHub branch for example.
- name: Set BOOTSTRAP_PIP_SPEC value
#
# - Runs integration-test.py build-image, to build a systemd based image
# to use later.
#
# - Runs integration-test.py run-test, to start a systemd based
#   container, run the bootstrap.py script inside it, and then run
#   pytest from the hub python environment set up by the bootstrap
#   script.
#
# About passed --installer-args:
#
# - --admin admin:admin
# Required for test_admin_installer.py
#
# - --plugin /srv/src/integration-tests/plugins/simplest
# Required for test_simplest_plugin.py
#
- name: pytest integration-tests/
id: integration-tests
run: |
BOOTSTRAP_PIP_SPEC="git+https://github.com/$GITHUB_REPOSITORY.git@$GITHUB_REF"
echo "BOOTSTRAP_PIP_SPEC=$BOOTSTRAP_PIP_SPEC" >> $GITHUB_ENV
echo $BOOTSTRAP_PIP_SPEC
- name: Run basic tests (Runs in ${{ matrix.distro_image }} derived image)
run: |
.github/integration-test.py run-test basic-tests \
--bootstrap-pip-spec "$BOOTSTRAP_PIP_SPEC" \
.github/integration-test.py run-test integration-tests \
--installer-args "--admin test-admin-username:test-admin-password" \
--installer-args "--plugin /srv/src/integration-tests/plugins/simplest" \
${{ matrix.extra_flags }} \
test_hub.py \
test_proxy.py \
test_install.py \
test_extensions.py
timeout-minutes: 15
- name: Run admin tests (Runs in ${{ matrix.distro_image }} derived image)
run: |
.github/integration-test.py run-test admin-tests \
--installer-args "--admin admin:admin" \
--bootstrap-pip-spec "$BOOTSTRAP_PIP_SPEC" \
${{ matrix.extra_flags }} \
test_admin_installer.py
timeout-minutes: 15
- name: Run plugin tests (Runs in ${{ matrix.distro_image }} derived image)
run: |
.github/integration-test.py run-test plugin-tests \
--bootstrap-pip-spec "$BOOTSTRAP_PIP_SPEC" \
--installer-args "--plugin /srv/src/integration-tests/plugins/simplest" \
${{ matrix.extra_flags }} \
test_extensions.py \
test_admin_installer.py \
test_simplest_plugin.py
timeout-minutes: 15
- name: show logs
if: always() && steps.integration-tests.outcome != 'skipped'
run: |
.github/integration-test.py show-logs integration-tests
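
In the new run-test invocation above, --installer-args is passed once per installer flag; integration-test.py collects the repeated occurrences into a list via argparse's action="append". A tiny standalone illustration of that behaviour, using the values from this workflow:

import argparse

parser = argparse.ArgumentParser()
# each occurrence of --installer-args appends another string to the list
parser.add_argument("--installer-args", action="append")

args = parser.parse_args(
    [
        "--installer-args", "--admin test-admin-username:test-admin-password",
        "--installer-args", "--plugin /srv/src/integration-tests/plugins/simplest",
    ]
)
print(args.installer_args)
# ['--admin test-admin-username:test-admin-password',
#  '--plugin /srv/src/integration-tests/plugins/simplest']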

View File

@@ -82,15 +82,15 @@ jobs:
- name: Install Python dependencies
run: |
python3 -m pip install -r dev-requirements.txt
python3 -m pip install -e .
pip install -r dev-requirements.txt
pip install -e .
- name: List Python dependencies
run: |
pip freeze
# We abort pytest after two failures as a compromise between avoiding a
# flood of logs and still learning whether multiple tests would fail.
- name: Run unit tests
run: pytest --verbose --maxfail=2 --color=yes --durations=10 --cov=tljh tests/
run: pytest tests
timeout-minutes: 15
- uses: codecov/codecov-action@v3

View File

@@ -36,7 +36,7 @@ Command line flags, from "bootstrap.py --help":
logs can be accessed during installation. If this is
passed, it will pass --progress-page-server-pid=<pid>
to the tljh installer for later termination.
--version TLJH version or Git reference. Default 'latest' is
--version VERSION TLJH version or Git reference. Default 'latest' is
the most recent release. Partial versions can be
specified, for example '1', '1.0' or '1.0.0'. You
can also pass a branch name such as 'main' or a
@@ -183,32 +183,32 @@ def run_subprocess(cmd, *args, **kwargs):
return output
def get_os_release_variable(key):
"""
Return value for key from /etc/os-release
/etc/os-release is a bash file, so should use bash to parse it.
Returns empty string if key is not found.
"""
return (
subprocess.check_output(
[
"/bin/bash",
"-c",
"source /etc/os-release && echo ${{{key}}}".format(key=key),
]
)
.decode()
.strip()
)
def ensure_host_system_can_install_tljh():
"""
Check if TLJH is installable in current host system and exit with a clear
error message otherwise.
"""
def get_os_release_variable(key):
"""
Return value for key from /etc/os-release
/etc/os-release is a bash file, so should use bash to parse it.
Returns empty string if key is not found.
"""
return (
subprocess.check_output(
[
"/bin/bash",
"-c",
"source /etc/os-release && echo ${{{key}}}".format(key=key),
]
)
.decode()
.strip()
)
# Require Ubuntu 20.04+ or Debian 11+
distro = get_os_release_variable("ID")
version = get_os_release_variable("VERSION_ID")
@@ -364,7 +364,7 @@ def main():
)
parser.add_argument(
"--version",
default="latest",
default="",
help=(
"TLJH version or Git reference. "
"Default 'latest' is the most recent release. "
@@ -478,21 +478,26 @@ def main():
logger.info("Upgrading pip...")
run_subprocess([hub_env_pip, "install", "--upgrade", "pip"])
# Install/upgrade TLJH installer
# pip install TLJH installer based on
#
# 1. --version, _resolve_git_version is used
# 2. TLJH_BOOTSTRAP_PIP_SPEC (then also respect TLJH_BOOTSTRAP_DEV)
# 3. latest, _resolve_git_version is used
#
tljh_install_cmd = [hub_env_pip, "install", "--upgrade"]
if os.environ.get("TLJH_BOOTSTRAP_DEV", "no") == "yes":
logger.info("Selected TLJH_BOOTSTRAP_DEV=yes...")
tljh_install_cmd.append("--editable")
bootstrap_pip_spec = os.environ.get("TLJH_BOOTSTRAP_PIP_SPEC")
if not bootstrap_pip_spec:
if args.version or not bootstrap_pip_spec:
version_to_resolve = args.version or "latest"
bootstrap_pip_spec = (
"git+https://github.com/jupyterhub/the-littlest-jupyterhub.git@{}".format(
_resolve_git_version(args.version)
_resolve_git_version(version_to_resolve)
)
)
elif os.environ.get("TLJH_BOOTSTRAP_DEV", "no") == "yes":
logger.info("Selected TLJH_BOOTSTRAP_DEV=yes...")
tljh_install_cmd.append("--editable")
tljh_install_cmd.append(bootstrap_pip_spec)
if initial_setup:
logger.info("Installing TLJH installer...")
else:
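
The hunk above changes how the pip spec for the tljh package is chosen: an explicit --version wins, then TLJH_BOOTSTRAP_PIP_SPEC (with TLJH_BOOTSTRAP_DEV only honoured in that branch), and otherwise the latest release is resolved. A condensed, standalone sketch of that priority, where _resolve_git_version is a stand-in for the real resolver:

import os


def _resolve_git_version(version):
    # stand-in for bootstrap.py's real resolver, which maps "latest", partial
    # versions, branch names and commit hashes to a git reference
    return version


def resolve_tljh_install_cmd(version_arg):
    bootstrap_pip_spec = os.environ.get("TLJH_BOOTSTRAP_PIP_SPEC")
    cmd = ["pip", "install", "--upgrade"]

    if version_arg or not bootstrap_pip_spec:
        # 1. an explicit --version wins; 3. otherwise fall back to "latest"
        version_to_resolve = version_arg or "latest"
        bootstrap_pip_spec = (
            "git+https://github.com/jupyterhub/the-littlest-jupyterhub.git@"
            + _resolve_git_version(version_to_resolve)
        )
    elif os.environ.get("TLJH_BOOTSTRAP_DEV", "no") == "yes":
        # 2. TLJH_BOOTSTRAP_PIP_SPEC was given; optionally install it editable
        cmd.append("--editable")

    cmd.append(bootstrap_pip_spec)
    return cmd


print(resolve_tljh_install_cmd(""))     # installs the latest release from GitHub
print(resolve_tljh_install_cmd("1.0"))  # installs the pinned version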

View File

@@ -1,4 +1,5 @@
packaging
pytest
pytest-cov
pytest-asyncio
pytest-mock

View File

@@ -24,8 +24,6 @@ RUN find /etc/systemd/system \
-not -name '*systemd-user-sessions*' \
-exec rm \{} \;
RUN mkdir -p /etc/sudoers.d
RUN systemctl set-default multi-user.target
STOPSIGNAL SIGRTMIN+3

View File

@@ -1,55 +1,47 @@
"""
Simplest plugin that exercises all the hooks
Simplest plugin that exercises all the hooks defined in tljh/hooks.py.
"""
from tljh.hooks import hookimpl
@hookimpl
def tljh_extra_user_conda_packages():
return [
"hypothesis",
]
return ["tqdm"]
@hookimpl
def tljh_extra_user_pip_packages():
return [
"django",
]
return ["django"]
@hookimpl
def tljh_extra_hub_pip_packages():
return [
"there",
]
return ["there"]
@hookimpl
def tljh_extra_apt_packages():
return [
"sl",
]
@hookimpl
def tljh_config_post_install(config):
# Put an arbitrary marker we can test for
config["simplest_plugin"] = {"present": True}
return ["sl"]
@hookimpl
def tljh_custom_jupyterhub_config(c):
c.JupyterHub.authenticator_class = "tmpauthenticator.TmpAuthenticator"
c.Test.jupyterhub_config_set_by_simplest_plugin = True
@hookimpl
def tljh_config_post_install(config):
config["Test"] = {"tljh_config_set_by_simplest_plugin": True}
@hookimpl
def tljh_post_install():
with open("test_post_install", "w") as f:
f.write("123456789")
with open("test_tljh_post_install", "w") as f:
f.write("file_written_by_simplest_plugin")
@hookimpl
def tljh_new_user_create(username):
with open("test_new_user_create", "w") as f:
f.write("file_written_by_simplest_plugin")
f.write(username)
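
The hookimpl marker imported from tljh.hooks is a pluggy hook-implementation marker, so hooks like the ones above are discovered and called through a pluggy plugin manager. A rough, self-contained illustration of that mechanism with a simplified hook spec (not TLJH's actual plugin-loading code):

import pluggy

hookspec = pluggy.HookspecMarker("tljh")
hookimpl = pluggy.HookimplMarker("tljh")


class TLJHHookSpecs:
    """A tiny subset of hook signatures, standing in for tljh/hooks.py."""

    @hookspec
    def tljh_extra_user_pip_packages(self):
        """Return a list of pip packages for the user environment."""


class SimplestPlugin:
    @hookimpl
    def tljh_extra_user_pip_packages(self):
        return ["django"]


pm = pluggy.PluginManager("tljh")
pm.add_hookspecs(TLJHHookSpecs)
pm.register(SimplestPlugin())

# every registered plugin contributes one list; pluggy returns them all
print(pm.hook.tljh_extra_user_pip_packages())  # [['django']]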

View File

@@ -1,3 +1,4 @@
pytest
pytest-cov
pytest-asyncio
git+https://github.com/yuvipanda/hubtraf.git

View File

@@ -1,42 +1,56 @@
import asyncio
from functools import partial
import pytest
from hubtraf.auth.dummy import login_dummy
from hubtraf.user import User
# Use sudo to invoke it, since this is how users invoke it.
# This catches issues with PATH
TLJH_CONFIG_PATH = ["sudo", "tljh-config"]
@pytest.mark.asyncio
async def test_admin_login():
"""
Test if the admin that was added during install can login with
the password provided.
"""
hub_url = "http://localhost"
username = "admin"
password = "admin"
async with User(username, hub_url, partial(login_dummy, password=password)) as u:
await u.login()
# If user is not logged in, this will raise an exception
await u.ensure_server_simulate()
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
HUB_URL = "http://localhost"
# FIXME: Other tests may have set auth.type to dummy, so we reset it here to
#        get the default of firstuseauthenticator. Tests should clean up after
#        themselves to a better degree, but it's a bit of trouble to reload
#        jupyterhub between each test as well, if that's needed...
async def test_restore_relevant_tljh_state():
assert (
0
== await (
await asyncio.create_subprocess_exec(
*TLJH_CONFIG_PATH,
"set",
"auth.type",
"firstuseauthenticator.FirstUseAuthenticator",
)
).wait()
)
assert (
0
== await (
await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, "reload")
).wait()
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"username, password",
"username, password, expect_successful_login",
[
("admin", ""),
("admin", "wrong_passw"),
("user", "password"),
("test-admin-username", "test-admin-password", True),
("user", "", False),
],
)
async def test_unsuccessful_login(username, password):
async def test_pre_configured_admin_login(username, password, expect_successful_login):
"""
Ensure nobody but the admin that was added during install can login
Verify that the "--admin <username>:<password>" flag allows that user/pass
combination and no other user can login.
"""
hub_url = "http://localhost"
async with User(username, hub_url, partial(login_dummy, password="")) as u:
async with User(username, HUB_URL, partial(login_dummy, password=password)) as u:
user_logged_in = await u.login()
assert user_logged_in == False
assert user_logged_in == expect_successful_login
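
The tljh-config calls in this file (and in test_hub.py) repeat the same pattern: spawn the command with asyncio.create_subprocess_exec and assert a zero exit code. A small hypothetical helper, not part of this PR, that would express the same thing more compactly:

import asyncio

TLJH_CONFIG_PATH = ["sudo", "tljh-config"]


async def tljh_config(*args):
    """Run `sudo tljh-config <args>` and assert that it exited cleanly."""
    proc = await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, *args)
    assert await proc.wait() == 0


async def restore_default_auth():
    # equivalent to test_restore_relevant_tljh_state above
    await tljh_config("set", "auth.type", "firstuseauthenticator.FirstUseAuthenticator")
    await tljh_config("reload")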

View File

@@ -1,129 +1,82 @@
"""
Test running bootstrap script in different circumstances
This test file tests bootstrap.py's ability to
- error verbosely for old ubuntu
- error verbosely for no systemd
- start and provide a progress page web server
FIXME: The last test stands out and could be part of the other tests, and the
first two could be more like unit tests. Ideally, this file is
significantly reduced.
"""
import concurrent.futures
import os
import subprocess
import sys
import time
GIT_REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
BASE_IMAGE = os.getenv("BASE_IMAGE", "ubuntu:20.04")
def install_pkgs(container_name, show_progress_page):
# Install python3 inside the ubuntu container
# There is no trusted Ubuntu+Python3 container we can use
pkgs = ["python3"]
if show_progress_page:
pkgs += ["systemd", "git", "curl"]
# Create the sudoers dir, so that the installer successfully gets to the
# point of starting jupyterhub and stopping the progress page server.
subprocess.check_output(
["docker", "exec", container_name, "mkdir", "-p", "etc/sudoers.d"]
)
subprocess.check_output(["docker", "exec", container_name, "apt-get", "update"])
subprocess.check_output(
["docker", "exec", container_name, "apt-get", "install", "--yes"] + pkgs
def _stop_container():
"""
Stops a container if its already running.
"""
subprocess.run(
["docker", "rm", "--force", "test-bootstrap"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
def get_bootstrap_script_location(container_name, show_progress_page):
# Copy only the bootstrap script to container when progress page not enabled, to be faster
source_path = "bootstrap/"
bootstrap_script = "/srv/src/bootstrap.py"
if show_progress_page:
source_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)
)
bootstrap_script = "/srv/src/bootstrap/bootstrap.py"
subprocess.check_call(["docker", "cp", source_path, f"{container_name}:/srv/src"])
return bootstrap_script
# FIXME: Refactor this function to be easier to understand, using the following
#        parameters
#
# - param: container_apt_packages
# - param: bootstrap_tljh_source
# - local: copies local tljh repo to container and configures bootstrap to
# install tljh from copied repo
# - github: configures bootstrap to install tljh from the official github repo
# - <pip spec>: configures bootstrap to install tljh from any given remote location
# - param: bootstrap_flags
#
# FIXME: Consider stripping logic in this file to only testing if the bootstrap
# script successfully detects the too old Ubuntu version and the lack of
# systemd. The remaining test named test_progress_page could rely on
#        running against the systemd container that can be built by
# integration-test.py.
#
def run_bootstrap_after_preparing_container(
container_name, image, show_progress_page=False
):
def _run_bootstrap_in_container(image, complete_setup=True):
"""
1. Stops old container
2. Starts --detached container
3. Installs apt packages in container
4. Two situations
A) limited test (--show-progress-page=false)
- Copies ./bootstrap/ folder content to container /srv/src
- Runs copied bootstrap/bootstrap.py without flags
B) full test (--show-progress-page=true)
- Copies ./ folder content to the container /srv/src
- Runs copied bootstrap/bootstrap.py with environment variables
- TLJH_BOOTSTRAP_DEV=yes
This makes --editable be used when installing the tljh package
- TLJH_BOOTSTRAP_PIP_SPEC=/srv/src
This makes us install tljh from the given location instead of from
github.com/jupyterhub/the-littlest-jupyterhub
1. (Re-)starts a container named test-bootstrap based on image, mounting the
local git repo and publishing host port 8080 to the container's port 80.
2. Installs python3, systemd, git, and curl in container
3. Runs bootstrap/bootstrap.py in container to install the mounted git
repo's tljh package in --editable mode.
"""
# stop container if it is already running
subprocess.run(["docker", "rm", "-f", container_name])
_stop_container()
# Start a detached container
subprocess.check_call(
subprocess.check_output(
[
"docker",
"run",
"--env=DEBIAN_FRONTEND=noninteractive",
"--env=TLJH_BOOTSTRAP_DEV=yes",
"--env=TLJH_BOOTSTRAP_PIP_SPEC=/srv/src",
f"--volume={GIT_REPO_PATH}:/srv/src",
"--publish=8080:80",
"--detach",
f"--name={container_name}",
"--name=test-bootstrap",
image,
"/bin/bash",
"bash",
"-c",
"sleep 1000s",
"sleep 300s",
]
)
install_pkgs(container_name, show_progress_page)
bootstrap_script = get_bootstrap_script_location(container_name, show_progress_page)
exec_flags = [
"-i",
container_name,
"python3",
bootstrap_script,
"--version",
"main",
]
if show_progress_page:
exec_flags = (
["-e", "TLJH_BOOTSTRAP_DEV=yes", "-e", "TLJH_BOOTSTRAP_PIP_SPEC=/srv/src"]
+ exec_flags
+ ["--show-progress-page"]
run = ["docker", "exec", "-i", "test-bootstrap"]
subprocess.check_output(run + ["apt-get", "update"])
subprocess.check_output(run + ["apt-get", "install", "--yes", "python3"])
if complete_setup:
subprocess.check_output(
run + ["apt-get", "install", "--yes", "systemd", "git", "curl"]
)
# Run bootstrap script, return the output
run_bootstrap = run + [
"python3",
"/srv/src/bootstrap/bootstrap.py",
"--show-progress-page",
]
# Run bootstrap script inside detached container, return the output
return subprocess.run(
["docker", "exec"] + exec_flags,
check=False,
stdout=subprocess.PIPE,
encoding="utf-8",
run_bootstrap,
text=True,
capture_output=True,
)
@@ -131,66 +84,72 @@ def test_ubuntu_too_old():
"""
Error with a useful message when running in older Ubuntu
"""
output = run_bootstrap_after_preparing_container("old-distro-test", "ubuntu:18.04")
output = _run_bootstrap_in_container("ubuntu:18.04", False)
_stop_container()
assert output.stdout == "The Littlest JupyterHub requires Ubuntu 20.04 or higher\n"
assert output.returncode == 1
def test_inside_no_systemd_docker():
output = run_bootstrap_after_preparing_container(
"plain-docker-test",
BASE_IMAGE,
)
def test_no_systemd():
output = _run_bootstrap_in_container("ubuntu:22.04", False)
assert "Systemd is required to run TLJH" in output.stdout
assert output.returncode == 1
def verify_progress_page(expected_status_code, timeout):
progress_page_status = False
def _wait_for_progress_page_response(expected_status_code, timeout):
start = time.time()
while not progress_page_status and (time.time() - start < timeout):
while time.time() - start < timeout:
try:
resp = subprocess.check_output(
[
"docker",
"exec",
"progress-page",
"curl",
"-i",
"http://localhost/index.html",
]
"--include",
"http://localhost:8080/index.html",
],
text=True,
stderr=subprocess.DEVNULL,
)
if b"HTTP/1.0 200 OK" in resp:
progress_page_status = True
break
else:
print(
f"Unexpected progress page response: {resp[:100]}", file=sys.stderr
)
except Exception as e:
print(f"Error getting progress page: {e}", file=sys.stderr)
time.sleep(1)
continue
if "HTTP/1.0 200 OK" in resp:
return True
except Exception:
pass
time.sleep(1)
return progress_page_status
return False
def test_progress_page():
def test_show_progress_page():
with concurrent.futures.ThreadPoolExecutor() as executor:
installer = executor.submit(
run_bootstrap_after_preparing_container,
"progress-page",
BASE_IMAGE,
True,
run_bootstrap_job = executor.submit(_run_bootstrap_in_container, BASE_IMAGE)
# Check that the web server reporting progress, started by the bootstrap
# script, responded successfully.
success = _wait_for_progress_page_response(
expected_status_code=200, timeout=180
)
if success:
# Let's terminate the test here and save a minute or so of test execution
# time, because we can know the progress page server will be stopped
# successfully in other tests; otherwise traefik wouldn't be able to
# start and use the same port, for example.
return
# Check if progress page started
started = verify_progress_page(expected_status_code=200, timeout=180)
assert started
# Now await an expected failure to start up JupyterHub by tljh.installer,
# which should have taken over the work started by the bootstrap script.
#
# This failure is expected to occur in
# tljh.installer.ensure_jupyterhub_service calling systemd.reload_daemon
# like this:
#
# > System has not been booted with systemd as init system (PID 1).
# > Can't operate.
#
output = run_bootstrap_job.result()
print(output.stdout)
print(output.stderr)
# This will fail to start tljh but should successfully get to the point
# where it stops the progress page server.
output = installer.result()
# Check if progress page stopped
# At this point we should be able to see that tljh.installer
# intentionally stopped the web server reporting progress, as the port
# was about to be needed by Traefik.
assert "Progress page server stopped successfully." in output.stdout
assert success
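
As a sketch of the parameterization the FIXME near the top of this file asks for (purely illustrative, not what this PR implements), the container preparation and the bootstrap invocation could be driven by explicit arguments:

import os
import subprocess

GIT_REPO_PATH = os.getcwd()  # assumed to be the repository root


def run_bootstrap(image, container_apt_packages=("python3",),
                  bootstrap_tljh_source="github", bootstrap_flags=()):
    """Hypothetical refactor: prepare a container, then run bootstrap.py in it."""
    env_flags = []
    if bootstrap_tljh_source == "local":
        # install the tljh copy mounted at /srv/src, in editable mode
        env_flags = ["--env=TLJH_BOOTSTRAP_DEV=yes",
                     "--env=TLJH_BOOTSTRAP_PIP_SPEC=/srv/src"]
    elif bootstrap_tljh_source != "github":
        # any other value is treated as an explicit pip spec
        env_flags = [f"--env=TLJH_BOOTSTRAP_PIP_SPEC={bootstrap_tljh_source}"]

    subprocess.run(["docker", "rm", "--force", "test-bootstrap"],
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    subprocess.check_call(
        ["docker", "run", "--detach", "--name=test-bootstrap",
         f"--volume={GIT_REPO_PATH}:/srv/src", *env_flags, image,
         "sleep", "300"]
    )
    in_container = ["docker", "exec", "test-bootstrap"]
    subprocess.check_call(in_container + ["apt-get", "update"])
    subprocess.check_call(in_container + ["apt-get", "install", "--yes",
                                          *container_apt_packages])
    return subprocess.run(
        in_container + ["python3", "/srv/src/bootstrap/bootstrap.py",
                        *bootstrap_flags],
        text=True, capture_output=True,
    )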

View File

@@ -21,22 +21,21 @@ TLJH_CONFIG_PATH = ["sudo", "tljh-config"]
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
hub_url = "http://localhost"
HUB_URL = "http://localhost"
def test_hub_up():
r = requests.get(hub_url)
r = requests.get(HUB_URL)
r.raise_for_status()
def test_hub_version():
r = requests.get(hub_url + "/hub/api")
r = requests.get(HUB_URL + "/hub/api")
r.raise_for_status()
info = r.json()
assert V("4") <= V(info["version"]) <= V("5")
@pytest.mark.asyncio
async def test_user_code_execute():
"""
User logs in, starts a server & executes code
@@ -58,17 +57,13 @@ async def test_user_code_execute():
).wait()
)
async with User(username, hub_url, partial(login_dummy, password="")) as u:
await u.login()
await u.ensure_server_simulate()
async with User(username, HUB_URL, partial(login_dummy, password="")) as u:
assert await u.login()
await u.ensure_server_simulate(timeout=60, spawn_refresh_time=5)
await u.start_kernel()
await u.assert_code_output("5 * 4", "20", 5, 5)
# Assert that the user exists
assert pwd.getpwnam(f"jupyter-{username}") is not None
@pytest.mark.asyncio
async def test_user_server_started_with_custom_base_url():
"""
User logs in, starts a server with a custom base_url & executes code
@@ -76,7 +71,7 @@ async def test_user_server_started_with_custom_base_url():
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
base_url = "/custom-base"
hub_url = f"http://localhost{base_url}"
custom_hub_url = f"{HUB_URL}{base_url}"
username = secrets.token_hex(8)
assert (
@@ -102,9 +97,9 @@ async def test_user_server_started_with_custom_base_url():
).wait()
)
async with User(username, hub_url, partial(login_dummy, password="")) as u:
await u.login()
await u.ensure_server_simulate()
async with User(username, custom_hub_url, partial(login_dummy, password="")) as u:
assert await u.login()
await u.ensure_server_simulate(timeout=60, spawn_refresh_time=5)
# unset base_url to avoid problems with other tests
assert (
@@ -123,14 +118,12 @@ async def test_user_server_started_with_custom_base_url():
)
@pytest.mark.asyncio
async def test_user_admin_add():
"""
User is made an admin, logs in and we check if they are in admin group
"""
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
hub_url = "http://localhost"
username = secrets.token_hex(8)
assert (
@@ -156,9 +149,9 @@ async def test_user_admin_add():
).wait()
)
async with User(username, hub_url, partial(login_dummy, password="")) as u:
await u.login()
await u.ensure_server_simulate()
async with User(username, HUB_URL, partial(login_dummy, password="")) as u:
assert await u.login()
await u.ensure_server_simulate(timeout=60, spawn_refresh_time=5)
# Assert that the user exists
assert pwd.getpwnam(f"jupyter-{username}") is not None
@@ -167,83 +160,10 @@ async def test_user_admin_add():
assert f"jupyter-{username}" in grp.getgrnam("jupyterhub-admins").gr_mem
# FIXME: Make this test pass
@pytest.mark.asyncio
@pytest.mark.xfail(reason="Unclear why this is failing")
async def test_user_admin_remove():
"""
User is made an admin, logs in and we check if they are in admin group.
Then we remove them from admin group, and check they *aren't* in admin group :D
"""
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
hub_url = "http://localhost"
username = secrets.token_hex(8)
assert (
0
== await (
await asyncio.create_subprocess_exec(
*TLJH_CONFIG_PATH, "set", "auth.type", "dummy"
)
).wait()
)
assert (
0
== await (
await asyncio.create_subprocess_exec(
*TLJH_CONFIG_PATH, "add-item", "users.admin", username
)
).wait()
)
assert (
0
== await (
await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, "reload")
).wait()
)
async with User(username, hub_url, partial(login_dummy, password="")) as u:
await u.login()
await u.ensure_server_simulate()
# Assert that the user exists
assert pwd.getpwnam(f"jupyter-{username}") is not None
# Assert that the user has admin rights
assert f"jupyter-{username}" in grp.getgrnam("jupyterhub-admins").gr_mem
assert (
0
== await (
await asyncio.create_subprocess_exec(
*TLJH_CONFIG_PATH, "remove-item", "users.admin", username
)
).wait()
)
assert (
0
== await (
await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, "reload")
).wait()
)
await u.stop_server()
await u.ensure_server_simulate()
# Assert that the user does *not* have admin rights
assert f"jupyter-{username}" not in grp.getgrnam("jupyterhub-admins").gr_mem
@pytest.mark.asyncio
async def test_long_username():
"""
User with a long name logs in, and we check if their name is properly truncated.
"""
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
hub_url = "http://localhost"
username = secrets.token_hex(32)
assert (
@@ -262,9 +182,9 @@ async def test_long_username():
)
try:
async with User(username, hub_url, partial(login_dummy, password="")) as u:
await u.login()
await u.ensure_server_simulate()
async with User(username, HUB_URL, partial(login_dummy, password="")) as u:
assert await u.login()
await u.ensure_server_simulate(timeout=60, spawn_refresh_time=5)
# Assert that the user exists
system_username = generate_system_username(f"jupyter-{username}")
@@ -277,14 +197,12 @@ async def test_long_username():
raise
@pytest.mark.asyncio
async def test_user_group_adding():
"""
User logs in, and we check if they are added to the specified group.
"""
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
hub_url = "http://localhost"
username = secrets.token_hex(8)
groups = {"somegroup": [username]}
# Create the group we want to add the user to
@@ -317,9 +235,9 @@ async def test_user_group_adding():
)
try:
async with User(username, hub_url, partial(login_dummy, password="")) as u:
await u.login()
await u.ensure_server_simulate()
async with User(username, HUB_URL, partial(login_dummy, password="")) as u:
assert await u.login()
await u.ensure_server_simulate(timeout=60, spawn_refresh_time=5)
# Assert that the user exists
system_username = generate_system_username(f"jupyter-{username}")
@@ -337,15 +255,11 @@ async def test_user_group_adding():
raise
@pytest.mark.asyncio
async def test_idle_server_culled():
"""
User logs in, starts a server & stays idle for 1 min.
User logs in, starts a server & stays idle for a while.
(the user's server should be culled during this period)
"""
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
hub_url = "http://localhost"
username = secrets.token_hex(8)
assert (
@@ -374,12 +288,12 @@ async def test_idle_server_culled():
)
).wait()
)
# Cull servers and users after 30s, regardless of activity
# Cull servers and users after a while, regardless of activity
assert (
0
== await (
await asyncio.create_subprocess_exec(
*TLJH_CONFIG_PATH, "set", "services.cull.max_age", "30"
*TLJH_CONFIG_PATH, "set", "services.cull.max_age", "15"
)
).wait()
)
@@ -390,12 +304,12 @@ async def test_idle_server_culled():
).wait()
)
async with User(username, hub_url, partial(login_dummy, password="")) as u:
async with User(username, HUB_URL, partial(login_dummy, password="")) as u:
# Login the user
await u.login()
assert await u.login()
# Start user's server
await u.ensure_server_simulate()
await u.ensure_server_simulate(timeout=60, spawn_refresh_time=5)
# Assert that the user exists
assert pwd.getpwnam(f"jupyter-{username}") is not None
@@ -432,7 +346,7 @@ async def test_idle_server_culled():
# Wait for culling
# step 1: check if the server is still running
timeout = 100
timeout = 30
async def server_stopped():
"""Has the server been stopped?"""
@@ -448,7 +362,7 @@ async def test_idle_server_culled():
# step 2. wait for user to be deleted
async def user_removed():
# Check that after 60s, the user has been culled
# Check that after a while, the user has been culled
r = await hub_api_request()
print(f"{r.status} {r.url}")
return r.status == 403
@@ -460,15 +374,13 @@ async def test_idle_server_culled():
)
@pytest.mark.asyncio
async def test_active_server_not_culled():
"""
User logs in, starts a server & stays idle for 30s
User logs in, starts a server & stays idle for a while
(the user's server should not be culled during this period).
"""
# This *must* be localhost, not an IP
# aiohttp throws away cookies if we are connecting to an IP!
hub_url = "http://localhost"
username = secrets.token_hex(8)
assert (
@@ -497,12 +409,12 @@ async def test_active_server_not_culled():
)
).wait()
)
# Cull servers and users after 30s, regardless of activity
# Cull servers and users after a while, regardless of activity
assert (
0
== await (
await asyncio.create_subprocess_exec(
*TLJH_CONFIG_PATH, "set", "services.cull.max_age", "60"
*TLJH_CONFIG_PATH, "set", "services.cull.max_age", "30"
)
).wait()
)
@@ -513,10 +425,10 @@ async def test_active_server_not_culled():
).wait()
)
async with User(username, hub_url, partial(login_dummy, password="")) as u:
await u.login()
async with User(username, HUB_URL, partial(login_dummy, password="")) as u:
assert await u.login()
# Start user's server
await u.ensure_server_simulate()
await u.ensure_server_simulate(timeout=60, spawn_refresh_time=5)
# Assert that the user exists
assert pwd.getpwnam(f"jupyter-{username}") is not None
@@ -526,7 +438,7 @@ async def test_active_server_not_culled():
assert r.status == 200
async def server_has_stopped():
# Check that after 30s, we can still reach the user's server
# Check that after a while, we can still reach the user's server
r = await u.session.get(user_url, allow_redirects=False)
print(f"{r.status} {r.url}")
return r.status != 200
@@ -535,7 +447,7 @@ async def test_active_server_not_culled():
await exponential_backoff(
server_has_stopped,
"User's server is still reachable (good!)",
timeout=30,
timeout=15,
)
except asyncio.TimeoutError:
# timeout error means the test passed - the server didn't go away while we were waiting

View File

@@ -1,79 +1,85 @@
"""
Test simplest plugin
Test the plugin in integration-tests/plugins/simplest, which makes use of all
the tljh plugin hooks defined in tljh/hooks.py.
"""
import os
import subprocess
import requests
from ruamel.yaml import YAML
from tljh import user
from tljh.config import CONFIG_FILE, HUB_ENV_PREFIX, USER_ENV_PREFIX
GIT_REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
yaml = YAML(typ="rt")
def test_apt_packages():
"""
Test extra apt packages are installed
"""
assert os.path.exists("/usr/games/sl")
def test_tljh_extra_user_conda_packages():
subprocess.check_call([f"{USER_ENV_PREFIX}/bin/python3", "-c", "import tqdm"])
def test_pip_packages():
"""
Test extra user & hub pip packages are installed
"""
def test_tljh_extra_user_pip_packages():
subprocess.check_call([f"{USER_ENV_PREFIX}/bin/python3", "-c", "import django"])
def test_tljh_extra_hub_pip_packages():
subprocess.check_call([f"{HUB_ENV_PREFIX}/bin/python3", "-c", "import there"])
def test_conda_packages():
"""
Test extra user conda packages are installed
"""
subprocess.check_call([f"{USER_ENV_PREFIX}/bin/python3", "-c", "import hypothesis"])
def test_tljh_extra_apt_packages():
assert os.path.exists("/usr/games/sl")
def test_config_hook():
def test_tljh_custom_jupyterhub_config():
"""
Check config changes are present
Test that the provided tljh_custom_jupyterhub_config hook has made the tljh
jupyterhub load additional jupyterhub config.
"""
tljh_jupyterhub_config = os.path.join(GIT_REPO_PATH, "tljh", "jupyterhub_config.py")
output = subprocess.check_output(
[
f"{HUB_ENV_PREFIX}/bin/python3",
"-m",
"jupyterhub",
"--show-config",
"--config",
tljh_jupyterhub_config,
],
text=True,
)
assert "jupyterhub_config_set_by_simplest_plugin" in output
def test_tljh_config_post_install():
"""
Test that the provided tljh_config_post_install hook has made tljh recognize
additional tljh config.
"""
with open(CONFIG_FILE) as f:
data = yaml.load(f)
assert data["simplest_plugin"]["present"]
tljh_config = yaml.load(f)
assert tljh_config["Test"]["tljh_config_set_by_simplest_plugin"]
def test_jupyterhub_config_hook():
def test_tljh_post_install():
"""
Test that tmpauthenticator is enabled by our custom config plugin
Test that the provided tljh_post_install hook has been executed by looking
for a specific file written.
"""
resp = requests.get("http://localhost/hub/tmplogin", allow_redirects=False)
assert resp.status_code == 302
assert resp.headers["Location"] == "/hub/spawn"
def test_post_install_hook():
"""
Test that the test_post_install file has the correct content
"""
with open("test_post_install") as f:
with open("test_tljh_post_install") as f:
content = f.read()
assert content == "123456789"
assert "file_written_by_simplest_plugin" in content
def test_new_user_create():
def test_tljh_new_user_create():
"""
Test that plugin receives username as arg
Test that the provided tljh_new_user_create hook has been executed by
looking for a specific file written.
"""
# Trigger the hook by letting tljh's code create a user
username = "user1"
# Call ensure_user to make sure the user plugin gets called
user.ensure_user(username)
with open("test_new_user_create") as f:
content = f.read()
assert content == username
assert "file_written_by_simplest_plugin" in content
assert username in content

View File

@@ -32,3 +32,27 @@ target_version = [
"py310",
"py311",
]
# pytest is used for running Python based tests
#
# ref: https://docs.pytest.org/en/stable/
#
[tool.pytest.ini_options]
addopts = "--verbose --color=yes --durations=10 --maxfail=1 --cov=tljh"
asyncio_mode = "auto"
filterwarnings = [
'ignore:.*Module bootstrap was never imported.*:coverage.exceptions.CoverageWarning',
]
# pytest-cov / coverage is used to measure code coverage of tests
#
# ref: https://coverage.readthedocs.io/en/stable/config.html
#
[tool.coverage.run]
parallel = true
omit = [
"tests/**",
"integration-tests/**",
]

View File

@@ -1,12 +1,12 @@
# Unit test some functions from bootstrap.py
# Since bootstrap.py isn't part of the package, it's not automatically importable
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import pytest
# Since bootstrap.py isn't part of the package, it's not automatically importable
GIT_REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, GIT_REPO_PATH)
from bootstrap import bootstrap

View File

@@ -126,6 +126,7 @@ def ensure_usergroups():
user.ensure_group("jupyterhub-users")
logger.info("Granting passwordless sudo to JupyterHub admins...")
os.makedirs("/etc/sudoers.d/", exist_ok=True)
with open("/etc/sudoers.d/jupyterhub-admins", "w") as f:
# JupyterHub admins should have full passwordless sudo access
f.write("%jupyterhub-admins ALL = (ALL) NOPASSWD: ALL\n")
@@ -283,7 +284,7 @@ def ensure_user_environment(user_requirements_txt_file):
def ensure_admins(admin_password_list):
"""
Setup given list of users as admins.
Setup given list of user[:password] strings as admins.
"""
os.makedirs(STATE_DIR, mode=0o700, exist_ok=True)
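
Each admin entry handled here is a "username" or "username:password" string coming from the --admin installer flag. A minimal sketch of splitting such entries (illustrative only; the real ensure_admins does more with these values):

def split_admin_entries(admin_password_list):
    """Split 'user' or 'user:password' strings into (user, password) pairs."""
    pairs = []
    for entry in admin_password_list:
        username, _, password = entry.partition(":")
        pairs.append((username, password or None))
    return pairs


print(split_admin_entries(["test-admin-username:test-admin-password", "admin"]))
# [('test-admin-username', 'test-admin-password'), ('admin', None)]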